Diffstat (limited to 'arch/powerpc'): 678 files changed, 16419 insertions, 25602 deletions
diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild index 22cd0d55a892..b010ccb071b6 100644 --- a/arch/powerpc/Kbuild +++ b/arch/powerpc/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror -Wa,--fatal-warnings +subdir-asflags-$(CONFIG_PPC_WERROR) := -Wa,--fatal-warnings obj-y += kernel/ obj-y += mm/ @@ -18,4 +19,4 @@ obj-$(CONFIG_KEXEC_CORE) += kexec/ obj-$(CONFIG_KEXEC_FILE) += purgatory/ # for cleaning -subdir- += boot +subdir- += boot tools diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a68b9e637eda..c3e0cc83f120 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -127,16 +127,19 @@ config PPC select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_COPY_MC if PPC64 + select ARCH_HAS_CRC32 if PPC64 && ALTIVEC + select ARCH_HAS_CRC_T10DIF if PPC64 && ALTIVEC select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES + select ARCH_HAS_DMA_OPS if PPC64 select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_HAS_HUGEPD if HUGETLB_PAGE select ARCH_HAS_KCOV + select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU select ARCH_HAS_MEMBARRIER_CALLBACKS select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU @@ -144,18 +147,22 @@ config PPC select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API + select ARCH_HAS_PREEMPT_LAZY + select ARCH_HAS_PTDUMP select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 select ARCH_HAS_SET_MEMORY - select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION + select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx) && !HIBERNATION select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_SYSCALL_WRAPPER if !SPU_BASE && !COMPAT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN + select ARCH_HAS_VDSO_ARCH_DATA select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_HAVE_EXTRA_ELF_NOTES if SPU_BASE select ARCH_KEEP_MEMBLOCK select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if PPC_RADIX_MMU select ARCH_MIGHT_HAVE_PC_PARPORT @@ -165,7 +172,7 @@ config PPC select ARCH_SPLIT_ARG64 if PPC32 select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW - select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x + select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_USE_MEMTEST @@ -184,7 +191,6 @@ config PPC select CPUMASK_OFFSTACK if NR_CPUS >= 8192 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN select DMA_OPS_BYPASS if PPC64 - select DMA_OPS if PPC64 select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT @@ -202,9 +208,9 @@ config PPC select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_PCI_IOMAP if PCI - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL + select GENERIC_VDSO_DATA_STORE select GENERIC_VDSO_TIME_NS select HAS_IOPORT if PCI select HAVE_ARCH_AUDITSYSCALL @@ -233,16 +239,19 @@ 
config PPC select HAVE_DEBUG_STACKOVERFLOW select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_ARGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32 + select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if PPC_FTRACE_OUT_OF_LINE || (PPC32 && ARCH_USING_PATCHABLE_FUNCTION_ENTRY) + select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS if HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS select HAVE_DYNAMIC_FTRACE_WITH_REGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_FAST_GUP + select HAVE_GUP_FAST + select HAVE_FTRACE_GRAPH_FUNC select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1 select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_FUNCTION_TRACER if PPC64 || (PPC32 && CC_IS_GCC) + select HAVE_FUNCTION_TRACER if !COMPILE_TEST && (PPC64 || (PPC32 && CC_IS_GCC)) select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC select HAVE_GENERIC_VDSO select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP @@ -268,14 +277,19 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_DYNAMIC_KEY + select HAVE_RETHOOK if KPROBES select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ + select HAVE_SAMPLE_FTRACE_DIRECT if HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS select HAVE_SETUP_PER_CPU_AREA if PPC64 select HAVE_SOFTIRQ_ON_OWN_STACK - select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) - select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) + select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,$(m32-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 -mstack-protector-guard-offset=0) + select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,$(m64-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 -mstack-protector-guard-offset=0) select HAVE_STATIC_CALL if PPC32 + select HAVE_STATIC_CALL_INLINE if PPC32 select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select HAVE_VIRT_CPU_ACCOUNTING_GEN @@ -285,7 +299,7 @@ config PPC select IOMMU_HELPER if PPC64 select IRQ_DOMAIN select IRQ_FORCED_THREADING - select KASAN_VMALLOC if KASAN && MODULES + select KASAN_VMALLOC if KASAN && EXECMEM select LOCK_MM_AND_FIND_VMA select MMU_GATHER_PAGE_SIZE select MMU_GATHER_RCU_TABLE_FREE @@ -310,6 +324,7 @@ config PPC select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select TRACE_IRQFLAGS_SUPPORT + select VDSO_GETRANDOM # # Please keep this list sorted alphabetically. # @@ -387,7 +402,7 @@ config ARCH_SUSPEND_POSSIBLE def_bool y depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \ - || 44x || 40x + || 44x config ARCH_SUSPEND_NONZERO_CPU def_bool y @@ -400,12 +415,9 @@ config ARCH_HAS_ADD_PAGES config PPC_DCR_NATIVE bool -config PPC_DCR_MMIO - bool - config PPC_DCR bool - depends on PPC_DCR_NATIVE || PPC_DCR_MMIO + depends on PPC_DCR_NATIVE default y config PPC_PCI_OF_BUS_MAP @@ -431,17 +443,12 @@ config PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT PCI domain dependent and each PCI controller on own domain can have 256 PCI buses, like it is on other Linux architectures. 
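[Editorial aside, not part of the patch: the HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS select added earlier in this hunk pairs with the CC_FLAGS_FTRACE change later in this series' Makefile hunk, where PPC32 builds with -fpatchable-function-entry=3,1. A minimal, hedged C sketch of what that flag (here via its per-function attribute equivalent) emits; the runtime patching described in the comments follows the usual call-ops scheme and is an assumption — the real powerpc sequences live under arch/powerpc/kernel/trace/.]

```c
/*
 * Hedged sketch: per-function equivalent of compiling with
 * -fpatchable-function-entry=3,1, as CC_FLAGS_FTRACE does for the
 * PPC32 DYNAMIC_FTRACE_WITH_CALL_OPS case in this series. Of the
 * three NOP slots, one is placed before the symbol and two after it.
 */
__attribute__((patchable_function_entry(3, 1)))
void traced(void)
{
	/*
	 * Conceptual entry layout (assumed, per the generic call-ops
	 * design; not the literal powerpc instruction sequence):
	 *
	 *     nop        <- slot before "traced:"; ftrace can stash a
	 *                   pointer to the relevant ftrace_ops here
	 * traced:
	 *     nop        <- patched to save the link register when
	 *                   tracing is enabled
	 *     nop        <- patched to branch to the ftrace trampoline
	 */
}
```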
-config PPC_OF_PLATFORM_PCI - bool - depends on PCI - depends on PPC64 # not supported on 32 bits yet - config ARCH_SUPPORTS_UPROBES def_bool y config PPC_ADV_DEBUG_REGS bool - depends on 40x || BOOKE + depends on BOOKE default y config PPC_ADV_DEBUG_IACS @@ -488,7 +495,7 @@ source "kernel/Kconfig.hz" config MATH_EMULATION bool "Math emulation" - depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT + depends on 44x || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT select PPC_FPU_REGS help Some PowerPC chips designed for embedded applications do not have @@ -566,6 +573,22 @@ config ARCH_USING_PATCHABLE_FUNCTION_ENTRY def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh $(CC) -mlittle-endian) if PPC64 && CPU_LITTLE_ENDIAN def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh $(CC) -mbig-endian) if PPC64 && CPU_BIG_ENDIAN +config PPC_FTRACE_OUT_OF_LINE + def_bool PPC64 && ARCH_USING_PATCHABLE_FUNCTION_ENTRY + select ARCH_WANTS_PRE_LINK_VMLINUX + +config PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE + int "Number of ftrace out-of-line stubs to reserve within .text" + depends on PPC_FTRACE_OUT_OF_LINE + default 32768 + help + Number of stubs to reserve for use by ftrace. This space is + reserved within .text, and is distinct from any additional space + added at the end of .text before the final vmlinux link. Set to + zero to have stubs only be generated at the end of vmlinux (only + if the size of vmlinux is less than 32MB). Set to a higher value + if building vmlinux larger than 48MB. + config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" depends on SMP && (PPC_PSERIES || \ @@ -607,11 +630,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config ARCH_SUPPORTS_KEXEC def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP) -config ARCH_SELECTS_KEXEC - def_bool y - depends on KEXEC - select CRASH_DUMP - config ARCH_SUPPORTS_KEXEC_FILE def_bool PPC64 @@ -622,7 +640,6 @@ config ARCH_SELECTS_KEXEC_FILE def_bool y depends on KEXEC_FILE select KEXEC_ELF - select CRASH_DUMP select HAVE_IMA_KEXEC if IMA config PPC64_BIG_ENDIAN_ELF_ABI_V2 @@ -687,15 +704,25 @@ config RELOCATABLE_TEST config ARCH_SUPPORTS_CRASH_DUMP def_bool PPC64 || PPC_BOOK3S_32 || PPC_85xx || (44x && !SMP) +config ARCH_DEFAULT_CRASH_DUMP + bool + default y if !PPC_BOOK3S_32 + config ARCH_SELECTS_CRASH_DUMP def_bool y depends on CRASH_DUMP select RELOCATABLE if PPC64 || 44x || PPC_85xx +config ARCH_SUPPORTS_CRASH_HOTPLUG + def_bool y + depends on PPC64 + +config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION + def_bool CRASH_RESERVE + config FA_DUMP bool "Firmware-assisted dump" - depends on PPC64 && (PPC_RTAS || PPC_POWERNV) - select CRASH_DUMP + depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV) help A robust mechanism to get reliable kernel crash dump with assistance from firmware. This approach does not use kexec, @@ -855,8 +882,8 @@ config DATA_SHIFT_BOOL bool "Set custom data alignment" depends on ADVANCED_OPTIONS depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE - depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \ - PPC_85xx + depends on (PPC_8xx && !PIN_TLB_DATA && (!STRICT_KERNEL_RWX || !PIN_TLB_TEXT)) || \ + PPC_BOOK3S_32 || PPC_85xx help This option allows you to set the kernel data alignment. 
When RAM is mapped by blocks, the alignment needs to fit the size and @@ -868,23 +895,23 @@ config DATA_SHIFT int "Data shift" if DATA_SHIFT_BOOL default 24 if STRICT_KERNEL_RWX && PPC64 range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 - range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx + range 14 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 - default 23 if STRICT_KERNEL_RWX && PPC_8xx - default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA - default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx + default 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && \ + (PIN_TLB_DATA || PIN_TLB_TEXT) + default 19 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx default 24 if STRICT_KERNEL_RWX && PPC_85xx default PAGE_SHIFT help On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. Smaller is the alignment, greater is the number of necessary DBATs. - On 8xx, large pages (512kb or 8M) are used to map kernel linear - memory. Aligning to 8M reduces TLB misses as only 8M pages are used - in that case. If PIN_TLB is selected, it must be aligned to 8M as - 8M pages will be pinned. + On 8xx, large pages (16kb or 512kb or 8M) are used to map kernel + linear memory. Aligning to 8M reduces TLB misses as only 8M pages + are used in that case. If PIN_TLB is selected, it must be aligned + to 8M as 8M pages will be pinned. config ARCH_FORCE_MAX_ORDER int "Order of maximal physically contiguous allocations" @@ -966,7 +993,8 @@ config CMDLINE most cases you will need to specify the root device here. choice - prompt "Kernel command line type" if CMDLINE != "" + prompt "Kernel command line type" + depends on CMDLINE != "" default CMDLINE_FROM_BOOTLOADER config CMDLINE_FROM_BOOTLOADER @@ -1027,6 +1055,10 @@ config PPC_MEM_KEYS If unsure, say y. +config ARCH_PKEY_BITS + int + default 5 + config PPC_SECURE_BOOT prompt "Enable secure boot support" bool @@ -1078,7 +1110,7 @@ config GENERIC_ISA_DMA config PPC_INDIRECT_PCI bool depends on PCI - default y if 40x || 44x + default y if 44x config SBUS bool @@ -1103,15 +1135,12 @@ config FSL_PMC config PPC4xx_CPM bool default y - depends on SUSPEND && (44x || 40x) + depends on SUSPEND && 44x help PPC4xx Clock Power Management (CPM) support (suspend/resume). It also enables support for two different idle states (idle-wait and idle-doze). -config 4xx_SOC - bool - config FSL_LBC bool "Freescale Local Bus support" help @@ -1273,10 +1302,37 @@ config TASK_SIZE_BOOL config TASK_SIZE hex "Size of user task space" if TASK_SIZE_BOOL default "0x80000000" if PPC_8xx - default "0xb0000000" if PPC_BOOK3S_32 + default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM default "0xc0000000" + +config MODULES_SIZE_BOOL + bool "Set custom size for modules/execmem area" + depends on EXECMEM && ADVANCED_OPTIONS + help + This option allows you to set the size of kernel virtual address + space dedicated for modules/execmem. + For the time being it is only for 8xx and book3s/32. Other + platform share it with vmalloc space. + + Say N here unless you know what you are doing. 
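[Editorial aside, not from the patch: the widened 8xx DATA_SHIFT range earlier in this hunk (14..23) corresponds exactly to the page sizes the updated help text names. A quick standalone check of the arithmetic:]

```c
#include <stdio.h>

/*
 * DATA_SHIFT is the log2 of the kernel data alignment. On 8xx the
 * new range 14..23 brackets the large-page sizes listed in the help
 * text: 16 KB (1 << 14), 512 KB (1 << 19) and 8 MB (1 << 23, the
 * value required when TLB entries are pinned).
 */
int main(void)
{
	const int shifts[] = { 14, 19, 23 };

	for (unsigned int i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
		printf("DATA_SHIFT=%d -> %lu KB alignment\n",
		       shifts[i], (1UL << shifts[i]) / 1024);
	return 0;
}
```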
+ +config MODULES_SIZE + int "Size of modules/execmem area (In Mbytes)" if MODULES_SIZE_BOOL + range 1 256 if EXECMEM + default 64 if EXECMEM && PPC_BOOK3S_32 + default 32 if EXECMEM && PPC_8xx + default 0 + endmenu +config PPC64_PROC_SYSTEMCFG + def_bool y + depends on PPC64 && PROC_FS + help + This option enables the presence of /proc/ppc64/systemcfg through + which the systemcfg page can be accessed. + This interface only exists for backwards-compatibility. + if PPC64 # This value must have zeroes in the bottom 60 bits otherwise lots will break config PAGE_OFFSET diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 8c80b154e814..f15e5920080b 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -216,19 +216,6 @@ config PPC_EARLY_DEBUG_RTAS_PANEL help Select this to enable early debugging via the RTAS panel. -config PPC_EARLY_DEBUG_RTAS_CONSOLE - bool "RTAS Console" - depends on PPC_RTAS - select UDBG_RTAS_CONSOLE - help - Select this to enable early debugging via the RTAS console. - -config PPC_EARLY_DEBUG_MAPLE - bool "Maple real mode" - depends on PPC_MAPLE - help - Select this to enable early debugging for Maple. - config PPC_EARLY_DEBUG_PAS_REALMODE bool "PA Semi real mode" depends on PPC_PASEMI @@ -244,14 +231,6 @@ config PPC_EARLY_DEBUG_44x inbuilt serial port. If you enable this, ensure you set PPC_EARLY_DEBUG_44x_PHYSLOW below to suit your target board. -config PPC_EARLY_DEBUG_40x - bool "Early serial debugging for IBM/AMCC 40x CPUs" - depends on 40x - help - Select this to enable early debugging for IBM 40x chips via the - inbuilt serial port. This works on chips with a 16550 compatible - UART. - config PPC_EARLY_DEBUG_CPM bool "Early serial debugging for Freescale CPM-based serial ports" depends on SERIAL_CPM=y @@ -356,11 +335,6 @@ config PPC_EARLY_DEBUG_44x_PHYSHIGH depends on PPC_EARLY_DEBUG_44x default "0x1" -config PPC_EARLY_DEBUG_40x_PHYSADDR - hex "Early debug UART physical address" - depends on PPC_EARLY_DEBUG_40x - default "0xef600300" - config PPC_EARLY_DEBUG_CPM_ADDR hex "CPM UART early debug transmit descriptor address" depends on PPC_EARLY_DEBUG_CPM @@ -392,12 +366,6 @@ config FAIL_IOMMU If you are unsure, say N. -config PPC_FAST_ENDIAN_SWITCH - bool "Deprecated fast endian-switch syscall" - depends on DEBUG_KERNEL && PPC_BOOK3S_64 - help - If you're unsure what this is, say N. 
- config KASAN_SHADOW_OFFSET hex depends on KASAN diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 65261cbe5bfd..f3804103c56c 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -62,14 +62,14 @@ KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o endif ifdef CONFIG_CPU_LITTLE_ENDIAN -KBUILD_CFLAGS += -mlittle-endian +KBUILD_CPPFLAGS += -mlittle-endian KBUILD_LDFLAGS += -EL LDEMULATION := lppc GNUTARGET := powerpcle MULTIPLEWORD := -mno-multiple KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) else -KBUILD_CFLAGS += $(call cc-option,-mbig-endian) +KBUILD_CPPFLAGS += $(call cc-option,-mbig-endian) KBUILD_LDFLAGS += -EB LDEMULATION := ppc GNUTARGET := powerpc @@ -95,18 +95,11 @@ aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian ifeq ($(HAS_BIARCH),y) -KBUILD_CFLAGS += -m$(BITS) +KBUILD_CPPFLAGS += -m$(BITS) KBUILD_AFLAGS += -m$(BITS) KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION) endif -cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard=tls -ifdef CONFIG_PPC64 -cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r13 -else -cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r2 -endif - LDFLAGS_vmlinux-y := -Bstatic LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext @@ -114,7 +107,6 @@ LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) ifdef CONFIG_PPC64 ifndef CONFIG_PPC_KERNEL_PCREL -ifeq ($(call cc-option-yn,-mcmodel=medium),y) # -mcmodel=medium breaks modules because it uses 32bit offsets from # the TOC pointer to create pointers where possible. Pointers into the # percpu data area are created by this method. @@ -124,9 +116,6 @@ ifeq ($(call cc-option-yn,-mcmodel=medium),y) # kernel percpu data space (starting with 0xc...). We need a full # 64bit relocation for this to work, hence -mcmodel=large. 
KBUILD_CFLAGS_MODULE += -mcmodel=large -else - export NO_MINIMAL_TOC := -mno-minimal-toc -endif endif endif @@ -139,7 +128,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) endif endif -CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) +CFLAGS-$(CONFIG_PPC64) += -mcmodel=medium CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mlong-double-128) @@ -153,10 +142,21 @@ CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD)) CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) +CC_FLAGS_FPU := $(call cc-option,-mhard-float) +CC_FLAGS_NO_FPU := $(call cc-option,-msoft-float) + ifdef CONFIG_FUNCTION_TRACER ifdef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY +ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE +CC_FLAGS_FTRACE := -fpatchable-function-entry=1 +else +ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS # PPC32 only +CC_FLAGS_FTRACE := -fpatchable-function-entry=3,1 +else CC_FLAGS_FTRACE := -fpatchable-function-entry=2 +endif +endif else CC_FLAGS_FTRACE := -pg ifdef CONFIG_MPROFILE_KERNEL @@ -174,9 +174,8 @@ asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) KBUILD_CPPFLAGS += -I $(srctree)/arch/powerpc $(asinstr) KBUILD_AFLAGS += $(AFLAGS-y) -KBUILD_CFLAGS += $(call cc-option,-msoft-float) +KBUILD_CFLAGS += $(CC_FLAGS_NO_FPU) KBUILD_CFLAGS += $(CFLAGS-y) -CPP = $(CC) -E $(KBUILD_CFLAGS) CHECKFLAGS += -m$(BITS) -D__powerpc__ -D__powerpc$(BITS)__ ifdef CONFIG_CPU_BIG_ENDIAN @@ -302,11 +301,6 @@ ppc32_allmodconfig: $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/book3s_32.config \ -f $(srctree)/Makefile allmodconfig -generated_configs += ppc40x_allmodconfig -ppc40x_allmodconfig: - $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/40x.config \ - -f $(srctree)/Makefile allmodconfig - generated_configs += ppc44x_allmodconfig ppc44x_allmodconfig: $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/44x.config \ @@ -365,7 +359,7 @@ define archhelp echo ' install - Install kernel using' echo ' (your) ~/bin/$(INSTALLKERNEL) or' echo ' (distribution) /sbin/$(INSTALLKERNEL) or' - echo ' install to $$(INSTALL_PATH) and run lilo' + echo ' install to $$(INSTALL_PATH)' echo ' *_defconfig - Select default config from arch/powerpc/configs' echo '' echo ' Targets with <dt> embed a device tree blob inside the image' @@ -408,9 +402,13 @@ prepare: stack_protector_prepare PHONY += stack_protector_prepare stack_protector_prepare: prepare0 ifdef CONFIG_PPC64 - $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h)) + $(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \ + -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' \ + $(objtree)/include/generated/asm-offsets.h)) else - $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h)) + $(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 \ + -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' \ + $(objtree)/include/generated/asm-offsets.h)) endif endif diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink index ae5a4256b03d..bb601be36173 100644 --- 
a/arch/powerpc/Makefile.postlink +++ b/arch/powerpc/Makefile.postlink @@ -24,6 +24,9 @@ else $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" endif +quiet_cmd_ftrace_check = CHKFTRC $@ + cmd_ftrace_check = $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/ftrace_check.sh "$(NM)" "$@" + # `@true` prevents complaint when there is nothing to be done vmlinux: FORCE @@ -34,6 +37,11 @@ endif ifdef CONFIG_RELOCATABLE $(call if_changed,relocs_check) endif +ifdef CONFIG_FUNCTION_TRACER +ifndef CONFIG_PPC64_ELF_ABI_V1 + $(call cmd,ftrace_check) +endif +endif clean: rm -f .tmp_symbols.txt diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index a4716d138cfc..5a867f23fe7f 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -30,7 +30,6 @@ zImage.coff zImage.epapr zImage.holly zImage.*lds -zImage.maple zImage.miboot zImage.pmac zImage.pseries diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c index 00c4d843a023..682ca3827892 100644 --- a/arch/powerpc/boot/4xx.c +++ b/arch/powerpc/boot/4xx.c @@ -253,7 +253,6 @@ void ibm4xx_denali_fixup_memsize(void) dt_fixup_memory(0, memsize); } -#define SPRN_DBCR0_40X 0x3F2 #define SPRN_DBCR0_44X 0x134 #define DBCR0_RST_SYSTEM 0x30000000 @@ -270,18 +269,6 @@ void ibm44x_dbcr_reset(void) } -void ibm40x_dbcr_reset(void) -{ - unsigned long tmp; - - asm volatile ( - "mfspr %0,%1\n" - "oris %0,%0,%2@h\n" - "mtspr %1,%0" - : "=&r"(tmp) : "i"(SPRN_DBCR0_40X), "i"(DBCR0_RST_SYSTEM) - ); -} - #define EMAC_RESET 0x20000000 void ibm4xx_quiesce_eth(u32 *emac0, u32 *emac1) { @@ -544,256 +531,3 @@ void ibm440spe_fixup_clocks(unsigned int sys_clk, eplike_fixup_uart_clk(1, "/plb/opb/serial@f0000300", ser_clk, plb_clk); eplike_fixup_uart_clk(2, "/plb/opb/serial@f0000600", ser_clk, plb_clk); } - -void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk) -{ - u32 pllmr = mfdcr(DCRN_CPC0_PLLMR); - u32 cpc0_cr0 = mfdcr(DCRN_405_CPC0_CR0); - u32 cpc0_cr1 = mfdcr(DCRN_405_CPC0_CR1); - u32 psr = mfdcr(DCRN_405_CPC0_PSR); - u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; - u32 fwdv, fwdvb, fbdv, cbdv, opdv, epdv, ppdv, udiv; - - fwdv = (8 - ((pllmr & 0xe0000000) >> 29)); - fbdv = (pllmr & 0x1e000000) >> 25; - if (fbdv == 0) - fbdv = 16; - cbdv = ((pllmr & 0x00060000) >> 17) + 1; /* CPU:PLB */ - opdv = ((pllmr & 0x00018000) >> 15) + 1; /* PLB:OPB */ - ppdv = ((pllmr & 0x00006000) >> 13) + 1; /* PLB:PCI */ - epdv = ((pllmr & 0x00001800) >> 11) + 2; /* PLB:EBC */ - udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1; - - /* check for 405GPr */ - if ((mfpvr() & 0xfffffff0) == (0x50910951 & 0xfffffff0)) { - fwdvb = 8 - (pllmr & 0x00000007); - if (!(psr & 0x00001000)) /* PCI async mode enable == 0 */ - if (psr & 0x00000020) /* New mode enable */ - m = fwdvb * 2 * ppdv; - else - m = fwdvb * cbdv * ppdv; - else if (psr & 0x00000020) /* New mode enable */ - if (psr & 0x00000800) /* PerClk synch mode */ - m = fwdvb * 2 * epdv; - else - m = fbdv * fwdv; - else if (epdv == fbdv) - m = fbdv * cbdv * epdv; - else - m = fbdv * fwdvb * cbdv; - - cpu = sys_clk * m / fwdv; - plb = sys_clk * m / (fwdvb * cbdv); - } else { - m = fwdv * fbdv * cbdv; - cpu = sys_clk * m / fwdv; - plb = cpu / cbdv; - } - opb = plb / opdv; - ebc = plb / epdv; - - if (cpc0_cr0 & 0x80) - /* uart0 uses the external clock */ - uart0 = ser_clk; - else - uart0 = cpu / udiv; - - if (cpc0_cr0 & 0x40) - /* uart1 uses the external clock */ - uart1 = ser_clk; - else - uart1 = cpu / udiv; - - /* setup the timebase clock to tick at the cpu frequency */ - 
cpc0_cr1 = cpc0_cr1 & ~0x00800000; - mtdcr(DCRN_405_CPC0_CR1, cpc0_cr1); - tb = cpu; - - dt_fixup_cpu_clocks(cpu, tb, 0); - dt_fixup_clock("/plb", plb); - dt_fixup_clock("/plb/opb", opb); - dt_fixup_clock("/plb/ebc", ebc); - dt_fixup_clock("/plb/opb/serial@ef600300", uart0); - dt_fixup_clock("/plb/opb/serial@ef600400", uart1); -} - - -void ibm405ep_fixup_clocks(unsigned int sys_clk) -{ - u32 pllmr0 = mfdcr(DCRN_CPC0_PLLMR0); - u32 pllmr1 = mfdcr(DCRN_CPC0_PLLMR1); - u32 cpc0_ucr = mfdcr(DCRN_CPC0_UCR); - u32 cpu, plb, opb, ebc, uart0, uart1; - u32 fwdva, fwdvb, fbdv, cbdv, opdv, epdv; - u32 pllmr0_ccdv, tb, m; - - fwdva = 8 - ((pllmr1 & 0x00070000) >> 16); - fwdvb = 8 - ((pllmr1 & 0x00007000) >> 12); - fbdv = (pllmr1 & 0x00f00000) >> 20; - if (fbdv == 0) - fbdv = 16; - - cbdv = ((pllmr0 & 0x00030000) >> 16) + 1; /* CPU:PLB */ - epdv = ((pllmr0 & 0x00000300) >> 8) + 2; /* PLB:EBC */ - opdv = ((pllmr0 & 0x00003000) >> 12) + 1; /* PLB:OPB */ - - m = fbdv * fwdvb; - - pllmr0_ccdv = ((pllmr0 & 0x00300000) >> 20) + 1; - if (pllmr1 & 0x80000000) - cpu = sys_clk * m / (fwdva * pllmr0_ccdv); - else - cpu = sys_clk / pllmr0_ccdv; - - plb = cpu / cbdv; - opb = plb / opdv; - ebc = plb / epdv; - tb = cpu; - uart0 = cpu / (cpc0_ucr & 0x0000007f); - uart1 = cpu / ((cpc0_ucr & 0x00007f00) >> 8); - - dt_fixup_cpu_clocks(cpu, tb, 0); - dt_fixup_clock("/plb", plb); - dt_fixup_clock("/plb/opb", opb); - dt_fixup_clock("/plb/ebc", ebc); - dt_fixup_clock("/plb/opb/serial@ef600300", uart0); - dt_fixup_clock("/plb/opb/serial@ef600400", uart1); -} - -static u8 ibm405ex_fwdv_multi_bits[] = { - /* values for: 1 - 16 */ - 0x01, 0x02, 0x0e, 0x09, 0x04, 0x0b, 0x10, 0x0d, 0x0c, 0x05, - 0x06, 0x0f, 0x0a, 0x07, 0x08, 0x03 -}; - -u32 ibm405ex_get_fwdva(unsigned long cpr_fwdv) -{ - u32 index; - - for (index = 0; index < ARRAY_SIZE(ibm405ex_fwdv_multi_bits); index++) - if (cpr_fwdv == (u32)ibm405ex_fwdv_multi_bits[index]) - return index + 1; - - return 0; -} - -static u8 ibm405ex_fbdv_multi_bits[] = { - /* values for: 1 - 100 */ - 0x00, 0xff, 0x7e, 0xfd, 0x7a, 0xf5, 0x6a, 0xd5, 0x2a, 0xd4, - 0x29, 0xd3, 0x26, 0xcc, 0x19, 0xb3, 0x67, 0xce, 0x1d, 0xbb, - 0x77, 0xee, 0x5d, 0xba, 0x74, 0xe9, 0x52, 0xa5, 0x4b, 0x96, - 0x2c, 0xd8, 0x31, 0xe3, 0x46, 0x8d, 0x1b, 0xb7, 0x6f, 0xde, - 0x3d, 0xfb, 0x76, 0xed, 0x5a, 0xb5, 0x6b, 0xd6, 0x2d, 0xdb, - 0x36, 0xec, 0x59, 0xb2, 0x64, 0xc9, 0x12, 0xa4, 0x48, 0x91, - 0x23, 0xc7, 0x0e, 0x9c, 0x38, 0xf0, 0x61, 0xc2, 0x05, 0x8b, - 0x17, 0xaf, 0x5f, 0xbe, 0x7c, 0xf9, 0x72, 0xe5, 0x4a, 0x95, - 0x2b, 0xd7, 0x2e, 0xdc, 0x39, 0xf3, 0x66, 0xcd, 0x1a, 0xb4, - 0x68, 0xd1, 0x22, 0xc4, 0x09, 0x93, 0x27, 0xcf, 0x1e, 0xbc, - /* values for: 101 - 200 */ - 0x78, 0xf1, 0x62, 0xc5, 0x0a, 0x94, 0x28, 0xd0, 0x21, 0xc3, - 0x06, 0x8c, 0x18, 0xb0, 0x60, 0xc1, 0x02, 0x84, 0x08, 0x90, - 0x20, 0xc0, 0x01, 0x83, 0x07, 0x8f, 0x1f, 0xbf, 0x7f, 0xfe, - 0x7d, 0xfa, 0x75, 0xea, 0x55, 0xaa, 0x54, 0xa9, 0x53, 0xa6, - 0x4c, 0x99, 0x33, 0xe7, 0x4e, 0x9d, 0x3b, 0xf7, 0x6e, 0xdd, - 0x3a, 0xf4, 0x69, 0xd2, 0x25, 0xcb, 0x16, 0xac, 0x58, 0xb1, - 0x63, 0xc6, 0x0d, 0x9b, 0x37, 0xef, 0x5e, 0xbd, 0x7b, 0xf6, - 0x6d, 0xda, 0x35, 0xeb, 0x56, 0xad, 0x5b, 0xb6, 0x6c, 0xd9, - 0x32, 0xe4, 0x49, 0x92, 0x24, 0xc8, 0x11, 0xa3, 0x47, 0x8e, - 0x1c, 0xb8, 0x70, 0xe1, 0x42, 0x85, 0x0b, 0x97, 0x2f, 0xdf, - /* values for: 201 - 255 */ - 0x3e, 0xfc, 0x79, 0xf2, 0x65, 0xca, 0x15, 0xab, 0x57, 0xae, - 0x5c, 0xb9, 0x73, 0xe6, 0x4d, 0x9a, 0x34, 0xe8, 0x51, 0xa2, - 0x44, 0x89, 0x13, 0xa7, 0x4f, 0x9e, 0x3c, 0xf8, 0x71, 0xe2, - 0x45, 0x8a, 0x14, 0xa8, 0x50, 0xa1, 
0x43, 0x86, 0x0c, 0x98, - 0x30, 0xe0, 0x41, 0x82, 0x04, 0x88, 0x10, 0xa0, 0x40, 0x81, - 0x03, 0x87, 0x0f, 0x9f, 0x3f /* END */ -}; - -u32 ibm405ex_get_fbdv(unsigned long cpr_fbdv) -{ - u32 index; - - for (index = 0; index < ARRAY_SIZE(ibm405ex_fbdv_multi_bits); index++) - if (cpr_fbdv == (u32)ibm405ex_fbdv_multi_bits[index]) - return index + 1; - - return 0; -} - -void ibm405ex_fixup_clocks(unsigned int sys_clk, unsigned int uart_clk) -{ - /* PLL config */ - u32 pllc = CPR0_READ(DCRN_CPR0_PLLC); - u32 plld = CPR0_READ(DCRN_CPR0_PLLD); - u32 cpud = CPR0_READ(DCRN_CPR0_PRIMAD); - u32 plbd = CPR0_READ(DCRN_CPR0_PRIMBD); - u32 opbd = CPR0_READ(DCRN_CPR0_OPBD); - u32 perd = CPR0_READ(DCRN_CPR0_PERD); - - /* Dividers */ - u32 fbdv = ibm405ex_get_fbdv(__fix_zero((plld >> 24) & 0xff, 1)); - - u32 fwdva = ibm405ex_get_fwdva(__fix_zero((plld >> 16) & 0x0f, 1)); - - u32 cpudv0 = __fix_zero((cpud >> 24) & 7, 8); - - /* PLBDV0 is hardwared to 010. */ - u32 plbdv0 = 2; - u32 plb2xdv0 = __fix_zero((plbd >> 16) & 7, 8); - - u32 opbdv0 = __fix_zero((opbd >> 24) & 3, 4); - - u32 perdv0 = __fix_zero((perd >> 24) & 3, 4); - - /* Resulting clocks */ - u32 cpu, plb, opb, ebc, vco, tb, uart0, uart1; - - /* PLL's VCO is the source for primary forward ? */ - if (pllc & 0x40000000) { - u32 m; - - /* Feedback path */ - switch ((pllc >> 24) & 7) { - case 0: - /* PLLOUTx */ - m = fbdv; - break; - case 1: - /* CPU */ - m = fbdv * fwdva * cpudv0; - break; - case 5: - /* PERClk */ - m = fbdv * fwdva * plb2xdv0 * plbdv0 * opbdv0 * perdv0; - break; - default: - printf("WARNING ! Invalid PLL feedback source !\n"); - goto bypass; - } - - vco = (unsigned int)(sys_clk * m); - } else { -bypass: - /* Bypass system PLL */ - vco = 0; - } - - /* CPU = VCO / ( FWDVA x CPUDV0) */ - cpu = vco / (fwdva * cpudv0); - /* PLB = VCO / ( FWDVA x PLB2XDV0 x PLBDV0) */ - plb = vco / (fwdva * plb2xdv0 * plbdv0); - /* OPB = PLB / OPBDV0 */ - opb = plb / opbdv0; - /* EBC = OPB / PERDV0 */ - ebc = opb / perdv0; - - tb = cpu; - uart0 = uart1 = uart_clk; - - dt_fixup_cpu_clocks(cpu, tb, 0); - dt_fixup_clock("/plb", plb); - dt_fixup_clock("/plb/opb", opb); - dt_fixup_clock("/plb/opb/ebc", ebc); - dt_fixup_clock("/plb/opb/serial@ef600200", uart0); - dt_fixup_clock("/plb/opb/serial@ef600300", uart1); -} diff --git a/arch/powerpc/boot/4xx.h b/arch/powerpc/boot/4xx.h index 77f15d124c81..62df496b7ba6 100644 --- a/arch/powerpc/boot/4xx.h +++ b/arch/powerpc/boot/4xx.h @@ -12,13 +12,9 @@ void ibm4xx_sdram_fixup_memsize(void); void ibm440spe_fixup_memsize(void); void ibm4xx_denali_fixup_memsize(void); void ibm44x_dbcr_reset(void); -void ibm40x_dbcr_reset(void); void ibm4xx_quiesce_eth(u32 *emac0, u32 *emac1); void ibm4xx_fixup_ebc_ranges(const char *ebc); -void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk); -void ibm405ep_fixup_clocks(unsigned int sys_clk); -void ibm405ex_fixup_clocks(unsigned int sys_clk, unsigned int uart_clk); void ibm440gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk); void ibm440ep_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk, unsigned int tmr_clk); diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 968aee2025b8..a7ab087d412c 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -54,10 +54,8 @@ endif $(obj)/4xx.o: BOOTTARGETFLAGS += -mcpu=405 $(obj)/ebony.o: BOOTTARGETFLAGS += -mcpu=440 -$(obj)/cuboot-hotfoot.o: BOOTTARGETFLAGS += -mcpu=405 $(obj)/cuboot-taishan.o: BOOTTARGETFLAGS += -mcpu=440 $(obj)/cuboot-katmai.o: BOOTTARGETFLAGS += -mcpu=440 
-$(obj)/cuboot-acadia.o: BOOTTARGETFLAGS += -mcpu=405 $(obj)/treeboot-iss4xx.o: BOOTTARGETFLAGS += -mcpu=405 $(obj)/treeboot-currituck.o: BOOTTARGETFLAGS += -mcpu=405 $(obj)/treeboot-akebono.o: BOOTTARGETFLAGS += -mcpu=405 @@ -72,6 +70,7 @@ BOOTCPPFLAGS := -nostdinc $(LINUXINCLUDE) BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) BOOTCFLAGS := $(BOOTTARGETFLAGS) \ + -std=gnu11 \ -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 \ -msoft-float -mno-altivec -mno-vsx \ @@ -108,8 +107,8 @@ DTC_FLAGS ?= -p 1024 # these files into the build dir, fix up any includes and ensure that dependent # files are copied in the right order. -# these need to be seperate variables because they are copied out of different -# directories in the kernel tree. Sure you COULd merge them, but it's a +# these need to be separate variables because they are copied out of different +# directories in the kernel tree. Sure you COULD merge them, but it's a # cure-is-worse-than-disease situation. zlib-decomp-$(CONFIG_KERNEL_GZIP) := decompress_inflate.c zlib-$(CONFIG_KERNEL_GZIP) := inffast.c inflate.c inftrees.c @@ -146,7 +145,6 @@ src-wlib-$(CONFIG_PPC_POWERNV) += opal-calls.S opal.c ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S endif -src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c @@ -154,9 +152,6 @@ src-wlib-$(CONFIG_EMBEDDED6xx) += ugecon.c fsl-soc.c src-wlib-$(CONFIG_CPM) += cpm-serial.c src-plat-y := of.c epapr.c -src-plat-$(CONFIG_40x) += fixed-head.S cuboot-hotfoot.c \ - cuboot-acadia.c \ - cuboot-kilauea.c simpleboot.c src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \ cuboot-bamboo.c cuboot-sam440ep.c \ cuboot-sequoia.c cuboot-rainier.c \ @@ -179,7 +174,6 @@ src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S -src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c @@ -218,7 +212,7 @@ $(addprefix $(obj)/,$(libfdt) $(libfdtheader)): $(obj)/%: $(srctree)/scripts/dtc $(obj)/empty.c: $(Q)touch $@ -$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S +$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(src)/%.S $(Q)cp $< $@ clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \ @@ -252,9 +246,9 @@ targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \ $(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds -dtstree := $(srctree)/$(src)/dts +dtstree := $(src)/dts -wrapper :=$(srctree)/$(src)/wrapper +wrapper := $(src)/wrapper wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree) \ $(wrapper) FORCE @@ -282,8 +276,6 @@ quiet_cmd_wrap = WRAP $@ image-$(CONFIG_PPC_PSERIES) += zImage.pseries image-$(CONFIG_PPC_POWERNV) += zImage.pseries -image-$(CONFIG_PPC_MAPLE) += zImage.maple -image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries image-$(CONFIG_PPC_PS3) += dtbImage.ps3 image-$(CONFIG_PPC_CHRP) += zImage.chrp image-$(CONFIG_PPC_EFIKA) += zImage.chrp @@ -300,11 +292,6 @@ image-$(CONFIG_EPAPR_BOOT) += zImage.epapr # Boards with newish u-boot firmware can use the uImage 
target above # -# Board ports in arch/powerpc/platform/40x/Kconfig -image-$(CONFIG_HOTFOOT) += cuImage.hotfoot -image-$(CONFIG_ACADIA) += cuImage.acadia -image-$(CONFIG_OBS600) += uImage.obs600 - # Board ports in arch/powerpc/platform/44x/Kconfig image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony image-$(CONFIG_BAMBOO) += treeImage.bamboo cuImage.bamboo @@ -455,7 +442,7 @@ $(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y)) clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \ zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \ zImage.miboot zImage.pmac zImage.pseries \ - zImage.maple simpleImage.* otheros.bld + simpleImage.* otheros.bld # clean up files cached by wrapper clean-kernel-base := vmlinux.strip vmlinux.bin diff --git a/arch/powerpc/boot/cuboot-acadia.c b/arch/powerpc/boot/cuboot-acadia.c deleted file mode 100644 index 46e96756cfe1..000000000000 --- a/arch/powerpc/boot/cuboot-acadia.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Old U-boot compatibility for Acadia - * - * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com> - * - * Copyright 2008 IBM Corporation - */ - -#include "ops.h" -#include "io.h" -#include "dcr.h" -#include "stdio.h" -#include "4xx.h" -#include "44x.h" -#include "cuboot.h" - -#define TARGET_4xx -#include "ppcboot.h" - -static bd_t bd; - -#define CPR_PERD0_SPIDV_MASK 0x000F0000 /* SPI Clock Divider */ - -#define PLLC_SRC_MASK 0x20000000 /* PLL feedback source */ - -#define PLLD_FBDV_MASK 0x1F000000 /* PLL feedback divider value */ -#define PLLD_FWDVA_MASK 0x000F0000 /* PLL forward divider A value */ -#define PLLD_FWDVB_MASK 0x00000700 /* PLL forward divider B value */ - -#define PRIMAD_CPUDV_MASK 0x0F000000 /* CPU Clock Divisor Mask */ -#define PRIMAD_PLBDV_MASK 0x000F0000 /* PLB Clock Divisor Mask */ -#define PRIMAD_OPBDV_MASK 0x00000F00 /* OPB Clock Divisor Mask */ -#define PRIMAD_EBCDV_MASK 0x0000000F /* EBC Clock Divisor Mask */ - -#define PERD0_PWMDV_MASK 0xFF000000 /* PWM Divider Mask */ -#define PERD0_SPIDV_MASK 0x000F0000 /* SPI Divider Mask */ -#define PERD0_U0DV_MASK 0x0000FF00 /* UART 0 Divider Mask */ -#define PERD0_U1DV_MASK 0x000000FF /* UART 1 Divider Mask */ - -static void get_clocks(void) -{ - unsigned long sysclk, cpr_plld, cpr_pllc, cpr_primad, plloutb, i; - unsigned long pllFwdDiv, pllFwdDivB, pllFbkDiv, pllPlbDiv, pllExtBusDiv; - unsigned long pllOpbDiv, freqEBC, freqUART, freqOPB; - unsigned long div; /* total divisor udiv * bdiv */ - unsigned long umin; /* minimum udiv */ - unsigned short diff; /* smallest diff */ - unsigned long udiv; /* best udiv */ - unsigned short idiff; /* current diff */ - unsigned short ibdiv; /* current bdiv */ - unsigned long est; /* current estimate */ - unsigned long baud; - void *np; - - /* read the sysclk value from the CPLD */ - sysclk = (in_8((unsigned char *)0x80000000) == 0xc) ? 66666666 : 33333000; - - /* - * Read PLL Mode registers - */ - cpr_plld = CPR0_READ(DCRN_CPR0_PLLD); - cpr_pllc = CPR0_READ(DCRN_CPR0_PLLC); - - /* - * Determine forward divider A - */ - pllFwdDiv = ((cpr_plld & PLLD_FWDVA_MASK) >> 16); - - /* - * Determine forward divider B - */ - pllFwdDivB = ((cpr_plld & PLLD_FWDVB_MASK) >> 8); - if (pllFwdDivB == 0) - pllFwdDivB = 8; - - /* - * Determine FBK_DIV. - */ - pllFbkDiv = ((cpr_plld & PLLD_FBDV_MASK) >> 24); - if (pllFbkDiv == 0) - pllFbkDiv = 256; - - /* - * Read CPR_PRIMAD register - */ - cpr_primad = CPR0_READ(DCRN_CPR0_PRIMAD); - - /* - * Determine PLB_DIV. 
- */ - pllPlbDiv = ((cpr_primad & PRIMAD_PLBDV_MASK) >> 16); - if (pllPlbDiv == 0) - pllPlbDiv = 16; - - /* - * Determine EXTBUS_DIV. - */ - pllExtBusDiv = (cpr_primad & PRIMAD_EBCDV_MASK); - if (pllExtBusDiv == 0) - pllExtBusDiv = 16; - - /* - * Determine OPB_DIV. - */ - pllOpbDiv = ((cpr_primad & PRIMAD_OPBDV_MASK) >> 8); - if (pllOpbDiv == 0) - pllOpbDiv = 16; - - /* There is a bug in U-Boot that prevents us from using - * bd.bi_opbfreq because U-Boot doesn't populate it for - * 405EZ. We get to calculate it, yay! - */ - freqOPB = (sysclk *pllFbkDiv) /pllOpbDiv; - - freqEBC = (sysclk * pllFbkDiv) / pllExtBusDiv; - - plloutb = ((sysclk * ((cpr_pllc & PLLC_SRC_MASK) ? - pllFwdDivB : pllFwdDiv) * - pllFbkDiv) / pllFwdDivB); - - np = find_node_by_alias("serial0"); - if (getprop(np, "current-speed", &baud, sizeof(baud)) != sizeof(baud)) - fatal("no current-speed property\n\r"); - - udiv = 256; /* Assume lowest possible serial clk */ - div = plloutb / (16 * baud); /* total divisor */ - umin = (plloutb / freqOPB) << 1; /* 2 x OPB divisor */ - diff = 256; /* highest possible */ - - /* i is the test udiv value -- start with the largest - * possible (256) to minimize serial clock and constrain - * search to umin. - */ - for (i = 256; i > umin; i--) { - ibdiv = div / i; - est = i * ibdiv; - idiff = (est > div) ? (est-div) : (div-est); - if (idiff == 0) { - udiv = i; - break; /* can't do better */ - } else if (idiff < diff) { - udiv = i; /* best so far */ - diff = idiff; /* update lowest diff*/ - } - } - freqUART = plloutb / udiv; - - dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_intfreq, bd.bi_plb_busfreq); - dt_fixup_clock("/plb/ebc", freqEBC); - dt_fixup_clock("/plb/opb", freqOPB); - dt_fixup_clock("/plb/opb/serial@ef600300", freqUART); - dt_fixup_clock("/plb/opb/serial@ef600400", freqUART); -} - -static void acadia_fixups(void) -{ - dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); - get_clocks(); - dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); -} - -void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ - CUBOOT_INIT(); - platform_ops.fixups = acadia_fixups; - platform_ops.exit = ibm40x_dbcr_reset; - fdt_init(_dtb_start); - serial_console_init(); -} diff --git a/arch/powerpc/boot/cuboot-hotfoot.c b/arch/powerpc/boot/cuboot-hotfoot.c deleted file mode 100644 index 0e5532f855d6..000000000000 --- a/arch/powerpc/boot/cuboot-hotfoot.c +++ /dev/null @@ -1,139 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Old U-boot compatibility for Esteem 195E Hotfoot CPU Board - * - * Author: Solomon Peachy <solomon@linux-wlan.com> - */ - -#include "ops.h" -#include "stdio.h" -#include "reg.h" -#include "dcr.h" -#include "4xx.h" -#include "cuboot.h" - -#define TARGET_4xx -#define TARGET_HOTFOOT - -#include "ppcboot-hotfoot.h" - -static bd_t bd; - -#define NUM_REGS 3 - -static void hotfoot_fixups(void) -{ - u32 uart = mfdcr(DCRN_CPC0_UCR) & 0x7f; - - dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); - - dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_procfreq, 0); - dt_fixup_clock("/plb", bd.bi_plb_busfreq); - dt_fixup_clock("/plb/opb", bd.bi_opbfreq); - dt_fixup_clock("/plb/ebc", bd.bi_pci_busfreq); - dt_fixup_clock("/plb/opb/serial@ef600300", bd.bi_procfreq / uart); - dt_fixup_clock("/plb/opb/serial@ef600400", bd.bi_procfreq / uart); - - dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); - dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); - - /* Is this a single eth/serial board? 
*/ - if ((bd.bi_enet1addr[0] == 0) && - (bd.bi_enet1addr[1] == 0) && - (bd.bi_enet1addr[2] == 0) && - (bd.bi_enet1addr[3] == 0) && - (bd.bi_enet1addr[4] == 0) && - (bd.bi_enet1addr[5] == 0)) { - void *devp; - - printf("Trimming devtree for single serial/eth board\n"); - - devp = finddevice("/plb/opb/serial@ef600300"); - if (!devp) - fatal("Can't find node for /plb/opb/serial@ef600300"); - del_node(devp); - - devp = finddevice("/plb/opb/ethernet@ef600900"); - if (!devp) - fatal("Can't find node for /plb/opb/ethernet@ef600900"); - del_node(devp); - } - - ibm4xx_quiesce_eth((u32 *)0xef600800, (u32 *)0xef600900); - - /* Fix up flash size in fdt for 4M boards. */ - if (bd.bi_flashsize < 0x800000) { - u32 regs[NUM_REGS]; - void *devp = finddevice("/plb/ebc/nor_flash@0"); - if (!devp) - fatal("Can't find FDT node for nor_flash!??"); - - printf("Fixing devtree for 4M Flash\n"); - - /* First fix up the base address */ - getprop(devp, "reg", regs, sizeof(regs)); - regs[0] = 0; - regs[1] = 0xffc00000; - regs[2] = 0x00400000; - setprop(devp, "reg", regs, sizeof(regs)); - - /* Then the offsets */ - devp = finddevice("/plb/ebc/nor_flash@0/partition@0"); - if (!devp) - fatal("Can't find FDT node for partition@0"); - getprop(devp, "reg", regs, 2*sizeof(u32)); - regs[0] -= 0x400000; - setprop(devp, "reg", regs, 2*sizeof(u32)); - - devp = finddevice("/plb/ebc/nor_flash@0/partition@1"); - if (!devp) - fatal("Can't find FDT node for partition@1"); - getprop(devp, "reg", regs, 2*sizeof(u32)); - regs[0] -= 0x400000; - setprop(devp, "reg", regs, 2*sizeof(u32)); - - devp = finddevice("/plb/ebc/nor_flash@0/partition@2"); - if (!devp) - fatal("Can't find FDT node for partition@2"); - getprop(devp, "reg", regs, 2*sizeof(u32)); - regs[0] -= 0x400000; - setprop(devp, "reg", regs, 2*sizeof(u32)); - - devp = finddevice("/plb/ebc/nor_flash@0/partition@3"); - if (!devp) - fatal("Can't find FDT node for partition@3"); - getprop(devp, "reg", regs, 2*sizeof(u32)); - regs[0] -= 0x400000; - setprop(devp, "reg", regs, 2*sizeof(u32)); - - devp = finddevice("/plb/ebc/nor_flash@0/partition@4"); - if (!devp) - fatal("Can't find FDT node for partition@4"); - getprop(devp, "reg", regs, 2*sizeof(u32)); - regs[0] -= 0x400000; - setprop(devp, "reg", regs, 2*sizeof(u32)); - - devp = finddevice("/plb/ebc/nor_flash@0/partition@6"); - if (!devp) - fatal("Can't find FDT node for partition@6"); - getprop(devp, "reg", regs, 2*sizeof(u32)); - regs[0] -= 0x400000; - setprop(devp, "reg", regs, 2*sizeof(u32)); - - /* Delete the FeatFS node */ - devp = finddevice("/plb/ebc/nor_flash@0/partition@5"); - if (!devp) - fatal("Can't find FDT node for partition@5"); - del_node(devp); - } -} - -void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ - CUBOOT_INIT(); - platform_ops.fixups = hotfoot_fixups; - platform_ops.exit = ibm40x_dbcr_reset; - fdt_init(_dtb_start); - serial_console_init(); -} diff --git a/arch/powerpc/boot/cuboot-kilauea.c b/arch/powerpc/boot/cuboot-kilauea.c deleted file mode 100644 index fda182f518a2..000000000000 --- a/arch/powerpc/boot/cuboot-kilauea.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Old U-boot compatibility for PPC405EX. This image is already included - * a dtb. - * - * Author: Tiejun Chen <tiejun.chen@windriver.com> - * - * Copyright (C) 2009 Wind River Systems, Inc. 
- */ - -#include "ops.h" -#include "io.h" -#include "dcr.h" -#include "stdio.h" -#include "4xx.h" -#include "44x.h" -#include "cuboot.h" - -#define TARGET_4xx -#define TARGET_44x -#include "ppcboot.h" - -#define KILAUEA_SYS_EXT_SERIAL_CLOCK 11059200 /* ext. 11.059MHz clk */ - -static bd_t bd; - -static void kilauea_fixups(void) -{ - unsigned long sysclk = 33333333; - - ibm405ex_fixup_clocks(sysclk, KILAUEA_SYS_EXT_SERIAL_CLOCK); - dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); - ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); - dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); - dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); -} - -void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7) -{ - CUBOOT_INIT(); - platform_ops.fixups = kilauea_fixups; - platform_ops.exit = ibm40x_dbcr_reset; - fdt_init(_dtb_start); - serial_console_init(); -} diff --git a/arch/powerpc/boot/dcr.h b/arch/powerpc/boot/dcr.h index 334ab8b5a668..91dc3a302cc8 100644 --- a/arch/powerpc/boot/dcr.h +++ b/arch/powerpc/boot/dcr.h @@ -153,17 +153,6 @@ static const unsigned long sdram_bxcr[] = { SDRAM0_B0CR, SDRAM0_B1CR, #define CPR0_SCPID 0x120 #define CPR0_PLLC0 0x40 -/* 405GP Clocking/Power Management/Chip Control regs */ -#define DCRN_CPC0_PLLMR 0xb0 -#define DCRN_405_CPC0_CR0 0xb1 -#define DCRN_405_CPC0_CR1 0xb2 -#define DCRN_405_CPC0_PSR 0xb4 - -/* 405EP Clocking/Power Management/Chip Control regs */ -#define DCRN_CPC0_PLLMR0 0xf0 -#define DCRN_CPC0_PLLMR1 0xf4 -#define DCRN_CPC0_UCR 0xf5 - /* 440GX/405EX Clock Control reg */ #define DCRN_CPR0_CLKUPD 0x020 #define DCRN_CPR0_PLLC 0x040 diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c index 977eb15a6d17..6835cb53f034 100644 --- a/arch/powerpc/boot/decompress.c +++ b/arch/powerpc/boot/decompress.c @@ -101,7 +101,7 @@ static void print_err(char *s) * @input_size: length of the input buffer * @outbuf: output buffer * @output_size: length of the output buffer - * @skip number of output bytes to ignore + * @_skip: number of output bytes to ignore * * This function takes compressed data from inbuf, decompresses and write it to * outbuf. Once output_size bytes are written to the output buffer, or the diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile index fb335d05aae8..0cd0d8558b47 100644 --- a/arch/powerpc/boot/dts/Makefile +++ b/arch/powerpc/boot/dts/Makefile @@ -2,5 +2,4 @@ subdir-y += fsl -dtstree := $(srctree)/$(src) -dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts)) diff --git a/arch/powerpc/boot/dts/acadia.dts b/arch/powerpc/boot/dts/acadia.dts deleted file mode 100644 index deb52e41ab84..000000000000 --- a/arch/powerpc/boot/dts/acadia.dts +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Device Tree Source for AMCC Acadia (405EZ) - * - * Copyright IBM Corp. 2008 - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "amcc,acadia"; - compatible = "amcc,acadia"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EZ"; - reg = <0x0>; - clock-frequency = <0>; /* Filled in by wrapper */ - timebase-frequency = <0>; /* Filled in by wrapper */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; - d-cache-size = <16384>; - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x0 0x0>; /* Filled in by wrapper */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic-405ez", "ibm,uic"; - interrupt-controller; - dcr-reg = <0x0c0 0x009>; - cell-index = <0>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - plb { - compatible = "ibm,plb-405ez", "ibm,plb3"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by wrapper */ - - MAL0: mcmal { - compatible = "ibm,mcmal-405ez", "ibm,mcmal"; - dcr-reg = <0x380 0x62>; - num-tx-chans = <1>; - num-rx-chans = <1>; - interrupt-parent = <&UIC0>; - /* 405EZ has only 3 interrupts to the UIC, as - * SERR, TXDE, and RXDE are or'd together into - * one UIC bit - */ - interrupts = < - 0x13 0x4 /* TXEOB */ - 0x15 0x4 /* RXEOB */ - 0x12 0x4 /* SERR, TXDE, RXDE */>; - }; - - POB0: opb { - compatible = "ibm,opb-405ez", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - dcr-reg = <0x0a 0x05>; - clock-frequency = <0>; /* Filled in by wrapper */ - - UART0: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x8>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by wrapper */ - current-speed = <115200>; - interrupt-parent = <&UIC0>; - interrupts = <0x5 0x4>; - }; - - UART1: serial@ef600400 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600400 0x8>; - clock-frequency = <0>; /* Filled in by wrapper */ - current-speed = <115200>; - interrupt-parent = <&UIC0>; - interrupts = <0x6 0x4>; - }; - - IIC: i2c@ef600500 { - compatible = "ibm,iic-405ez", "ibm,iic"; - reg = <0xef600500 0x11>; - interrupt-parent = <&UIC0>; - interrupts = <0xa 0x4>; - }; - - GPIO0: gpio@ef600700 { - compatible = "ibm,gpio-405ez"; - reg = <0xef600700 0x20>; - }; - - GPIO1: gpio@ef600800 { - compatible = "ibm,gpio-405ez"; - reg = <0xef600800 0x20>; - }; - - EMAC0: ethernet@ef600900 { - device_type = "network"; - compatible = "ibm,emac-405ez", "ibm,emac"; - interrupt-parent = <&UIC0>; - interrupts = < - 0x10 0x4 /* Ethernet */ - 0x11 0x4 /* Ethernet Wake up */>; - local-mac-address = [000000000000]; /* Filled in by wrapper */ - reg = <0xef600900 0x70>; - mal-device = <&MAL0>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <1500>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "mii"; - phy-map = <0x0>; - }; - - CAN0: can@ef601000 { - compatible = "amcc,can-405ez"; - reg = <0xef601000 0x620>; - interrupt-parent = <&UIC0>; - interrupts = <0x7 0x4>; - }; - - CAN1: can@ef601800 { - compatible = "amcc,can-405ez"; - reg = <0xef601800 0x620>; - interrupt-parent = <&UIC0>; - interrupts = <0x8 0x4>; - }; - - cameleon@ef602000 { - compatible = "amcc,cameleon-405ez"; - reg = <0xef602000 0x800>; - interrupt-parent = <&UIC0>; - interrupts = <0xb 0x4 0xc 0x4>; - }; - - ieee1588@ef602800 
{ - compatible = "amcc,ieee1588-405ez"; - reg = <0xef602800 0x60>; - interrupt-parent = <&UIC0>; - interrupts = <0x4 0x4>; - /* This thing is a bit weird. It has it's own UIC - * that it uses to generate snapshot triggers. We - * don't really support this device yet, and it needs - * work to figure this out. - */ - dcr-reg = <0xe0 0x9>; - }; - - usb@ef603000 { - compatible = "ohci-be"; - reg = <0xef603000 0x80>; - interrupt-parent = <&UIC0>; - interrupts = <0xd 0x4 0xe 0x4>; - }; - - dac@ef603300 { - compatible = "amcc,dac-405ez"; - reg = <0xef603300 0x40>; - interrupt-parent = <&UIC0>; - interrupts = <0x18 0x4>; - }; - - adc@ef603400 { - compatible = "amcc,adc-405ez"; - reg = <0xef603400 0x40>; - interrupt-parent = <&UIC0>; - interrupts = <0x17 0x4>; - }; - - spi@ef603500 { - compatible = "amcc,spi-405ez"; - reg = <0xef603500 0x100>; - interrupt-parent = <&UIC0>; - interrupts = <0x9 0x4>; - }; - }; - - EBC0: ebc { - compatible = "ibm,ebc-405ez", "ibm,ebc"; - dcr-reg = <0x12 0x2>; - #address-cells = <2>; - #size-cells = <1>; - clock-frequency = <0>; /* Filled in by wrapper */ - }; - }; - - chosen { - stdout-path = "/plb/opb/serial@ef600300"; - }; -}; diff --git a/arch/powerpc/boot/dts/fsl/Makefile b/arch/powerpc/boot/dts/fsl/Makefile index 3bae982641e9..d3ecdf14bc42 100644 --- a/arch/powerpc/boot/dts/fsl/Makefile +++ b/arch/powerpc/boot/dts/fsl/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -dtstree := $(srctree)/$(src) -dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts)) diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi index 4f044b41a776..fb3200b006ad 100644 --- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi @@ -50,7 +50,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <25 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts b/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts index 8da984251abc..0ba86a6dce1b 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts +++ b/arch/powerpc/boot/dts/fsl/bsc9131rdb.dts @@ -15,7 +15,7 @@ device_type = "memory"; }; - board_ifc: ifc: ifc@ff71e000 { + board_ifc: ifc: memory-controller@ff71e000 { /* NAND Flash on board */ ranges = <0x0 0x0 0x0 0xff800000 0x00004000>; reg = <0x0 0xff71e000 0x0 0x2000>; diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi index 2a677fd323eb..5c53cee8755f 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi @@ -35,7 +35,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <16 2 0 0 20 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/bsc9132qds.dts b/arch/powerpc/boot/dts/fsl/bsc9132qds.dts index 7cb2158dfe58..ce642e879a1b 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9132qds.dts +++ b/arch/powerpc/boot/dts/fsl/bsc9132qds.dts @@ -15,7 +15,7 @@ device_type = "memory"; }; - ifc: ifc@ff71e000 { + ifc: memory-controller@ff71e000 { /* NOR, NAND Flash on board */ ranges = <0x0 0x0 0x0 0x88000000 0x08000000 0x1 0x0 0x0 0xff800000 0x00010000>; diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi index b8e0edd1ac69..4da451e000d9 100644 --- 
a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi @@ -35,7 +35,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; /* FIXME: Test whether interrupts are split */ interrupts = <16 2 0 0 20 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/c293pcie.dts b/arch/powerpc/boot/dts/fsl/c293pcie.dts index 5e905e0857cf..e2fdac2ed420 100644 --- a/arch/powerpc/boot/dts/fsl/c293pcie.dts +++ b/arch/powerpc/boot/dts/fsl/c293pcie.dts @@ -42,7 +42,7 @@ device_type = "memory"; }; - ifc: ifc@fffe1e000 { + ifc: memory-controller@fffe1e000 { reg = <0xf 0xffe1e000 0 0x2000>; ranges = <0x0 0x0 0xf 0xec000000 0x04000000 0x1 0x0 0xf 0xff800000 0x00010000 diff --git a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi index f208fb8f64b3..2d443d519274 100644 --- a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi @@ -35,7 +35,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <19 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi index 41935709ebe8..fba40a1bccc0 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi @@ -199,6 +199,10 @@ /include/ "pq3-dma-0.dtsi" /include/ "pq3-etsec1-0.dtsi" + enet0: ethernet@24000 { + fsl,wake-on-filer; + fsl,pmc-handle = <&etsec1_clk>; + }; /include/ "pq3-etsec1-timer-0.dtsi" usb@22000 { @@ -222,9 +226,10 @@ }; /include/ "pq3-etsec1-2.dtsi" - - ethernet@26000 { + enet2: ethernet@26000 { cell-index = <1>; + fsl,wake-on-filer; + fsl,pmc-handle = <&etsec3_clk>; }; usb@2b000 { @@ -249,4 +254,9 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" + power@e0070 { + compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc"; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi index b68eb119faef..ea7416af7ee3 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi @@ -188,4 +188,6 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" }; diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi index 579d76cb8e32..dddb7374508d 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi @@ -156,4 +156,6 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" }; diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi index 49294cf36b4e..40a6cff77032 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi @@ -193,4 +193,6 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" }; diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts b/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts index 3a94acbb3c03..ce3346d77858 100644 --- a/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts +++ b/arch/powerpc/boot/dts/fsl/p1010rdb-pb.dts @@ -29,3 +29,19 @@ }; /include/ "p1010si-post.dtsi" + +&pci0 { + pcie@0 { + interrupt-map = < + /* IDSEL 0x0 */ + /* + *irq[4:5] are active-high + *irq[6:7] are active-low + */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0 + 0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 + 0000 
0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 + >; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts b/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts index 4cf255fedc96..83590354f9a0 100644 --- a/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts +++ b/arch/powerpc/boot/dts/fsl/p1010rdb-pb_36b.dts @@ -56,3 +56,19 @@ }; /include/ "p1010si-post.dtsi" + +&pci0 { + pcie@0 { + interrupt-map = < + /* IDSEL 0x0 */ + /* + *irq[4:5] are active-high + *irq[6:7] are active-low + */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0 + 0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 + >; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi index 2ca9cee2ddeb..ef49a7d6c69d 100644 --- a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi @@ -215,19 +215,3 @@ phy-connection-type = "sgmii"; }; }; - -&pci0 { - pcie@0 { - interrupt-map = < - /* IDSEL 0x0 */ - /* - *irq[4:5] are active-high - *irq[6:7] are active-low - */ - 0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0 - 0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0 - 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 - 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 - >; - }; -}; diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi index fdc19aab2f70..583a6cd05079 100644 --- a/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010rdb_32b.dtsi @@ -36,7 +36,7 @@ memory { device_type = "memory"; }; -board_ifc: ifc: ifc@ffe1e000 { +board_ifc: ifc: memory-controller@ffe1e000 { /* NOR, NAND Flashes and CPLD on board */ ranges = <0x0 0x0 0x0 0xee000000 0x02000000 0x1 0x0 0x0 0xff800000 0x00010000 diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi index de2fceed4f79..4d41efe0038f 100644 --- a/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010rdb_36b.dtsi @@ -36,7 +36,7 @@ memory { device_type = "memory"; }; -board_ifc: ifc: ifc@fffe1e000 { +board_ifc: ifc: memory-controller@fffe1e000 { /* NOR, NAND Flashes and CPLD on board */ ranges = <0x0 0x0 0xf 0xee000000 0x02000000 0x1 0x0 0xf 0xff800000 0x00010000 diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi index ccda0a91abf0..2d2550729dcc 100644 --- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi @@ -35,7 +35,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <16 2 0 0 19 2 0 0>; }; @@ -183,9 +183,23 @@ /include/ "pq3-etsec2-1.dtsi" /include/ "pq3-etsec2-2.dtsi" + enet0: ethernet@b0000 { + fsl,pmc-handle = <&etsec1_clk>; + }; + + enet1: ethernet@b1000 { + fsl,pmc-handle = <&etsec2_clk>; + }; + + enet2: ethernet@b2000 { + fsl,pmc-handle = <&etsec3_clk>; + }; + global-utilities@e0000 { compatible = "fsl,p1010-guts"; reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" }; diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi index 642dc3a83d0e..cc4c7461003b 100644 --- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi @@ -163,14 +163,17 @@ /include/ "pq3-etsec2-0.dtsi" enet0: enet0_grp2: ethernet@b0000 { + fsl,pmc-handle = <&etsec1_clk>; }; /include/ "pq3-etsec2-1.dtsi" enet1: enet1_grp2: ethernet@b1000 { + fsl,pmc-handle = <&etsec2_clk>; }; /include/ 
"pq3-etsec2-2.dtsi" enet2: enet2_grp2: ethernet@b2000 { + fsl,pmc-handle = <&etsec3_clk>; }; global-utilities@e0000 { @@ -178,6 +181,8 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" }; /include/ "pq3-etsec2-grp2-0.dtsi" diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi index 407cb5fd0f5b..378195db9fca 100644 --- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi @@ -159,14 +159,17 @@ /include/ "pq3-etsec2-0.dtsi" enet0: enet0_grp2: ethernet@b0000 { + fsl,pmc-handle = <&etsec1_clk>; }; /include/ "pq3-etsec2-1.dtsi" enet1: enet1_grp2: ethernet@b1000 { + fsl,pmc-handle = <&etsec2_clk>; }; /include/ "pq3-etsec2-2.dtsi" enet2: enet2_grp2: ethernet@b2000 { + fsl,pmc-handle = <&etsec3_clk>; }; global-utilities@e0000 { @@ -174,6 +177,8 @@ reg = <0xe0000 0x1000>; fsl,has-rstcr; }; + +/include/ "pq3-power.dtsi" }; &qe { diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi index 093e4e3ed368..6ac21e81344a 100644 --- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi @@ -225,11 +225,13 @@ /include/ "pq3-etsec2-0.dtsi" enet0: enet0_grp2: ethernet@b0000 { fsl,wake-on-filer; + fsl,pmc-handle = <&etsec1_clk>; }; /include/ "pq3-etsec2-1.dtsi" enet1: enet1_grp2: ethernet@b1000 { fsl,wake-on-filer; + fsl,pmc-handle = <&etsec2_clk>; }; global-utilities@e0000 { @@ -238,9 +240,10 @@ fsl,has-rstcr; }; +/include/ "pq3-power.dtsi" power@e0070 { - compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc"; - reg = <0xe0070 0x20>; + compatible = "fsl,p1022-pmc", "fsl,mpc8536-pmc", + "fsl,mpc8548-pmc"; }; }; diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi index 81b9ab2119be..d410082d21c0 100644 --- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi @@ -178,6 +178,10 @@ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr"; }; /include/ "pq3-etsec1-0.dtsi" + enet0: ethernet@24000 { + fsl,pmc-handle = <&etsec1_clk>; + + }; /include/ "pq3-etsec1-timer-0.dtsi" ptp_clock@24e00 { @@ -186,7 +190,15 @@ /include/ "pq3-etsec1-1.dtsi" + enet1: ethernet@25000 { + fsl,pmc-handle = <&etsec2_clk>; + }; + /include/ "pq3-etsec1-2.dtsi" + enet2: ethernet@26000 { + fsl,pmc-handle = <&etsec3_clk>; + }; + /include/ "pq3-esdhc-0.dtsi" sdhc@2e000 { compatible = "fsl,p2020-esdhc", "fsl,esdhc"; @@ -202,8 +214,5 @@ fsl,has-rstcr; }; - pmc: power@e0070 { - compatible = "fsl,mpc8548-pmc"; - reg = <0xe0070 0x20>; - }; +/include/ "pq3-power.dtsi" }; diff --git a/arch/powerpc/boot/dts/fsl/pq3-power.dtsi b/arch/powerpc/boot/dts/fsl/pq3-power.dtsi new file mode 100644 index 000000000000..6af12401004d --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/pq3-power.dtsi @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: (GPL-2.0+) +/* + * Copyright 2024 NXP + */ + +power@e0070 { + compatible = "fsl,mpc8548-pmc"; + reg = <0xe0070 0x20>; + + etsec1_clk: soc-clk@24 { + fsl,pmcdr-mask = <0x00000080>; + }; + etsec2_clk: soc-clk@25 { + fsl,pmcdr-mask = <0x00000040>; + }; + etsec3_clk: soc-clk@26 { + fsl,pmcdr-mask = <0x00000020>; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi index aa5152ca8120..8ef0c020206b 100644 --- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi @@ -52,7 +52,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", 
"simple-bus"; + compatible = "fsl,ifc"; interrupts = <25 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts index 270aaf631f2a..7d003e07a9fb 100644 --- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts @@ -91,7 +91,7 @@ board-control@2,0 { #address-cells = <1>; #size-cells = <1>; - compatible = "fsl,t1024-cpld"; + compatible = "fsl,t1024-cpld", "fsl,deepsleep-cpld"; reg = <3 0 0x300>; ranges = <0 3 0 0x300>; bank-width = <1>; diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts index dd3aab81e9de..4347924e9aa7 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts @@ -104,7 +104,7 @@ ifc: localbus@ffe124000 { cpld@3,0 { - compatible = "fsl,t1040rdb-cpld"; + compatible = "fsl,t104xrdb-cpld", "fsl,deepsleep-cpld"; }; }; }; diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 776788623204..c9542b73bd7f 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -52,7 +52,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <25 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/t1042rdb.dts b/arch/powerpc/boot/dts/fsl/t1042rdb.dts index 3ebb712224cb..099764322b33 100644 --- a/arch/powerpc/boot/dts/fsl/t1042rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1042rdb.dts @@ -68,7 +68,7 @@ ifc: localbus@ffe124000 { cpld@3,0 { - compatible = "fsl,t1042rdb-cpld"; + compatible = "fsl,t104xrdb-cpld", "fsl,deepsleep-cpld"; }; }; }; diff --git a/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts b/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts index 8ec3ff45e6fc..b10cab1a347b 100644 --- a/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts +++ b/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts @@ -41,7 +41,7 @@ ifc: localbus@ffe124000 { cpld@3,0 { - compatible = "fsl,t1042rdb_pi-cpld"; + compatible = "fsl,t104xrdb-cpld", "fsl,deepsleep-cpld"; }; }; diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index 27714dc2f04a..6bb95878d39d 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi @@ -50,7 +50,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <25 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi index fcac73486d48..65f3e17c0d41 100644 --- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi @@ -50,7 +50,7 @@ &ifc { #address-cells = <2>; #size-cells = <1>; - compatible = "fsl,ifc", "simple-bus"; + compatible = "fsl,ifc"; interrupts = <25 2 0 0>; }; diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts deleted file mode 100644 index f81ce8786d59..000000000000 --- a/arch/powerpc/boot/dts/haleakala.dts +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Device Tree Source for AMCC Haleakala (405EXr) - * - * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. 
- */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "amcc,haleakala"; - compatible = "amcc,haleakala", "amcc,kilauea"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EXr"; - reg = <0x00000000>; - clock-frequency = <0>; /* Filled in by U-Boot */ - timebase-frequency = <0>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic-405exr", "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic-405exr","ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic-405exr","ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - plb { - compatible = "ibm,plb-405exr", "ibm,plb4"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405exr", "ibm,sdram-4xx-ddr2"; - dcr-reg = <0x010 0x002>; - interrupt-parent = <&UIC2>; - interrupts = <0x5 0x4 /* ECC DED Error */ - 0x6 0x4>; /* ECC SEC Error */ - }; - - MAL0: mcmal { - compatible = "ibm,mcmal-405exr", "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <2>; - interrupt-parent = <&MAL0>; - interrupts = <0x0 0x1 0x2 0x3 0x4>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*TXEOB*/ 0x0 &UIC0 0xa 0x4 - /*RXEOB*/ 0x1 &UIC0 0xb 0x4 - /*SERR*/ 0x2 &UIC1 0x0 0x4 - /*TXDE*/ 0x3 &UIC1 0x1 0x4 - /*RXDE*/ 0x4 &UIC1 0x2 0x4>; - interrupt-map-mask = <0xffffffff>; - }; - - POB0: opb { - compatible = "ibm,opb-405exr", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x80000000 0x80000000 0x10000000 - 0xef600000 0xef600000 0x00a00000 - 0xf0000000 0xf0000000 0x10000000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by U-Boot */ - - EBC0: ebc { - compatible = "ibm,ebc-405exr", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - clock-frequency = <0>; /* Filled in by U-Boot */ - /* ranges property is supplied by U-Boot */ - interrupts = <0x5 0x1>; - interrupt-parent = <&UIC1>; - - nor_flash@0,0 { - compatible = "amd,s29gl512n", "cfi-flash"; - bank-width = <2>; - reg = <0x00000000 0x00000000 0x04000000>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "kernel"; - reg = <0x00000000 0x00200000>; - }; - partition@200000 { - label = "root"; - reg = <0x00200000 0x00200000>; - }; - partition@400000 { - label = "user"; - reg = <0x00400000 0x03b60000>; - }; - partition@3f60000 { - label = "env"; - reg = <0x03f60000 
0x00040000>; - }; - partition@3fa0000 { - label = "u-boot"; - reg = <0x03fa0000 0x00060000>; - }; - }; - }; - - UART0: serial@ef600200 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600200 0x00000008>; - virtual-reg = <0xef600200>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1a 0x4>; - }; - - UART1: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC0: i2c@ef600400 { - compatible = "ibm,iic-405exr", "ibm,iic"; - reg = <0xef600400 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - }; - - IIC1: i2c@ef600500 { - compatible = "ibm,iic-405exr", "ibm,iic"; - reg = <0xef600500 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x7 0x4>; - }; - - - RGMII0: emac-rgmii@ef600b00 { - compatible = "ibm,rgmii-405exr", "ibm,rgmii"; - reg = <0xef600b00 0x00000104>; - has-mdio; - }; - - EMAC0: ethernet@ef600900 { - linux,network-index = <0x0>; - device_type = "network"; - compatible = "ibm,emac-405exr", "ibm,emac4sync"; - interrupt-parent = <&EMAC0>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x18 0x4 - /*Wake*/ 0x1 &UIC1 0x1d 0x4>; - reg = <0xef600900 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - }; - - PCIE0: pcie@a0000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex"; - primary; - port = <0x0>; /* port number */ - reg = <0xa0000000 0x20000000 /* Config space access */ - 0xef000000 0x00001000>; /* Registers */ - dcr-reg = <0x040 0x020>; - sdr-base = <0x400>; - - /* Outbound ranges, one memory and one IO, - * later cannot be changed - */ - ranges = <0x02000000 0x00000000 0x80000000 0x90000000 0x00000000 0x08000000 - 0x01000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* This drives busses 0x00 to 0x3f */ - bus-range = <0x0 0x3f>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. 
- * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &UIC2 0x0 0x4 /* swizzled int A */ - 0x0 0x0 0x0 0x2 &UIC2 0x1 0x4 /* swizzled int B */ - 0x0 0x0 0x0 0x3 &UIC2 0x2 0x4 /* swizzled int C */ - 0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>; - }; - }; -}; diff --git a/arch/powerpc/boot/dts/hotfoot.dts b/arch/powerpc/boot/dts/hotfoot.dts deleted file mode 100644 index b93bf2d9dd5b..000000000000 --- a/arch/powerpc/boot/dts/hotfoot.dts +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Device Tree Source for ESTeem 195E Hotfoot - * - * Copyright 2009 AbsoluteValue Systems <solomon@linux-wlan.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "est,hotfoot"; - compatible = "est,hotfoot"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EP"; - reg = <0x00000000>; - clock-frequency = <0>; /* Filled in by zImage */ - timebase-frequency = <0>; /* Filled in by zImage */ - i-cache-line-size = <0x20>; - d-cache-line-size = <0x20>; - i-cache-size = <0x4000>; - d-cache-size = <0x4000>; - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by zImage */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - plb { - compatible = "ibm,plb3"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by zImage */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405ep"; - dcr-reg = <0x010 0x002>; - }; - - MAL: mcmal { - compatible = "ibm,mcmal-405ep", "ibm,mcmal"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <4>; - num-rx-chans = <2>; - interrupt-parent = <&UIC0>; - interrupts = < - 0xb 0x4 /* TXEOB */ - 0xc 0x4 /* RXEOB */ - 0xa 0x4 /* SERR */ - 0xd 0x4 /* TXDE */ - 0xe 0x4 /* RXDE */>; - }; - - POB0: opb { - compatible = "ibm,opb-405ep", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0xef600000 0xef600000 0x00a00000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by zImage */ - - /* Hotfoot has UART0/UART1 swapped */ - - UART0: serial@ef600400 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600400 0x00000008>; - virtual-reg = <0xef600400>; - clock-frequency = <0>; /* Filled in by zImage */ - current-speed = <0x9600>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - UART1: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by zImage */ - current-speed = <0x9600>; - interrupt-parent = <&UIC0>; - interrupts = <0x0 0x4>; - }; - - IIC: i2c@ef600500 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "ibm,iic-405ep", "ibm,iic"; - reg = <0xef600500 0x00000011>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - - rtc@68 { - /* Actually a DS1339 */ - compatible = "dallas,ds1307"; - reg = <0x68>; - }; - - temp@4a { - 
/* Not present on all boards */ - compatible = "national,lm75"; - reg = <0x4a>; - }; - }; - - GPIO: gpio@ef600700 { - #gpio-cells = <2>; - compatible = "ibm,ppc4xx-gpio"; - reg = <0xef600700 0x00000020>; - gpio-controller; - }; - - gpio-leds { - compatible = "gpio-leds"; - status { - label = "Status"; - gpios = <&GPIO 1 0>; - }; - radiorx { - label = "Rx"; - gpios = <&GPIO 0xe 0>; - }; - }; - - EMAC0: ethernet@ef600800 { - linux,network-index = <0x0>; - device_type = "network"; - compatible = "ibm,emac-405ep", "ibm,emac"; - interrupt-parent = <&UIC0>; - interrupts = < - 0xf 0x4 /* Ethernet */ - 0x9 0x4 /* Ethernet Wake Up */>; - local-mac-address = [000000000000]; /* Filled in by zImage */ - reg = <0xef600800 0x00000070>; - mal-device = <&MAL>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <0x5dc>; - rx-fifo-size = <0x1000>; - tx-fifo-size = <0x800>; - phy-mode = "mii"; - phy-map = <0x00000000>; - }; - - EMAC1: ethernet@ef600900 { - linux,network-index = <0x1>; - device_type = "network"; - compatible = "ibm,emac-405ep", "ibm,emac"; - interrupt-parent = <&UIC0>; - interrupts = < - 0x11 0x4 /* Ethernet */ - 0x9 0x4 /* Ethernet Wake Up */>; - local-mac-address = [000000000000]; /* Filled in by zImage */ - reg = <0xef600900 0x00000070>; - mal-device = <&MAL>; - mal-tx-channel = <2>; - mal-rx-channel = <1>; - cell-index = <1>; - max-frame-size = <0x5dc>; - rx-fifo-size = <0x1000>; - tx-fifo-size = <0x800>; - mdio-device = <&EMAC0>; - phy-mode = "mii"; - phy-map = <0x0000001>; - }; - }; - - EBC0: ebc { - compatible = "ibm,ebc-405ep", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - - /* The ranges property is supplied by the bootwrapper - * and is based on the firmware's configuration of the - * EBC bridge - */ - clock-frequency = <0>; /* Filled in by zImage */ - - nor_flash@0 { - compatible = "cfi-flash"; - bank-width = <2>; - reg = <0x0 0xff800000 0x00800000>; - #address-cells = <1>; - #size-cells = <1>; - - /* This mapping is for the 8M flash - 4M flash has all ofssets -= 4M, - and FeatFS partition is not present */ - partition@0 { - label = "Bootloader"; - reg = <0x7c0000 0x40000>; - /* read-only; */ - }; - partition@1 { - label = "Env_and_Config_Primary"; - reg = <0x400000 0x10000>; - }; - partition@2 { - label = "Kernel"; - reg = <0x420000 0x100000>; - }; - partition@3 { - label = "Filesystem"; - reg = <0x520000 0x2a0000>; - }; - partition@4 { - label = "Env_and_Config_Secondary"; - reg = <0x410000 0x10000>; - }; - partition@5 { - label = "FeatFS"; - reg = <0x000000 0x400000>; - }; - partition@6 { - label = "Bootloader_Env"; - reg = <0x7d0000 0x10000>; - }; - }; - }; - - PCI0: pci@ec000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb405ep-pci", "ibm,plb-pci"; - primary; - reg = <0xeec00000 0x00000008 /* Config space access */ - 0xeed80000 0x00000004 /* IACK */ - 0xeed80000 0x00000004 /* Special cycle */ - 0xef480000 0x00000040>; /* Internal registers */ - - /* Outbound ranges, one memory and one IO, - * later cannot be changed. 
Chip supports a second - * IO range but we don't use it for now - */ - ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000 - 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - interrupt-parent = <&UIC0>; - interrupt-map-mask = <0xf800 0x0 0x0 0x7>; - interrupt-map = < - /* IDSEL 3 -- slot1 (optional) 27/29 A/B IRQ2/4 */ - 0x1800 0x0 0x0 0x1 &UIC0 0x1b 0x8 - 0x1800 0x0 0x0 0x2 &UIC0 0x1d 0x8 - - /* IDSEL 4 -- slot0, 26/28 A/B IRQ1/3 */ - 0x2000 0x0 0x0 0x1 &UIC0 0x1a 0x8 - 0x2000 0x0 0x0 0x2 &UIC0 0x1c 0x8 - >; - }; - }; - - chosen { - stdout-path = &UART0; - }; -}; diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts deleted file mode 100644 index c07a7525a72c..000000000000 --- a/arch/powerpc/boot/dts/kilauea.dts +++ /dev/null @@ -1,407 +0,0 @@ -/* - * Device Tree Source for AMCC Kilauea (405EX) - * - * Copyright 2007-2009 DENX Software Engineering, Stefan Roese <sr@denx.de> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "amcc,kilauea"; - compatible = "amcc,kilauea"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EX"; - reg = <0x00000000>; - clock-frequency = <0>; /* Filled in by U-Boot */ - timebase-frequency = <0>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic-405ex", "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - CPM0: cpm { - compatible = "ibm,cpm"; - dcr-access-method = "native"; - dcr-reg = <0x0b0 0x003>; - unused-units = <0x00000000>; - idle-doze = <0x02000000>; - standby = <0xe3e74800>; - }; - - plb { - compatible = "ibm,plb-405ex", "ibm,plb4"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405ex", "ibm,sdram-4xx-ddr2"; - dcr-reg = <0x010 0x002>; - interrupt-parent = <&UIC2>; - interrupts = <0x5 0x4 /* ECC DED Error */ - 0x6 0x4>; /* ECC SEC Error */ - }; - - CRYPTO: crypto@ef700000 { - compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto"; 
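A recurring idiom in the 405EX trees being deleted (haleakala above; kilauea and makalu below) deserves one note: the MAL and EMAC nodes name themselves as interrupt-parent, declare a one-cell local interrupt space, and let an interrupt-map translate each local number onto the UICs. A minimal sketch of how the MAL entries decode, using the values these files share (cells are local specifier, then parent phandle, then parent specifier):

	MAL0: mcmal {
		interrupt-parent = <&MAL0>;		/* self: local one-cell space */
		#interrupt-cells = <1>;
		interrupts = <0x0 0x1 0x2 0x3 0x4>;	/* TXEOB RXEOB SERR TXDE RXDE */
		interrupt-map = </* TXEOB */ 0x0 &UIC0 0xa 0x4
				 /* RXEOB */ 0x1 &UIC0 0xb 0x4
				 /* SERR  */ 0x2 &UIC1 0x0 0x4
				 /* TXDE  */ 0x3 &UIC1 0x1 0x4
				 /* RXDE  */ 0x4 &UIC1 0x2 0x4>;
		interrupt-map-mask = <0xffffffff>;
	};

Local interrupt 0x0 (TXEOB) therefore lands on UIC0 input 0xa with sense code 0x4, and the remaining four follow the same translation.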
- reg = <0xef700000 0x80400>; - interrupt-parent = <&UIC0>; - interrupts = <0x17 0x2>; - }; - - MAL0: mcmal { - compatible = "ibm,mcmal-405ex", "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <2>; - interrupt-parent = <&MAL0>; - interrupts = <0x0 0x1 0x2 0x3 0x4>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*TXEOB*/ 0x0 &UIC0 0xa 0x4 - /*RXEOB*/ 0x1 &UIC0 0xb 0x4 - /*SERR*/ 0x2 &UIC1 0x0 0x4 - /*TXDE*/ 0x3 &UIC1 0x1 0x4 - /*RXDE*/ 0x4 &UIC1 0x2 0x4>; - interrupt-map-mask = <0xffffffff>; - }; - - POB0: opb { - compatible = "ibm,opb-405ex", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x80000000 0x80000000 0x10000000 - 0xef600000 0xef600000 0x00a00000 - 0xf0000000 0xf0000000 0x10000000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by U-Boot */ - - EBC0: ebc { - compatible = "ibm,ebc-405ex", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - clock-frequency = <0>; /* Filled in by U-Boot */ - /* ranges property is supplied by U-Boot */ - interrupts = <0x5 0x1>; - interrupt-parent = <&UIC1>; - - nor_flash@0,0 { - compatible = "amd,s29gl512n", "cfi-flash"; - bank-width = <2>; - reg = <0x00000000 0x00000000 0x04000000>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "kernel"; - reg = <0x00000000 0x001e0000>; - }; - partition@1e0000 { - label = "dtb"; - reg = <0x001e0000 0x00020000>; - }; - partition@200000 { - label = "root"; - reg = <0x00200000 0x00200000>; - }; - partition@400000 { - label = "user"; - reg = <0x00400000 0x03b60000>; - }; - partition@3f60000 { - label = "env"; - reg = <0x03f60000 0x00040000>; - }; - partition@3fa0000 { - label = "u-boot"; - reg = <0x03fa0000 0x00060000>; - }; - }; - - ndfc@1,0 { - compatible = "ibm,ndfc"; - reg = <0x00000001 0x00000000 0x00002000>; - ccr = <0x00001000>; - bank-settings = <0x80002222>; - #address-cells = <1>; - #size-cells = <1>; - - nand { - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x00100000>; - }; - partition@100000 { - label = "user"; - reg = <0x00000000 0x03f00000>; - }; - }; - }; - }; - - UART0: serial@ef600200 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600200 0x00000008>; - virtual-reg = <0xef600200>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1a 0x4>; - }; - - UART1: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC0: i2c@ef600400 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600400 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - #address-cells = <1>; - #size-cells = <0>; - - rtc@68 { - compatible = "dallas,ds1338"; - reg = <0x68>; - }; - - dtt@48 { - compatible = "dallas,ds1775"; - reg = <0x48>; - }; - }; - - IIC1: i2c@ef600500 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600500 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x7 0x4>; - }; - - RGMII0: emac-rgmii@ef600b00 { - compatible = "ibm,rgmii-405ex", "ibm,rgmii"; - reg = <0xef600b00 0x00000104>; - has-mdio; - }; - - EMAC0: ethernet@ef600900 { - linux,network-index = <0x0>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - 
interrupt-parent = <&EMAC0>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x18 0x4 - /*Wake*/ 0x1 &UIC1 0x1d 0x4>; - reg = <0xef600900 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - EMAC1: ethernet@ef600a00 { - linux,network-index = <0x1>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC1>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x19 0x4 - /*Wake*/ 0x1 &UIC1 0x1f 0x4>; - reg = <0xef600a00 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <1>; - mal-rx-channel = <1>; - cell-index = <1>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <1>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - }; - - PCIE0: pcie@a0000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex"; - primary; - port = <0x0>; /* port number */ - reg = <0xa0000000 0x20000000 /* Config space access */ - 0xef000000 0x00001000>; /* Registers */ - dcr-reg = <0x040 0x020>; - sdr-base = <0x400>; - - /* Outbound ranges, one memory and one IO, - * later cannot be changed - */ - ranges = <0x02000000 0x00000000 0x80000000 0x90000000 0x00000000 0x08000000 - 0x01000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* This drives busses 0x00 to 0x3f */ - bus-range = <0x0 0x3f>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. 
- * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &UIC2 0x0 0x4 /* swizzled int A */ - 0x0 0x0 0x0 0x2 &UIC2 0x1 0x4 /* swizzled int B */ - 0x0 0x0 0x0 0x3 &UIC2 0x2 0x4 /* swizzled int C */ - 0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>; - }; - - PCIE1: pcie@c0000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex"; - primary; - port = <0x1>; /* port number */ - reg = <0xc0000000 0x20000000 /* Config space access */ - 0xef001000 0x00001000>; /* Registers */ - dcr-reg = <0x060 0x020>; - sdr-base = <0x440>; - - /* Outbound ranges, one memory and one IO, - * later cannot be changed - */ - ranges = <0x02000000 0x00000000 0x80000000 0x98000000 0x00000000 0x08000000 - 0x01000000 0x00000000 0x00000000 0xe0010000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* This drives busses 0x40 to 0x7f */ - bus-range = <0x40 0x7f>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. - * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &UIC2 0xb 0x4 /* swizzled int A */ - 0x0 0x0 0x0 0x2 &UIC2 0xc 0x4 /* swizzled int B */ - 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ - 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; - }; - }; -}; diff --git a/arch/powerpc/boot/dts/klondike.dts b/arch/powerpc/boot/dts/klondike.dts deleted file mode 100644 index 97432177892a..000000000000 --- a/arch/powerpc/boot/dts/klondike.dts +++ /dev/null @@ -1,212 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Device Tree for Klondike (APM8018X) board. 
- * - * Copyright (c) 2010, Applied Micro Circuits Corporation - * Author: Tanmay Inamdar <tinamdar@apm.com> - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "apm,klondike"; - compatible = "apm,klondike"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,apm8018x"; - reg = <0x00000000>; - clock-frequency = <300000000>; /* Filled in by U-Boot */ - timebase-frequency = <300000000>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x20000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x0a 0x4 0x0b 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC3: interrupt-controller3 { - compatible = "ibm,uic"; - interrupt-controller; - cell-index = <3>; - dcr-reg = <0x0f0 0x010>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x10 0x4 0x11 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - plb { - compatible = "ibm,plb4"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-apm8018x"; - dcr-reg = <0x010 0x002>; - }; - - MAL0: mcmal { - compatible = "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <16>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-parent = <&UIC1>; - interrupts = </*TXEOB*/ 0x6 0x4 - /*RXEOB*/ 0x7 0x4 - /*SERR*/ 0x1 0x4 - /*TXDE*/ 0x2 0x4 - /*RXDE*/ 0x3 0x4>; - }; - - POB0: opb { - compatible = "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x20000000 0x20000000 0x30000000 - 0x50000000 0x50000000 0x10000000 - 0x60000000 0x60000000 0x10000000 - 0xFE000000 0xFE000000 0x00010000>; - dcr-reg = <0x100 0x020>; - clock-frequency = <300000000>; /* Filled in by U-Boot */ - - RGMII0: emac-rgmii@400a2000 { - compatible = "ibm,rgmii"; - reg = <0x400a2000 0x00000010>; - has-mdio; - }; - - TAH0: emac-tah@400a3000 { - compatible = "ibm,tah"; - reg = <0x400a3000 0x100>; - }; - - TAH1: emac-tah@400a4000 { - compatible = "ibm,tah"; - reg = <0x400a4000 0x100>; - }; - - EMAC0: ethernet@400a0000 { - compatible = "ibm,emac4", "ibm-emac4sync"; - interrupt-parent = <&EMAC0>; - interrupts = <0x0>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x13 0x4>; - reg = <0x400a0000 0x00000100>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0x0>; - mal-rx-channel = <0x0>; - 
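The EMAC1 node just below shows the shared-MDIO arrangement klondike uses (hotfoot, earlier in this series, does the same): the second MAC has no MDIO bus of its own and performs PHY management through EMAC0. Stripped to the two properties that carry the linkage (a sketch using klondike's values):

	EMAC1: ethernet@400a1000 {
		mdio-device = <&EMAC0>;	/* PHY access goes through EMAC0's MDIO bus */
		phy-address = <0x3>;	/* this port's PHY address on that bus */
	};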
cell-index = <0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "rgmii"; - phy-address = <0x2>; - turbo = "no"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - tah-device = <&TAH0>; - tah-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - EMAC1: ethernet@400a1000 { - compatible = "ibm,emac4", "ibm-emac4sync"; - status = "disabled"; - interrupt-parent = <&EMAC1>; - interrupts = <0x0>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x14 0x4>; - reg = <0x400a1000 0x00000100>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <1>; - mal-rx-channel = <8>; - cell-index = <1>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - phy-mode = "rgmii"; - phy-address = <0x3>; - turbo = "no"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <1>; - tah-device = <&TAH1>; - tah-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - mdio-device = <&EMAC0>; - }; - }; - }; - - chosen { - stdout-path = "/plb/opb/serial@50001000"; - }; -}; diff --git a/arch/powerpc/boot/dts/makalu.dts b/arch/powerpc/boot/dts/makalu.dts deleted file mode 100644 index c473cd911bca..000000000000 --- a/arch/powerpc/boot/dts/makalu.dts +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Device Tree Source for AMCC Makalu (405EX) - * - * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "amcc,makalu"; - compatible = "amcc,makalu"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EX"; - reg = <0x00000000>; - clock-frequency = <0>; /* Filled in by U-Boot */ - timebase-frequency = <0>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic-405ex", "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - plb { - compatible = "ibm,plb-405ex", "ibm,plb4"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: 
memory-controller { - compatible = "ibm,sdram-405ex", "ibm,sdram-4xx-ddr2"; - dcr-reg = <0x010 0x002>; - interrupt-parent = <&UIC2>; - interrupts = <0x5 0x4 /* ECC DED Error */ - 0x6 0x4 /* ECC SEC Error */ >; - }; - - MAL0: mcmal { - compatible = "ibm,mcmal-405ex", "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <2>; - interrupt-parent = <&MAL0>; - interrupts = <0x0 0x1 0x2 0x3 0x4>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*TXEOB*/ 0x0 &UIC0 0xa 0x4 - /*RXEOB*/ 0x1 &UIC0 0xb 0x4 - /*SERR*/ 0x2 &UIC1 0x0 0x4 - /*TXDE*/ 0x3 &UIC1 0x1 0x4 - /*RXDE*/ 0x4 &UIC1 0x2 0x4>; - interrupt-map-mask = <0xffffffff>; - }; - - POB0: opb { - compatible = "ibm,opb-405ex", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x80000000 0x80000000 0x10000000 - 0xef600000 0xef600000 0x00a00000 - 0xf0000000 0xf0000000 0x10000000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by U-Boot */ - - EBC0: ebc { - compatible = "ibm,ebc-405ex", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - clock-frequency = <0>; /* Filled in by U-Boot */ - /* ranges property is supplied by U-Boot */ - interrupts = <0x5 0x1>; - interrupt-parent = <&UIC1>; - - nor_flash@0,0 { - compatible = "amd,s29gl512n", "cfi-flash"; - bank-width = <2>; - reg = <0x00000000 0x00000000 0x04000000>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "kernel"; - reg = <0x00000000 0x00200000>; - }; - partition@200000 { - label = "root"; - reg = <0x00200000 0x00200000>; - }; - partition@400000 { - label = "user"; - reg = <0x00400000 0x03b60000>; - }; - partition@3f60000 { - label = "env"; - reg = <0x03f60000 0x00040000>; - }; - partition@3fa0000 { - label = "u-boot"; - reg = <0x03fa0000 0x00060000>; - }; - }; - }; - - UART0: serial@ef600200 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600200 0x00000008>; - virtual-reg = <0xef600200>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1a 0x4>; - }; - - UART1: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC0: i2c@ef600400 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600400 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - }; - - IIC1: i2c@ef600500 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600500 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x7 0x4>; - }; - - - RGMII0: emac-rgmii@ef600b00 { - compatible = "ibm,rgmii-405ex", "ibm,rgmii"; - reg = <0xef600b00 0x00000104>; - has-mdio; - }; - - EMAC0: ethernet@ef600900 { - linux,network-index = <0x0>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC0>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x18 0x4 - /*Wake*/ 0x1 &UIC1 0x1d 0x4>; - reg = <0xef600900 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode 
= "rgmii"; - phy-map = <0x0000003f>; /* Start at 6 */ - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - EMAC1: ethernet@ef600a00 { - linux,network-index = <0x1>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC1>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x19 0x4 - /*Wake*/ 0x1 &UIC1 0x1f 0x4>; - reg = <0xef600a00 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <1>; - mal-rx-channel = <1>; - cell-index = <1>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <1>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - }; - - PCIE0: pcie@a0000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex"; - primary; - port = <0x0>; /* port number */ - reg = <0xa0000000 0x20000000 /* Config space access */ - 0xef000000 0x00001000>; /* Registers */ - dcr-reg = <0x040 0x020>; - sdr-base = <0x400>; - - /* Outbound ranges, one memory and one IO, - * later cannot be changed - */ - ranges = <0x02000000 0x00000000 0x80000000 0x90000000 0x00000000 0x08000000 - 0x01000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* This drives busses 0x00 to 0x3f */ - bus-range = <0x0 0x3f>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. - * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &UIC2 0x0 0x4 /* swizzled int A */ - 0x0 0x0 0x0 0x2 &UIC2 0x1 0x4 /* swizzled int B */ - 0x0 0x0 0x0 0x3 &UIC2 0x2 0x4 /* swizzled int C */ - 0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>; - }; - - PCIE1: pcie@c0000000 { - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex"; - primary; - port = <0x1>; /* port number */ - reg = <0xc0000000 0x20000000 /* Config space access */ - 0xef001000 0x00001000>; /* Registers */ - dcr-reg = <0x060 0x020>; - sdr-base = <0x440>; - - /* Outbound ranges, one memory and one IO, - * later cannot be changed - */ - ranges = <0x02000000 0x00000000 0x80000000 0x98000000 0x00000000 0x08000000 - 0x01000000 0x00000000 0x00000000 0xe0010000 0x00000000 0x00010000>; - - /* Inbound 2GB range starting at 0 */ - dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>; - - /* This drives busses 0x40 to 0x7f */ - bus-range = <0x40 0x7f>; - - /* Legacy interrupts (note the weird polarity, the bridge seems - * to invert PCIe legacy interrupts). - * We are de-swizzling here because the numbers are actually for - * port of the root complex virtual P2P bridge. But I want - * to avoid putting a node for it in the tree, so the numbers - * below are basically de-swizzled numbers. 
- * The real slot is on idsel 0, so the swizzling is 1:1 - */ - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = < - 0x0 0x0 0x0 0x1 &UIC2 0xb 0x4 /* swizzled int A */ - 0x0 0x0 0x0 0x2 &UIC2 0xc 0x4 /* swizzled int B */ - 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ - 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; - }; - }; -}; diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts index 269e930b3b0b..b7eac4e56019 100644 --- a/arch/powerpc/boot/dts/microwatt.dts +++ b/arch/powerpc/boot/dts/microwatt.dts @@ -1,13 +1,15 @@ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> / { #size-cells = <0x02>; #address-cells = <0x02>; - model-name = "microwatt"; + model = "microwatt"; compatible = "microwatt-soc"; aliases { serial0 = &UART0; + ethernet = &enet0; }; reserved-memory { @@ -35,40 +37,79 @@ ibm,powerpc-cpu-features { display-name = "Microwatt"; - isa = <3000>; + isa = <3010>; device_type = "cpu-features"; compatible = "ibm,powerpc-cpu-features"; mmu-radix { isa = <3000>; - usable-privilege = <2>; + usable-privilege = <6>; + os-support = <0>; }; little-endian { - isa = <2050>; - usable-privilege = <3>; + isa = <0>; + usable-privilege = <7>; + os-support = <0>; hwcap-bit-nr = <1>; }; cache-inhibited-large-page { - isa = <2040>; - usable-privilege = <2>; + isa = <0>; + usable-privilege = <6>; + os-support = <0>; }; fixed-point-v3 { isa = <3000>; - usable-privilege = <3>; + usable-privilege = <7>; }; no-execute { - isa = <2010>; + isa = <0x00>; usable-privilege = <2>; + os-support = <0>; }; floating-point { + hfscr-bit-nr = <0>; hwcap-bit-nr = <27>; isa = <0>; - usable-privilege = <3>; + usable-privilege = <7>; + hv-support = <1>; + os-support = <0>; + }; + + prefixed-instructions { + hfscr-bit-nr = <13>; + fscr-bit-nr = <13>; + isa = <3010>; + usable-privilege = <7>; + os-support = <1>; + hv-support = <1>; + }; + + tar { + hfscr-bit-nr = <8>; + fscr-bit-nr = <8>; + isa = <2070>; + usable-privilege = <7>; + os-support = <1>; + hv-support = <1>; + hwcap-bit-nr = <58>; + }; + + control-register { + isa = <0>; + usable-privilege = <7>; + }; + + system-call-vectored { + isa = <3000>; + usable-privilege = <7>; + os-support = <1>; + fscr-bit-nr = <12>; + hwcap-bit-nr = <52>; }; }; @@ -101,6 +142,36 @@ ibm,mmu-lpid-bits = <12>; ibm,mmu-pid-bits = <20>; }; + + PowerPC,Microwatt@1 { + i-cache-sets = <2>; + ibm,dec-bits = <64>; + reservation-granule-size = <64>; + clock-frequency = <100000000>; + timebase-frequency = <100000000>; + i-tlb-sets = <1>; + ibm,ppc-interrupt-server#s = <1>; + i-cache-block-size = <64>; + d-cache-block-size = <64>; + d-cache-sets = <2>; + i-tlb-size = <64>; + cpu-version = <0x990000>; + status = "okay"; + i-cache-size = <0x1000>; + ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>; + tlb-size = <0>; + tlb-sets = <0>; + device_type = "cpu"; + d-tlb-size = <128>; + d-tlb-sets = <2>; + reg = <1>; + general-purpose; + 64-bit; + d-cache-size = <0x1000>; + ibm,chip-id = <0>; + ibm,mmu-lpid-bits = <12>; + ibm,mmu-pid-bits = <20>; + }; }; soc@c0000000 { @@ -113,8 +184,8 @@ interrupt-controller@4000 { compatible = "openpower,xics-presentation", "ibm,ppc-xicp"; - ibm,interrupt-server-ranges = <0x0 0x1>; - reg = <0x4000 0x100>; + ibm,interrupt-server-ranges = <0x0 0x2>; + reg = <0x4000 0x10 0x4010 0x10>; }; ICS: interrupt-controller@5000 { @@ -138,7 +209,18 @@ interrupts = <0x10 0x1>; }; - ethernet@8020000 { + gpio: gpio@7000 { + device_type = "gpio"; + compatible = "faraday,ftgpio010"; + gpio-controller; + 
#gpio-cells = <2>; + reg = <0x7000 0x80>; + interrupts = <0x14 1>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + enet0: ethernet@8020000 { compatible = "litex,liteeth"; reg = <0x8021000 0x100 0x8020800 0x100 @@ -160,7 +242,6 @@ reg-names = "phy", "core", "reader", "writer", "irq"; bus-width = <4>; interrupts = <0x13 1>; - cap-sd-highspeed; clocks = <&sys_clk>; }; }; diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts index e09b37d7489d..a89cb3139ca8 100644 --- a/arch/powerpc/boot/dts/mpc8315erdb.dts +++ b/arch/powerpc/boot/dts/mpc8315erdb.dts @@ -6,6 +6,7 @@ */ /dts-v1/; +#include <dt-bindings/interrupt-controller/irq.h> / { compatible = "fsl,mpc8315erdb"; @@ -358,6 +359,15 @@ interrupt-parent = <&ipic>; fsl,mpc8313-wakeup-timer = <&gtm1>; }; + + gpio: gpio-controller@c00 { + compatible = "fsl,mpc8314-gpio"; + reg = <0xc00 0x100>; + interrupts = <74 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&ipic>; + gpio-controller; + #gpio-cells = <2>; + }; }; pci0: pci@e0008500 { diff --git a/arch/powerpc/boot/dts/obs600.dts b/arch/powerpc/boot/dts/obs600.dts deleted file mode 100644 index d10b0411809b..000000000000 --- a/arch/powerpc/boot/dts/obs600.dts +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Device Tree Source for PlatHome OpenBlockS 600 (405EX) - * - * Copyright 2011 Ben Herrenschmidt, IBM Corp. - * - * Based on Kilauea by: - * - * Copyright 2007-2009 DENX Software Engineering, Stefan Roese <sr@denx.de> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without - * any warranty of any kind, whether express or implied. - */ - -/dts-v1/; - -/ { - #address-cells = <1>; - #size-cells = <1>; - model = "PlatHome,OpenBlockS 600"; - compatible = "plathome,obs600"; - dcr-parent = <&{/cpus/cpu@0}>; - - aliases { - ethernet0 = &EMAC0; - ethernet1 = &EMAC1; - serial0 = &UART0; - serial1 = &UART1; - }; - - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu@0 { - device_type = "cpu"; - model = "PowerPC,405EX"; - reg = <0x00000000>; - clock-frequency = <0>; /* Filled in by U-Boot */ - timebase-frequency = <0>; /* Filled in by U-Boot */ - i-cache-line-size = <32>; - d-cache-line-size = <32>; - i-cache-size = <16384>; /* 16 kB */ - d-cache-size = <16384>; /* 16 kB */ - dcr-controller; - dcr-access-method = "native"; - }; - }; - - memory { - device_type = "memory"; - reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */ - }; - - UIC0: interrupt-controller { - compatible = "ibm,uic-405ex", "ibm,uic"; - interrupt-controller; - cell-index = <0>; - dcr-reg = <0x0c0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - }; - - UIC1: interrupt-controller1 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <1>; - dcr-reg = <0x0d0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - UIC2: interrupt-controller2 { - compatible = "ibm,uic-405ex","ibm,uic"; - interrupt-controller; - cell-index = <2>; - dcr-reg = <0x0e0 0x009>; - #address-cells = <0>; - #size-cells = <0>; - #interrupt-cells = <2>; - interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */ - interrupt-parent = <&UIC0>; - }; - - CPM0: cpm { - compatible = "ibm,cpm"; - dcr-access-method = "native"; - dcr-reg = <0x0b0 0x003>; - unused-units = <0x00000000>; - idle-doze = <0x02000000>; - standby = <0xe3e74800>; - }; - - plb { - compatible = "ibm,plb-405ex", "ibm,plb4"; -
#address-cells = <1>; - #size-cells = <1>; - ranges; - clock-frequency = <0>; /* Filled in by U-Boot */ - - SDRAM0: memory-controller { - compatible = "ibm,sdram-405ex", "ibm,sdram-4xx-ddr2"; - dcr-reg = <0x010 0x002>; - interrupt-parent = <&UIC2>; - interrupts = <0x5 0x4 /* ECC DED Error */ - 0x6 0x4>; /* ECC SEC Error */ - }; - - CRYPTO: crypto@ef700000 { - compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto"; - reg = <0xef700000 0x80400>; - interrupt-parent = <&UIC0>; - interrupts = <0x17 0x2>; - }; - - MAL0: mcmal { - compatible = "ibm,mcmal-405ex", "ibm,mcmal2"; - dcr-reg = <0x180 0x062>; - num-tx-chans = <2>; - num-rx-chans = <2>; - interrupt-parent = <&MAL0>; - interrupts = <0x0 0x1 0x2 0x3 0x4>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*TXEOB*/ 0x0 &UIC0 0xa 0x4 - /*RXEOB*/ 0x1 &UIC0 0xb 0x4 - /*SERR*/ 0x2 &UIC1 0x0 0x4 - /*TXDE*/ 0x3 &UIC1 0x1 0x4 - /*RXDE*/ 0x4 &UIC1 0x2 0x4>; - interrupt-map-mask = <0xffffffff>; - }; - - POB0: opb { - compatible = "ibm,opb-405ex", "ibm,opb"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x80000000 0x80000000 0x10000000 - 0xef600000 0xef600000 0x00a00000 - 0xf0000000 0xf0000000 0x10000000>; - dcr-reg = <0x0a0 0x005>; - clock-frequency = <0>; /* Filled in by U-Boot */ - - EBC0: ebc { - compatible = "ibm,ebc-405ex", "ibm,ebc"; - dcr-reg = <0x012 0x002>; - #address-cells = <2>; - #size-cells = <1>; - clock-frequency = <0>; /* Filled in by U-Boot */ - /* ranges property is supplied by U-Boot */ - interrupts = <0x5 0x1>; - interrupt-parent = <&UIC1>; - - nor_flash@0,0 { - compatible = "amd,s29gl512n", "cfi-flash"; - bank-width = <2>; - reg = <0x00000000 0x00000000 0x08000000>; - #address-cells = <1>; - #size-cells = <1>; - partition@0 { - label = "kernel + initrd"; - reg = <0x00000000 0x03de0000>; - }; - partition@3de0000 { - label = "user config area"; - reg = <0x03de0000 0x00080000>; - }; - partition@3e60000 { - label = "user program area"; - reg = <0x03e60000 0x04000000>; - }; - partition@7e60000 { - label = "flat device tree"; - reg = <0x07e60000 0x00080000>; - }; - partition@7ee0000 { - label = "test program"; - reg = <0x07ee0000 0x00080000>; - }; - partition@7f60000 { - label = "u-boot env"; - reg = <0x07f60000 0x00040000>; - }; - partition@7fa0000 { - label = "u-boot"; - reg = <0x07fa0000 0x00060000>; - }; - }; - }; - - UART0: serial@ef600200 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600200 0x00000008>; - virtual-reg = <0xef600200>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1a 0x4>; - }; - - UART1: serial@ef600300 { - device_type = "serial"; - compatible = "ns16550"; - reg = <0xef600300 0x00000008>; - virtual-reg = <0xef600300>; - clock-frequency = <0>; /* Filled in by U-Boot */ - current-speed = <0>; - interrupt-parent = <&UIC0>; - interrupts = <0x1 0x4>; - }; - - IIC0: i2c@ef600400 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600400 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x2 0x4>; - #address-cells = <1>; - #size-cells = <0>; - - rtc@68 { - compatible = "dallas,ds1340"; - reg = <0x68>; - }; - }; - - IIC1: i2c@ef600500 { - compatible = "ibm,iic-405ex", "ibm,iic"; - reg = <0xef600500 0x00000014>; - interrupt-parent = <&UIC0>; - interrupts = <0x7 0x4>; - }; - - RGMII0: emac-rgmii@ef600b00 { - compatible = "ibm,rgmii-405ex", "ibm,rgmii"; - reg = <0xef600b00 0x00000104>; - has-mdio; - }; - - EMAC0: ethernet@ef600900 { - linux,network-index = <0x0>; 
- device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC0>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x18 0x4 - /*Wake*/ 0x1 &UIC1 0x1d 0x4>; - reg = <0xef600900 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <0>; - mal-rx-channel = <0>; - cell-index = <0>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <0>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - EMAC1: ethernet@ef600a00 { - linux,network-index = <0x1>; - device_type = "network"; - compatible = "ibm,emac-405ex", "ibm,emac4sync"; - interrupt-parent = <&EMAC1>; - interrupts = <0x0 0x1>; - #interrupt-cells = <1>; - #address-cells = <0>; - #size-cells = <0>; - interrupt-map = </*Status*/ 0x0 &UIC0 0x19 0x4 - /*Wake*/ 0x1 &UIC1 0x1f 0x4>; - reg = <0xef600a00 0x000000c4>; - local-mac-address = [000000000000]; /* Filled in by U-Boot */ - mal-device = <&MAL0>; - mal-tx-channel = <1>; - mal-rx-channel = <1>; - cell-index = <1>; - max-frame-size = <9000>; - rx-fifo-size = <4096>; - tx-fifo-size = <2048>; - rx-fifo-size-gige = <16384>; - tx-fifo-size-gige = <16384>; - phy-mode = "rgmii"; - phy-map = <0x00000000>; - rgmii-device = <&RGMII0>; - rgmii-channel = <1>; - has-inverted-stacr-oc; - has-new-stacr-staopc; - }; - - GPIO: gpio@ef600800 { - device_type = "gpio"; - compatible = "ibm,gpio-405ex", "ibm,ppc4xx-gpio"; - reg = <0xef600800 0x50>; - }; - }; - }; - chosen { - stdout-path = "/plb/opb/serial@ef600200"; - }; -}; diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index cae31a6e8f02..2c0e2a1cab01 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c @@ -188,7 +188,7 @@ static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { } /* A buffer that may be edited by tools operating on a zImage binary so as to * edit the command line passed to vmlinux (by setting /chosen/bootargs). - * The buffer is put in it's own section so that tools may locate it easier. + * The buffer is put in its own section so that tools may locate it easier. */ static char cmdline[BOOT_COMMAND_LINE_SIZE] __attribute__((__section__("__builtin_cmdline"))); diff --git a/arch/powerpc/boot/ppcboot-hotfoot.h b/arch/powerpc/boot/ppcboot-hotfoot.h deleted file mode 100644 index 4728db95f58a..000000000000 --- a/arch/powerpc/boot/ppcboot-hotfoot.h +++ /dev/null @@ -1,119 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * This interface is used for compatibility with old U-boots *ONLY*. - * Please do not imitate or extend this. - */ - -/* - * Unfortunately, the ESTeem Hotfoot board uses a mangled version of - * ppcboot.h for historical reasons, and in the interest of having a - * mainline kernel boot on the production board+bootloader, this was the - * least-offensive solution. Please direct all flames to: - * - * Solomon Peachy <solomon@linux-wlan.com> - * - * (This header is identical to ppcboot.h except for the - * TARGET_HOTFOOT bits) - */ - -/* - * (C) Copyright 2000, 2001 - * Wolfgang Denk, DENX Software Engineering, wd@denx.de. 
- */ - -#ifndef __PPCBOOT_H__ -#define __PPCBOOT_H__ - -/* - * Board information passed to kernel from PPCBoot - * - * include/asm-ppc/ppcboot.h - */ - -#include "types.h" - -typedef struct bd_info { - unsigned long bi_memstart; /* start of DRAM memory */ - unsigned long bi_memsize; /* size of DRAM memory in bytes */ - unsigned long bi_flashstart; /* start of FLASH memory */ - unsigned long bi_flashsize; /* size of FLASH memory */ - unsigned long bi_flashoffset; /* reserved area for startup monitor */ - unsigned long bi_sramstart; /* start of SRAM memory */ - unsigned long bi_sramsize; /* size of SRAM memory */ -#if defined(TARGET_8xx) || defined(TARGET_CPM2) || defined(TARGET_85xx) ||\ - defined(TARGET_83xx) - unsigned long bi_immr_base; /* base of IMMR register */ -#endif -#if defined(TARGET_PPC_MPC52xx) - unsigned long bi_mbar_base; /* base of internal registers */ -#endif - unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */ - unsigned long bi_ip_addr; /* IP Address */ - unsigned char bi_enetaddr[6]; /* Ethernet address */ -#if defined(TARGET_HOTFOOT) - /* second onboard ethernet port */ - unsigned char bi_enet1addr[6]; -#define HAVE_ENET1ADDR -#endif /* TARGET_HOOTFOOT */ - unsigned short bi_ethspeed; /* Ethernet speed in Mbps */ - unsigned long bi_intfreq; /* Internal Freq, in MHz */ - unsigned long bi_busfreq; /* Bus Freq, in MHz */ -#if defined(TARGET_CPM2) - unsigned long bi_cpmfreq; /* CPM_CLK Freq, in MHz */ - unsigned long bi_brgfreq; /* BRG_CLK Freq, in MHz */ - unsigned long bi_sccfreq; /* SCC_CLK Freq, in MHz */ - unsigned long bi_vco; /* VCO Out from PLL, in MHz */ -#endif -#if defined(TARGET_PPC_MPC52xx) - unsigned long bi_ipbfreq; /* IPB Bus Freq, in MHz */ - unsigned long bi_pcifreq; /* PCI Bus Freq, in MHz */ -#endif - unsigned long bi_baudrate; /* Console Baudrate */ -#if defined(TARGET_4xx) - unsigned char bi_s_version[4]; /* Version of this structure */ - unsigned char bi_r_version[32]; /* Version of the ROM (IBM) */ - unsigned int bi_procfreq; /* CPU (Internal) Freq, in Hz */ - unsigned int bi_plb_busfreq; /* PLB Bus speed, in Hz */ - unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */ - unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */ -#endif -#if defined(TARGET_HOTFOOT) - unsigned int bi_pllouta_freq; /* PLL OUTA speed, in Hz */ -#endif -#if defined(TARGET_HYMOD) - hymod_conf_t bi_hymod_conf; /* hymod configuration information */ -#endif -#if defined(TARGET_EVB64260) || defined(TARGET_405EP) || defined(TARGET_44x) || \ - defined(TARGET_85xx) || defined(TARGET_83xx) || defined(TARGET_HAS_ETH1) - /* second onboard ethernet port */ - unsigned char bi_enet1addr[6]; -#define HAVE_ENET1ADDR -#endif -#if defined(TARGET_EVB64260) || defined(TARGET_440GX) || \ - defined(TARGET_85xx) || defined(TARGET_HAS_ETH2) - /* third onboard ethernet ports */ - unsigned char bi_enet2addr[6]; -#define HAVE_ENET2ADDR -#endif -#if defined(TARGET_440GX) || defined(TARGET_HAS_ETH3) - /* fourth onboard ethernet ports */ - unsigned char bi_enet3addr[6]; -#define HAVE_ENET3ADDR -#endif -#if defined(TARGET_HOTFOOT) - int bi_phynum[2]; /* Determines phy mapping */ - int bi_phymode[2]; /* Determines phy mode */ -#endif -#if defined(TARGET_4xx) - unsigned int bi_opbfreq; /* OB clock in Hz */ - int bi_iic_fast[2]; /* Use fast i2c mode */ -#endif -#if defined(TARGET_440GX) - int bi_phynum[4]; /* phy mapping */ - int bi_phymode[4]; /* phy mode */ -#endif -} bd_t; - -#define bi_tbfreq bi_intfreq - -#endif /* __PPCBOOT_H__ */ diff --git a/arch/powerpc/boot/ppcboot.h 
b/arch/powerpc/boot/ppcboot.h index a78b0b257698..90c8f452fe6e 100644 --- a/arch/powerpc/boot/ppcboot.h +++ b/arch/powerpc/boot/ppcboot.h @@ -63,7 +63,7 @@ typedef struct bd_info { #if defined(TARGET_HYMOD) hymod_conf_t bi_hymod_conf; /* hymod configuration information */ #endif -#if defined(TARGET_EVB64260) || defined(TARGET_405EP) || defined(TARGET_44x) || \ +#if defined(TARGET_EVB64260) || defined(TARGET_44x) || \ defined(TARGET_85xx) || defined(TARGET_83xx) || defined(TARGET_HAS_ETH1) /* second onboard ethernet port */ unsigned char bi_enet1addr[6]; diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c index f157717ae814..89ff46b8b225 100644 --- a/arch/powerpc/boot/ps3.c +++ b/arch/powerpc/boot/ps3.c @@ -25,7 +25,7 @@ BSS_STACK(4096); /* A buffer that may be edited by tools operating on a zImage binary so as to * edit the command line passed to vmlinux (by setting /chosen/bootargs). - * The buffer is put in it's own section so that tools may locate it easier. + * The buffer is put in its own section so that tools may locate it easier. */ static char cmdline[BOOT_COMMAND_LINE_SIZE] diff --git a/arch/powerpc/boot/rs6000.h b/arch/powerpc/boot/rs6000.h index a9d879155ef9..16df8f3c43f1 100644 --- a/arch/powerpc/boot/rs6000.h +++ b/arch/powerpc/boot/rs6000.h @@ -1,11 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* IBM RS/6000 "XCOFF" file definitions for BFD. Copyright (C) 1990, 1991 Free Software Foundation, Inc. - FIXME: Can someone provide a transliteration of this name into ASCII? - Using the following chars caused a compiler warning on HIUX (so I replaced - them with octal escapes), and isn't useful without an understanding of what - character set it is. - Written by Mimi Ph\373\364ng-Th\345o V\365 of IBM + Written by Mimi Phuong-Thao Vo of IBM and John Gilmore of Cygnus Support. */ /********************** FILE HEADER **********************/ diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 352d7de24018..3d8dc822282a 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -234,10 +234,8 @@ fi # suppress some warnings in recent ld versions nowarn="-z noexecstack" -if ! ld_is_lld; then - if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then - nowarn="$nowarn --no-warn-rwx-segments" - fi +if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then + nowarn="$nowarn --no-warn-rwx-segments" fi platformo=$object/"$platform".o @@ -271,11 +269,6 @@ pseries) fi make_space=n ;; -maple) - platformo="$object/of.o $object/epapr.o" - link_address='0x400000' - make_space=n - ;; pmac|chrp) platformo="$object/of.o $object/epapr.o" make_space=n @@ -337,7 +330,7 @@ ps3) make_space=n pie= ;; -ep88xc|ep405|ep8248e) +ep88xc|ep8248e) platformo="$object/fixed-head.o $object/$platform.o" binary=y ;; @@ -468,26 +461,6 @@ uboot) fi exit 0 ;; -uboot-obs600) - rm -f "$ofile" - # obs600 wants a multi image with an initrd, so we need to put a fake - # one in even when building a "normal" image. 
- if [ -n "$initrd" ]; then - real_rd="$initrd" - else - real_rd=`mktemp` - echo "\0" >>"$real_rd" - fi - ${MKIMAGE} -A ppc -O linux -T multi -C gzip -a $membase -e $membase \ - $uboot_version -d "$vmz":"$real_rd":"$dtb" "$ofile" - if [ -z "$initrd" ]; then - rm -f "$real_rd" - fi - if [ -z "$cacheit" ]; then - rm -f "$vmz" - fi - exit 0 - ;; esac addsec() { @@ -537,7 +510,7 @@ fi # post-processing needed for some platforms case "$platform" in -pseries|chrp|maple) +pseries|chrp) $objbin/addnote "$ofile" ;; coff) diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h index ebfadd39e192..9506a96ebbcc 100644 --- a/arch/powerpc/boot/xz_config.h +++ b/arch/powerpc/boot/xz_config.h @@ -50,11 +50,8 @@ static inline void put_unaligned_be32(u32 val, void *p) /* prevent the inclusion of the xz-preboot MM headers */ #define DECOMPR_MM_H #define memmove memmove -#define XZ_EXTERN static /* xz.h needs to be included directly since we need enum xz_mode */ #include "../../../include/linux/xz.h" -#undef XZ_EXTERN - #endif diff --git a/arch/powerpc/configs/40x.config b/arch/powerpc/configs/40x.config deleted file mode 100644 index 82a9d58ddb81..000000000000 --- a/arch/powerpc/configs/40x.config +++ /dev/null @@ -1,2 +0,0 @@ -CONFIG_PPC64=n -CONFIG_40x=y diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig deleted file mode 100644 index 25eed86ec528..000000000000 --- a/arch/powerpc/configs/40x/acadia_defconfig +++ /dev/null @@ -1,61 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_ACADIA=y -CONFIG_PCI=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=m -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -CONFIG_IBM_EMAC_RXB=256 -CONFIG_IBM_EMAC_TXB=256 -CONFIG_IBM_EMAC_DEBUG=y -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -CONFIG_THERMAL=y -# CONFIG_USB_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig deleted file mode 100644 index 3549c9e950e8..000000000000 --- a/arch/powerpc/configs/40x/kilauea_defconfig +++ /dev/null @@ -1,69 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_KILAUEA=y -CONFIG_PCI=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set 
-CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_RAW_NAND=y -CONFIG_MTD_NAND_NDFC=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -CONFIG_IBM_EMAC_RXB=256 -CONFIG_IBM_EMAC_TXB=256 -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_IBM_IIC=y -CONFIG_SENSORS_LM75=y -CONFIG_THERMAL=y -# CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS1307=y -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig deleted file mode 100644 index a974d1e945cc..000000000000 --- a/arch/powerpc/configs/40x/klondike_defconfig +++ /dev/null @@ -1,43 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_APM8018X=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_MATH_EMULATION=y -# CONFIG_SUSPEND is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_SAS_ATTRS=y -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -# CONFIG_UNIX98_PTYS is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_EXT4_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_UTF8=y -CONFIG_MAGIC_SYSRQ=y -# CONFIG_SCHED_DEBUG is not set -# CONFIG_DEBUG_BUGVERBOSE is not set -# CONFIG_FTRACE is not set diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig deleted file mode 100644 index 4563f88acf0c..000000000000 --- a/arch/powerpc/configs/40x/makalu_defconfig +++ /dev/null @@ -1,59 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_MAKALU=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=m -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -CONFIG_IBM_EMAC_RXB=256 -CONFIG_IBM_EMAC_TXB=256 -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# 
CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -CONFIG_THERMAL=y -# CONFIG_USB_SUPPORT is not set -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig deleted file mode 100644 index 2a2bb3f46847..000000000000 --- a/arch/powerpc/configs/40x/obs600_defconfig +++ /dev/null @@ -1,69 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_OBS600=y -CONFIG_MATH_EMULATION=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_RAW_NAND=y -CONFIG_MTD_NAND_NDFC=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -CONFIG_IBM_EMAC_RXB=256 -CONFIG_IBM_EMAC_TXB=256 -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_IBM_IIC=y -CONFIG_SENSORS_LM75=y -CONFIG_THERMAL=y -# CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS1307=y -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig deleted file mode 100644 index 9eaaf1a1d2c6..000000000000 --- a/arch/powerpc/configs/40x/walnut_defconfig +++ /dev/null @@ -1,55 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IPV6 is not set -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=m -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -CONFIG_THERMAL=y -CONFIG_EXT2_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y 
-CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig index 2479ab62d12f..98221bda380d 100644 --- a/arch/powerpc/configs/44x/sam440ep_defconfig +++ b/arch/powerpc/configs/44x/sam440ep_defconfig @@ -91,5 +91,4 @@ CONFIG_AFFS_FS=m # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig index 20891c413149..5757625469c4 100644 --- a/arch/powerpc/configs/44x/warp_defconfig +++ b/arch/powerpc/configs/44x/warp_defconfig @@ -85,8 +85,6 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y -CONFIG_CRC_CCITT=y -CONFIG_CRC_T10DIF=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_FS=y diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig index 1715ff547442..b99caba8724a 100644 --- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig @@ -73,6 +73,5 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_8=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig index e65c0057147f..11163052fdba 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig @@ -80,5 +80,4 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig index 17714bf0ed40..312d39e4242c 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig @@ -72,5 +72,4 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig index 58fae5131fa7..ac27f99faab8 100644 --- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig @@ -75,6 +75,5 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config index 524db76f47b7..8aff83217397 100644 --- a/arch/powerpc/configs/85xx-hw.config +++ b/arch/powerpc/configs/85xx-hw.config @@ -24,6 +24,7 @@ CONFIG_FS_ENET=y CONFIG_FSL_CORENET_CF=y CONFIG_FSL_DMA=y CONFIG_FSL_HV_MANAGER=y +CONFIG_FSL_IFC=y CONFIG_FSL_PQ_MDIO=y CONFIG_FSL_RIO=y CONFIG_FSL_XGMAC_MDIO=y @@ -58,6 +59,7 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_MARVELL_PHY=y CONFIG_MDIO_BUS_MUX_GPIO=y CONFIG_MDIO_BUS_MUX_MMIOREG=y +CONFIG_MEMORY=y CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI=y diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index da6fc203e2dc..7beb36a41d45 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -221,9 +221,6 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=y -CONFIG_CRC_CCITT=y -CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=y 
CONFIG_MAGIC_SYSRQ=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_MD5=y diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig index e7080497048d..0a42072fa23c 100644 --- a/arch/powerpc/configs/85xx/stx_gp3_defconfig +++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig @@ -60,8 +60,6 @@ CONFIG_CRAMFS=m CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS=y -CONFIG_CRC_CCITT=y -CONFIG_CRC_T10DIF=m CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_BDI_SWITCH=y diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig index 3a6381aa9fdc..488d03ae6d6c 100644 --- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig +++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig @@ -132,7 +132,6 @@ CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_T10DIF=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_CRYPTO_HMAC=y diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config index 0cb24b33c88e..e7bd265fae5a 100644 --- a/arch/powerpc/configs/86xx-hw.config +++ b/arch/powerpc/configs/86xx-hw.config @@ -5,7 +5,6 @@ CONFIG_BROADCOM_PHY=y # CONFIG_CARDBUS is not set CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_ST=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_HMAC=y CONFIG_DS1682=y CONFIG_EEPROM_LEGACY=y diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig index 7f35d5bc1229..3c6445c98a85 100644 --- a/arch/powerpc/configs/adder875_defconfig +++ b/arch/powerpc/configs/adder875_defconfig @@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set -# CONFIG_BASE_FULL is not set +CONFIG_BASE_SMALL=y # CONFIG_FUTEX is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_BLK_DEV_BSG is not set @@ -44,7 +44,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig index 200bb1ecb560..69ef3dc31c4b 100644 --- a/arch/powerpc/configs/amigaone_defconfig +++ b/arch/powerpc/configs/amigaone_defconfig @@ -106,7 +106,6 @@ CONFIG_TMPFS=y CONFIG_AFFS_FS=m CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=m -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 53f43a34e1a9..3347192b77b8 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -25,7 +25,6 @@ CONFIG_PS3_DISK=y CONFIG_PS3_ROM=m CONFIG_PS3_FLASH=m CONFIG_PS3_LPM=m -CONFIG_PPC_IBM_CELL_BLADE=y CONFIG_RTAS_FLASH=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -133,7 +132,6 @@ CONFIG_SKGE=m CONFIG_SKY2=m CONFIG_GELIC_NET=m CONFIG_GELIC_WIRELESS=y -CONFIG_SPIDER_NET=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO_I8042 is not set @@ -168,7 +166,6 @@ CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y CONFIG_EDAC=y -CONFIG_EDAC_CELL=y CONFIG_UIO=m CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index fb314f75ad4b..b799c95480ae 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ b/arch/powerpc/configs/chrp32_defconfig @@ -110,7 +110,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NLS_ASCII=y 
CONFIG_NLS_ISO8859_1=m -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig index a98ef6a4abef..354180ab94bc 100644 --- a/arch/powerpc/configs/ep88xc_defconfig +++ b/arch/powerpc/configs/ep88xc_defconfig @@ -6,7 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set -# CONFIG_BASE_FULL is not set +CONFIG_BASE_SMALL=y # CONFIG_FUTEX is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_BLK_DEV_BSG is not set @@ -47,7 +47,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config index 3009b0efaf34..2f81bc2d819e 100644 --- a/arch/powerpc/configs/fsl-emb-nonhw.config +++ b/arch/powerpc/configs/fsl-emb-nonhw.config @@ -15,7 +15,6 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_SCHED=y CONFIG_CGROUPS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_CRC_T10DIF=y CONFIG_CPUSETS=y CONFIG_CRAMFS=y CONFIG_CRYPTO_MD4=y @@ -112,7 +111,6 @@ CONFIG_QNX4FS_FS=m CONFIG_RCU_TRACE=y CONFIG_RESET_CONTROLLER=y CONFIG_ROOT_NFS=y -CONFIG_SYSV_FS=m CONFIG_SYSVIPC=y CONFIG_TMPFS=y CONFIG_UBIFS_FS=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 9215bed53291..428f17b45513 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -231,12 +231,11 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_BOOTX_TEXT=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MICHAEL_MIC=m diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig index d77eeb525366..cdd99657b71b 100644 --- a/arch/powerpc/configs/gamecube_defconfig +++ b/arch/powerpc/configs/gamecube_defconfig @@ -82,7 +82,6 @@ CONFIG_ROOT_NFS=y CONFIG_CIFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_CCITT=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig index fa707de761be..b564f9e33a0d 100644 --- a/arch/powerpc/configs/linkstation_defconfig +++ b/arch/powerpc/configs/linkstation_defconfig @@ -125,8 +125,6 @@ CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_932=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_UTF8=m -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig deleted file mode 100644 index c821a97f4a89..000000000000 --- a/arch/powerpc/configs/maple_defconfig +++ /dev/null @@ -1,111 +0,0 @@ -CONFIG_PPC64=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_COMPAT_BRK is not set -CONFIG_PROFILING=y -CONFIG_KPROBES=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y -# CONFIG_PPC_POWERNV is not set -# CONFIG_PPC_PSERIES is not set -# CONFIG_PPC_PMAC is not set 
-CONFIG_PPC_MAPLE=y -CONFIG_UDBG_RTAS_CONSOLE=y -CONFIG_GEN_RTC=y -CONFIG_KEXEC=y -CONFIG_IRQ_ALL_CPUS=y -CONFIG_PPC_4K_PAGES=y -CONFIG_PCI_MSI=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -# CONFIG_IPV6 is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -# CONFIG_SCSI_PROC_FS is not set -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_IPR=y -CONFIG_ATA=y -CONFIG_PATA_AMD=y -CONFIG_ATA_GENERIC=y -CONFIG_NETDEVICES=y -CONFIG_AMD8111_ETH=y -CONFIG_TIGON3=y -CONFIG_E1000=y -CONFIG_USB_PEGASUS=y -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_HVC_RTAS=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_AMD8111=y -# CONFIG_VGA_CONSOLE is not set -CONFIG_HID_GYRATION=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SUNPLUS=y -CONFIG_USB=y -CONFIG_USB_MON=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -# CONFIG_USB_EHCI_HCD_PPC_OF is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_UHCI_HCD=y -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_KEYSPAN=y -CONFIG_USB_SERIAL_TI=m -CONFIG_EXT2_FS=y -CONFIG_EXT4_FS=y -CONFIG_FS_DAX=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_HUGETLBFS=y -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_DEFAULT="utf-8" -CONFIG_NLS_UTF8=y -CONFIG_CRC_CCITT=y -CONFIG_CRC_T10DIF=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_STACKOVERFLOW=y -CONFIG_XMON=y -CONFIG_XMON_DEFAULT=y -CONFIG_BOOTX_TEXT=y -CONFIG_CRYPTO_ECB=m -CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_HW is not set -CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index 83c4710017e9..a815d9e5e3e8 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig @@ -97,7 +97,6 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA512=y diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig index 5c56d36cdfc5..dfbdd5e8e108 100644 --- a/arch/powerpc/configs/mpc866_ads_defconfig +++ b/arch/powerpc/configs/mpc866_ads_defconfig @@ -6,7 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y # CONFIG_BUG is not set -# CONFIG_BASE_FULL is not set +CONFIG_BASE_SMALL=y # CONFIG_EPOLL is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_BLK_DEV_BSG is not set @@ -38,5 +38,3 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC_CCITT=y -CONFIG_CRC32_SLICEBY4=y diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 56b876e418e9..9bc2758a6a9a 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -7,7 +7,7 @@ CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set -# CONFIG_BASE_FULL is not set +CONFIG_BASE_SMALL=y # CONFIG_FUTEX is not set CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set @@ -70,7 +70,6 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO=y 
CONFIG_CRYPTO_DEV_TALITOS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y @@ -78,4 +77,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig index d1c7fd5bf34b..fa2b3b9c5945 100644 --- a/arch/powerpc/configs/mvme5100_defconfig +++ b/arch/powerpc/configs/mvme5100_defconfig @@ -107,8 +107,6 @@ CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_CODEPAGE_932=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_UTF8=m -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y CONFIG_XZ_DEC=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 61993944db40..8bbf51b38480 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -159,7 +159,6 @@ CONFIG_NFSD=y CONFIG_NFSD_V4=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_CCITT=y CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 57ded82c2840..ae45f70b29f0 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -87,7 +87,6 @@ CONFIG_IP_NF_RAW=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP_DCCP=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y @@ -208,6 +207,7 @@ CONFIG_FB_ATY=y CONFIG_FB_ATY_CT=y CONFIG_FB_ATY_GX=y CONFIG_FB_3DFX=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y @@ -275,7 +275,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index ee84ade7a033..379229c982a4 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -317,11 +317,9 @@ CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_CRC32C_VPMSUM=m -CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1_PPC=m diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig deleted file mode 100644 index 7e48693775f4..000000000000 --- a/arch/powerpc/configs/ppc40x_defconfig +++ /dev/null @@ -1,74 +0,0 @@ -CONFIG_40x=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PPC4xx_GPIO=y -CONFIG_ACADIA=y -CONFIG_HOTFOOT=y -CONFIG_KILAUEA=y -CONFIG_MAKALU=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_CONNECTOR=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=m -CONFIG_MTD_CFI=y -CONFIG_MTD_JEDECPROBE=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_GLUEBI=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=35000 -CONFIG_NETDEVICES=y -CONFIG_IBM_EMAC=y -# CONFIG_INPUT is not set -CONFIG_SERIO=m -# 
CONFIG_SERIO_I8042 is not set -# CONFIG_SERIO_SERPORT is not set -# CONFIG_VT is not set -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_OF_PLATFORM=y -# CONFIG_HW_RANDOM is not set -CONFIG_I2C=m -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_GPIO=m -CONFIG_I2C_IBM_IIC=m -# CONFIG_HWMON is not set -CONFIG_THERMAL=y -CONFIG_FB=m -CONFIG_EXT2_FS=y -CONFIG_EXT4_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_JFFS2_FS=m -CONFIG_UBIFS_FS=m -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_ISO8859_1=m -CONFIG_DEBUG_FS=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_DES=y diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index 8b595f67068c..41c930f74ed4 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig @@ -90,7 +90,6 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ISO8859_1=m -CONFIG_CRC_T10DIF=m CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_CRYPTO_ECB=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 544a65fda77b..3423c405cad4 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -44,7 +44,6 @@ CONFIG_PPC_SMLPAR=y CONFIG_IBMEBUS=y CONFIG_PAPR_SCM=m CONFIG_PPC_SVM=y -CONFIG_PPC_MAPLE=y CONFIG_PPC_PASEMI=y CONFIG_PPC_PASEMI_IOMMU=y CONFIG_PPC_PS3=y @@ -52,7 +51,6 @@ CONFIG_PS3_DISK=m CONFIG_PS3_ROM=m CONFIG_PS3_FLASH=m CONFIG_PS3_LPM=m -CONFIG_PPC_IBM_CELL_BLADE=y CONFIG_RTAS_FLASH=m CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -81,7 +79,6 @@ CONFIG_MODULE_SIG_SHA512=y CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_SLAB_MERGE_DEFAULT is not set CONFIG_SLAB_FREELIST_RANDOM=y @@ -93,6 +90,7 @@ CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_MEM_SOFT_DIRTY=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_ZONE_DEVICE=y CONFIG_NET=y CONFIG_PACKET=y @@ -229,7 +227,6 @@ CONFIG_NETXEN_NIC=m CONFIG_SUNGEM=y CONFIG_GELIC_NET=m CONFIG_GELIC_WIRELESS=y -CONFIG_SPIDER_NET=m CONFIG_BROADCOM_PHY=m CONFIG_MARVELL_PHY=y CONFIG_PPP=m @@ -380,7 +377,7 @@ CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_IMA_ARCH_POLICY=y CONFIG_IMA_APPRAISE_MODSIG=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_SERPENT=m @@ -390,9 +387,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_LZO=m -CONFIG_CRYPTO_CRC32C_VPMSUM=m -CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m -CONFIG_CRYPTO_VPMSUM_TESTER=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_AES_GCM_P10=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 4c05f4e4d505..90247b2a0ab0 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -207,7 +207,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y -CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACK_USAGE=y @@ -221,7 +220,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m 
CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 66c7b28d7450..f96f8ed9856c 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -12,7 +12,6 @@ CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_CGROUPS=y CONFIG_CGROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_USER_NS=y @@ -226,7 +225,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_IP_DCCP=m CONFIG_TIPC=m CONFIG_ATM=m CONFIG_ATM_CLIP=m @@ -436,7 +434,6 @@ CONFIG_DM9102=m CONFIG_ULI526X=m CONFIG_PCMCIA_XIRCOM=m CONFIG_DL2K=m -CONFIG_SUNDANCE=m CONFIG_S2IO=m CONFIG_FEC_MPC52xx=m CONFIG_GIANFAR=m @@ -487,7 +484,6 @@ CONFIG_VIA_VELOCITY=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_FDDI=y CONFIG_SKFP=m -CONFIG_NET_SB1000=m CONFIG_BROADCOM_PHY=m CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m @@ -718,6 +714,7 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y @@ -987,7 +984,6 @@ CONFIG_MINIX_FS=m CONFIG_OMFS_FS=m CONFIG_QNX4FS_FS=m CONFIG_ROMFS_FS=m -CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=m CONFIG_NFS_V3_ACL=y @@ -1076,7 +1072,7 @@ CONFIG_SECURITY_NETWORK_XFRM=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 2b175ddf82f0..0b48d2b776c4 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -148,8 +148,6 @@ CONFIG_NLS_ISO8859_1=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_LZO=m -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 9d44e6630908..2b71a6dc399e 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -78,7 +78,6 @@ CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_NVME=m CONFIG_NVME_MULTIPATH=y CONFIG_EEPROM_AT24=m -# CONFIG_CXL is not set # CONFIG_OCXL is not set CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m @@ -279,9 +278,6 @@ CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY=y # CONFIG_INTEGRITY is not set CONFIG_LSM="yama,loadpin,safesetid,integrity" # CONFIG_CRYPTO_HW is not set -CONFIG_CRC16=y -CONFIG_CRC_ITU_T=y -CONFIG_LIBCRC32C=y # CONFIG_XZ_DEC_X86 is not set # CONFIG_XZ_DEC_IA64 is not set # CONFIG_XZ_DEC_ARM is not set diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig index 7a978d396991..e415222bd839 100644 --- a/arch/powerpc/configs/storcenter_defconfig +++ b/arch/powerpc/configs/storcenter_defconfig @@ -75,4 +75,3 @@ CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y -CONFIG_CRC_T10DIF=y diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig index 083c2e57520a..425f10837a18 100644 --- a/arch/powerpc/configs/tqm8xx_defconfig +++ b/arch/powerpc/configs/tqm8xx_defconfig @@ -6,7 +6,7 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set -# CONFIG_BASE_FULL is not set +CONFIG_BASE_SMALL=y # CONFIG_FUTEX is not set 
# CONFIG_VM_EVENT_COUNTERS is not set CONFIG_MODULES=y @@ -54,7 +54,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index 5017a697b67b..7c714a19221e 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig @@ -114,7 +114,6 @@ CONFIG_ROOT_NFS=y CONFIG_CIFS=m CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_CCITT=y CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_SPINLOCK=y diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore index e1094f08f713..e9fe73aac8b6 100644 --- a/arch/powerpc/crypto/.gitignore +++ b/arch/powerpc/crypto/.gitignore @@ -1,3 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only aesp10-ppc.S +aesp8-ppc.S ghashp10-ppc.S +ghashp8-ppc.S diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 1e201b7ae2fc..caaa359f4742 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -2,42 +2,21 @@ menu "Accelerated Cryptographic Algorithms for CPU (powerpc)" -config CRYPTO_CRC32C_VPMSUM - tristate "CRC32c" - depends on PPC64 && ALTIVEC - select CRYPTO_HASH - select CRC32 - help - CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720) - - Architecture: powerpc64 using - - AltiVec extensions - - Enable on POWER8 and newer processors for improved performance. - -config CRYPTO_CRCT10DIF_VPMSUM - tristate "CRC32T10DIF" - depends on PPC64 && ALTIVEC && CRC_T10DIF - select CRYPTO_HASH +config CRYPTO_CURVE25519_PPC64 + tristate + depends on PPC64 && CPU_LITTLE_ENDIAN + select CRYPTO_KPP + select CRYPTO_LIB_CURVE25519_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CURVE25519 + default CRYPTO_LIB_CURVE25519_INTERNAL help - CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF) - - Architecture: powerpc64 using - - AltiVec extensions - - Enable on POWER8 and newer processors for improved performance. + Curve25519 algorithm -config CRYPTO_VPMSUM_TESTER - tristate "CRC32c and CRC32T10DIF hardware acceleration tester" - depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM - help - Stress test for CRC32c and CRCT10DIF algorithms implemented with - powerpc64 AltiVec extensions (POWER8 vpmsum instructions). - Unless you are testing these algorithms, you don't need this. 
+ Architecture: PowerPC64 + - Little-endian config CRYPTO_MD5_PPC tristate "Digests: MD5" - depends on PPC select CRYPTO_HASH help MD5 message digest algorithm (RFC1321) @@ -46,7 +25,6 @@ config CRYPTO_MD5_PPC config CRYPTO_SHA1_PPC tristate "Hash functions: SHA-1" - depends on PPC help SHA-1 secure hash algorithm (FIPS 180) @@ -54,27 +32,16 @@ config CRYPTO_SHA1_PPC config CRYPTO_SHA1_PPC_SPE tristate "Hash functions: SHA-1 (SPE)" - depends on PPC && SPE + depends on SPE help SHA-1 secure hash algorithm (FIPS 180) Architecture: powerpc using - SPE (Signal Processing Engine) extensions -config CRYPTO_SHA256_PPC_SPE - tristate "Hash functions: SHA-224 and SHA-256 (SPE)" - depends on PPC && SPE - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: powerpc using - - SPE (Signal Processing Engine) extensions - config CRYPTO_AES_PPC_SPE tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)" - depends on PPC && SPE + depends on SPE select CRYPTO_SKCIPHER help Block ciphers: AES cipher algorithms (FIPS-197) @@ -101,6 +68,7 @@ config CRYPTO_AES_GCM_P10 select CRYPTO_ALGAPI select CRYPTO_AEAD select CRYPTO_SKCIPHER + select CRYPTO_SIMD help AEAD cipher: AES cipher algorithms (FIPS-197) GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D) @@ -111,32 +79,6 @@ config CRYPTO_AES_GCM_P10 Support for cryptographic acceleration instructions on Power10 or later CPU. This module supports stitched acceleration for AES/GCM. -config CRYPTO_CHACHA20_P10 - tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)" - depends on PPC64 && CPU_LITTLE_ENDIAN && VSX - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: PowerPC64 - - Power10 or later - - Little-endian - -config CRYPTO_POLY1305_P10 - tristate "Hash functions: Poly1305 (P10 or later)" - depends on PPC64 && CPU_LITTLE_ENDIAN && VSX - select CRYPTO_HASH - select CRYPTO_LIB_POLY1305_GENERIC - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: PowerPC64 - - Power10 or later - - Little-endian - config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" depends on PPC64 && VSX diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index fca0e9739869..8c2936ae466f 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -9,26 +9,17 @@ obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o -obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o -obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o -obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o -obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o -obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o -obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o +obj-$(CONFIG_CRYPTO_CURVE25519_PPC64) += curve25519-ppc64le.o aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o md5-ppc-y := md5-asm.o md5-glue.o sha1-powerpc-y := sha1-powerpc-asm.o sha1.o sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o -sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o -crc32c-vpmsum-y := 
crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o -crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o -chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o -poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o +curve25519-ppc64le-y := curve25519-ppc64le-core.o curve25519-ppc64le_asm.o ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) override flavour := linux-ppc64le @@ -54,3 +45,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y +OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index f62ee54076c0..85f4fd4b1bdc 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -5,9 +5,10 @@ * Copyright 2022- IBM Inc. All rights reserved */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/simd.h> #include <asm/switch_to.h> +#include <crypto/gcm.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/b128ops.h> @@ -24,6 +25,7 @@ #define PPC_ALIGN 16 #define GCM_IV_SIZE 12 +#define RFC4106_NONCE_SIZE 4 MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation"); MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com"); @@ -31,15 +33,16 @@ MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("aes"); asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits, - void *key); + void *key); asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key); -asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len, +asmlinkage void aes_p10_gcm_encrypt(const u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); -asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len, +asmlinkage void aes_p10_gcm_decrypt(const u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]); asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable, - unsigned char *aad, unsigned int alen); + unsigned char *aad, unsigned int alen); +asmlinkage void gcm_update(u8 *iv, void *Xi); struct aes_key { u8 key[AES_MAX_KEYLENGTH]; @@ -52,6 +55,7 @@ struct gcm_ctx { u8 aad_hash[16]; u64 aadLen; u64 Plen; /* offset 56 - used in aes_p10_gcm_{en/de}crypt */ + u8 pblock[16]; }; struct Hash_ctx { u8 H[16]; /* subkey */ @@ -60,17 +64,20 @@ struct Hash_ctx { struct p10_aes_gcm_ctx { struct aes_key enc_key; + u8 nonce[RFC4106_NONCE_SIZE]; }; static void vsx_begin(void) { preempt_disable(); + pagefault_disable(); enable_kernel_vsx(); } static void vsx_end(void) { disable_kernel_vsx(); + pagefault_enable(); preempt_enable(); } @@ -185,7 +192,7 @@ static int set_authsize(struct crypto_aead *tfm, unsigned int authsize) } static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) + unsigned int keylen) { struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm); @@ -198,7 +205,8 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, return ret ? 
-EINVAL : 0; } -static int p10_aes_gcm_crypt(struct aead_request *req, int enc) +static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv, + int assoclen, int enc) { struct crypto_tfm *tfm = req->base.tfm; struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm); @@ -206,11 +214,9 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc) struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN); u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN]; struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN); - struct scatter_walk assoc_sg_walk; struct skcipher_walk walk; u8 *assocmem = NULL; u8 *assoc; - unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN]; unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN); @@ -218,16 +224,16 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc) unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm)); u8 otag[16]; int total_processed = 0; + int nbytes; memset(databuf, 0, sizeof(databuf)); memset(hashbuf, 0, sizeof(hashbuf)); memset(ivbuf, 0, sizeof(ivbuf)); - memcpy(iv, req->iv, GCM_IV_SIZE); + memcpy(iv, riv, GCM_IV_SIZE); /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length) { - scatterwalk_start(&assoc_sg_walk, req->src); - assoc = scatterwalk_map(&assoc_sg_walk); + assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */ } else { gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; @@ -245,10 +251,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc) gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen); vsx_end(); - if (!assocmem) - scatterwalk_unmap(assoc); - else - kfree(assocmem); + kfree(assocmem); if (enc) ret = skcipher_walk_aead_encrypt(&walk, req, false); @@ -257,19 +260,25 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc) if (ret) return ret; - while (walk.nbytes > 0 && ret == 0) { + while ((nbytes = walk.nbytes) > 0 && ret == 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 buf[AES_BLOCK_SIZE]; + + if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) + src = dst = memcpy(buf, src, nbytes); vsx_begin(); if (enc) - aes_p10_gcm_encrypt(walk.src.virt.addr, - walk.dst.virt.addr, - walk.nbytes, + aes_p10_gcm_encrypt(src, dst, nbytes, &ctx->enc_key, gctx->iv, hash->Htable); else - aes_p10_gcm_decrypt(walk.src.virt.addr, - walk.dst.virt.addr, - walk.nbytes, + aes_p10_gcm_decrypt(src, dst, nbytes, &ctx->enc_key, gctx->iv, hash->Htable); + + if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) + memcpy(walk.dst.virt.addr, buf, nbytes); + vsx_end(); total_processed += walk.nbytes; @@ -281,6 +290,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc) /* Finalize hash */ vsx_begin(); + gcm_update(gctx->iv, hash->Htable); finish_tag(gctx, hash, total_processed); vsx_end(); @@ -302,17 +312,63 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc) return 0; } +static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey, + unsigned int keylen) +{ + struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); + int err; + + keylen -= RFC4106_NONCE_SIZE; + err = p10_aes_gcm_setkey(tfm, inkey, keylen); + if (err) + return err; + + memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE); + return 0; +} + +static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + return crypto_rfc4106_check_authsize(authsize); +} + +static int 
rfc4106_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); + u8 iv[AES_BLOCK_SIZE]; + + memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE); + memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE); + + return crypto_ipsec_check_assoclen(req->assoclen) ?: + p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 1); +} + +static int rfc4106_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); + u8 iv[AES_BLOCK_SIZE]; + + memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE); + memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE); + + return crypto_ipsec_check_assoclen(req->assoclen) ?: + p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 0); +} + static int p10_aes_gcm_encrypt(struct aead_request *req) { - return p10_aes_gcm_crypt(req, 1); + return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 1); } static int p10_aes_gcm_decrypt(struct aead_request *req) { - return p10_aes_gcm_crypt(req, 0); + return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 0); } -static struct aead_alg gcm_aes_alg = { +static struct aead_alg gcm_aes_algs[] = {{ .ivsize = GCM_IV_SIZE, .maxauthsize = 16, @@ -321,23 +377,57 @@ static struct aead_alg gcm_aes_alg = { .encrypt = p10_aes_gcm_encrypt, .decrypt = p10_aes_gcm_decrypt, - .base.cra_name = "gcm(aes)", - .base.cra_driver_name = "aes_gcm_p10", + .base.cra_name = "__gcm(aes)", + .base.cra_driver_name = "__aes_gcm_p10", .base.cra_priority = 2100, .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx), + .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx)+ + 4 * sizeof(u64[2]), .base.cra_module = THIS_MODULE, -}; + .base.cra_flags = CRYPTO_ALG_INTERNAL, +}, { + .ivsize = GCM_RFC4106_IV_SIZE, + .maxauthsize = 16, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = rfc4106_encrypt, + .decrypt = rfc4106_decrypt, + + .base.cra_name = "__rfc4106(gcm(aes))", + .base.cra_driver_name = "__rfc4106_aes_gcm_p10", + .base.cra_priority = 2100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx) + + 4 * sizeof(u64[2]), + .base.cra_module = THIS_MODULE, + .base.cra_flags = CRYPTO_ALG_INTERNAL, +}}; + +static struct simd_aead_alg *p10_simd_aeads[ARRAY_SIZE(gcm_aes_algs)]; static int __init p10_init(void) { - return crypto_register_aead(&gcm_aes_alg); + int ret; + + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + return 0; + + ret = simd_register_aeads_compat(gcm_aes_algs, + ARRAY_SIZE(gcm_aes_algs), + p10_simd_aeads); + if (ret) { + simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs), + p10_simd_aeads); + return ret; + } + return 0; } static void __exit p10_exit(void) { - crypto_unregister_aead(&gcm_aes_alg); + simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs), + p10_simd_aeads); } -module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init); +module_init(p10_init); module_exit(p10_exit); diff --git a/arch/powerpc/crypto/aes-gcm-p10.S b/arch/powerpc/crypto/aes-gcm-p10.S index a51f4b265308..89f50eef3512 100644 --- a/arch/powerpc/crypto/aes-gcm-p10.S +++ b/arch/powerpc/crypto/aes-gcm-p10.S @@ -1,42 +1,42 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ - # - # Accelerated AES-GCM stitched implementation for ppc64le. - # - # Copyright 2022- IBM Inc. 
All rights reserved - # - #=================================================================================== - # Written by Danny Tsen <dtsen@linux.ibm.com> - # - # GHASH is based on the Karatsuba multiplication method. - # - # Xi xor X1 - # - # X1 * H^4 + X2 * H^3 + x3 * H^2 + X4 * H = - # (X1.h * H4.h + xX.l * H4.l + X1 * H4) + - # (X2.h * H3.h + X2.l * H3.l + X2 * H3) + - # (X3.h * H2.h + X3.l * H2.l + X3 * H2) + - # (X4.h * H.h + X4.l * H.l + X4 * H) - # - # Xi = v0 - # H Poly = v2 - # Hash keys = v3 - v14 - # ( H.l, H, H.h) - # ( H^2.l, H^2, H^2.h) - # ( H^3.l, H^3, H^3.h) - # ( H^4.l, H^4, H^4.h) - # - # v30 is IV - # v31 - counter 1 - # - # AES used, - # vs0 - vs14 for round keys - # v15, v16, v17, v18, v19, v20, v21, v22 for 8 blocks (encrypted) - # - # This implementation uses stitched AES-GCM approach to improve overall performance. - # AES is implemented with 8x blocks and GHASH is using 2 4x blocks. - # - # =================================================================================== - # +# +# Accelerated AES-GCM stitched implementation for ppc64le. +# +# Copyright 2024- IBM Inc. +# +#=================================================================================== +# Written by Danny Tsen <dtsen@us.ibm.com> +# +# GHASH is based on the Karatsuba multiplication method. +# +# Xi xor X1 +# +# X1 * H^4 + X2 * H^3 + x3 * H^2 + X4 * H = +# (X1.h * H4.h + xX.l * H4.l + X1 * H4) + +# (X2.h * H3.h + X2.l * H3.l + X2 * H3) + +# (X3.h * H2.h + X3.l * H2.l + X3 * H2) + +# (X4.h * H.h + X4.l * H.l + X4 * H) +# +# Xi = v0 +# H Poly = v2 +# Hash keys = v3 - v14 +# ( H.l, H, H.h) +# ( H^2.l, H^2, H^2.h) +# ( H^3.l, H^3, H^3.h) +# ( H^4.l, H^4, H^4.h) +# +# v30 is IV +# v31 - counter 1 +# +# AES used, +# vs0 - round key 0 +# v15, v16, v17, v18, v19, v20, v21, v22 for 8 blocks (encrypted) +# +# This implementation uses stitched AES-GCM approach to improve overall performance. +# AES is implemented with 8x blocks and GHASH is using 2 4x blocks. 
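+#
+# One way to read the aggregation above (a sketch, arithmetic in GF(2^128)):
+# a single folded step computes
+#     Xi' = (Xi ^ X1)*H^4 ^ X2*H^3 ^ X3*H^2 ^ X4*H
+# which equals four sequential Xi = (Xi ^ Xj)*H updates, with the vpmsumd
+# partial products summed before reduction.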
+# +# =================================================================================== +# #include <asm/ppc_asm.h> #include <linux/linkage.h> @@ -44,483 +44,224 @@ .machine "any" .text - # 4x loops - # v15 - v18 - input states - # vs1 - vs9 - round keys - # -.macro Loop_aes_middle4x - xxlor 19+32, 1, 1 - xxlor 20+32, 2, 2 - xxlor 21+32, 3, 3 - xxlor 22+32, 4, 4 - - vcipher 15, 15, 19 - vcipher 16, 16, 19 - vcipher 17, 17, 19 - vcipher 18, 18, 19 - - vcipher 15, 15, 20 - vcipher 16, 16, 20 - vcipher 17, 17, 20 - vcipher 18, 18, 20 - - vcipher 15, 15, 21 - vcipher 16, 16, 21 - vcipher 17, 17, 21 - vcipher 18, 18, 21 - - vcipher 15, 15, 22 - vcipher 16, 16, 22 - vcipher 17, 17, 22 - vcipher 18, 18, 22 - - xxlor 19+32, 5, 5 - xxlor 20+32, 6, 6 - xxlor 21+32, 7, 7 - xxlor 22+32, 8, 8 - - vcipher 15, 15, 19 - vcipher 16, 16, 19 - vcipher 17, 17, 19 - vcipher 18, 18, 19 - - vcipher 15, 15, 20 - vcipher 16, 16, 20 - vcipher 17, 17, 20 - vcipher 18, 18, 20 - - vcipher 15, 15, 21 - vcipher 16, 16, 21 - vcipher 17, 17, 21 - vcipher 18, 18, 21 - - vcipher 15, 15, 22 - vcipher 16, 16, 22 - vcipher 17, 17, 22 - vcipher 18, 18, 22 - - xxlor 23+32, 9, 9 - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 +.macro SAVE_GPR GPR OFFSET FRAME + std \GPR,\OFFSET(\FRAME) .endm - # 8x loops - # v15 - v22 - input states - # vs1 - vs9 - round keys - # -.macro Loop_aes_middle8x - xxlor 23+32, 1, 1 - xxlor 24+32, 2, 2 - xxlor 25+32, 3, 3 - xxlor 26+32, 4, 4 - - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 - - vcipher 15, 15, 24 - vcipher 16, 16, 24 - vcipher 17, 17, 24 - vcipher 18, 18, 24 - vcipher 19, 19, 24 - vcipher 20, 20, 24 - vcipher 21, 21, 24 - vcipher 22, 22, 24 - - vcipher 15, 15, 25 - vcipher 16, 16, 25 - vcipher 17, 17, 25 - vcipher 18, 18, 25 - vcipher 19, 19, 25 - vcipher 20, 20, 25 - vcipher 21, 21, 25 - vcipher 22, 22, 25 - - vcipher 15, 15, 26 - vcipher 16, 16, 26 - vcipher 17, 17, 26 - vcipher 18, 18, 26 - vcipher 19, 19, 26 - vcipher 20, 20, 26 - vcipher 21, 21, 26 - vcipher 22, 22, 26 - - xxlor 23+32, 5, 5 - xxlor 24+32, 6, 6 - xxlor 25+32, 7, 7 - xxlor 26+32, 8, 8 - - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 - - vcipher 15, 15, 24 - vcipher 16, 16, 24 - vcipher 17, 17, 24 - vcipher 18, 18, 24 - vcipher 19, 19, 24 - vcipher 20, 20, 24 - vcipher 21, 21, 24 - vcipher 22, 22, 24 - - vcipher 15, 15, 25 - vcipher 16, 16, 25 - vcipher 17, 17, 25 - vcipher 18, 18, 25 - vcipher 19, 19, 25 - vcipher 20, 20, 25 - vcipher 21, 21, 25 - vcipher 22, 22, 25 - - vcipher 15, 15, 26 - vcipher 16, 16, 26 - vcipher 17, 17, 26 - vcipher 18, 18, 26 - vcipher 19, 19, 26 - vcipher 20, 20, 26 - vcipher 21, 21, 26 - vcipher 22, 22, 26 - - xxlor 23+32, 9, 9 - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 +.macro SAVE_VRS VRS OFFSET FRAME + stxv \VRS+32, \OFFSET(\FRAME) .endm -.macro Loop_aes_middle_1x - xxlor 19+32, 1, 1 - xxlor 20+32, 2, 2 - xxlor 21+32, 3, 3 - xxlor 22+32, 4, 4 - - vcipher 15, 15, 19 - vcipher 15, 15, 20 - vcipher 15, 15, 21 - vcipher 15, 15, 22 - - xxlor 19+32, 5, 5 - xxlor 20+32, 6, 6 - xxlor 21+32, 7, 7 - xxlor 22+32, 8, 8 - - vcipher 15, 15, 19 - vcipher 15, 15, 20 - vcipher 15, 15, 21 - vcipher 15, 15, 22 
- - xxlor 19+32, 9, 9 - vcipher 15, 15, 19 +.macro RESTORE_GPR GPR OFFSET FRAME + ld \GPR,\OFFSET(\FRAME) .endm - # - # Compute 4x hash values based on Karatsuba method. - # -.macro ppc_aes_gcm_ghash - vxor 15, 15, 0 - - vpmsumd 23, 12, 15 # H4.L * X.L - vpmsumd 24, 9, 16 - vpmsumd 25, 6, 17 - vpmsumd 26, 3, 18 - - vxor 23, 23, 24 - vxor 23, 23, 25 - vxor 23, 23, 26 # L - - vpmsumd 24, 13, 15 # H4.L * X.H + H4.H * X.L - vpmsumd 25, 10, 16 # H3.L * X1.H + H3.H * X1.L - vpmsumd 26, 7, 17 - vpmsumd 27, 4, 18 - - vxor 24, 24, 25 - vxor 24, 24, 26 - vxor 24, 24, 27 # M - - # sum hash and reduction with H Poly - vpmsumd 28, 23, 2 # reduction - - vxor 29, 29, 29 - vsldoi 26, 24, 29, 8 # mL - vsldoi 29, 29, 24, 8 # mH - vxor 23, 23, 26 # mL + L - - vsldoi 23, 23, 23, 8 # swap - vxor 23, 23, 28 - - vpmsumd 24, 14, 15 # H4.H * X.H - vpmsumd 25, 11, 16 - vpmsumd 26, 8, 17 - vpmsumd 27, 5, 18 - - vxor 24, 24, 25 - vxor 24, 24, 26 - vxor 24, 24, 27 - - vxor 24, 24, 29 - - # sum hash and reduction with H Poly - vsldoi 27, 23, 23, 8 # swap - vpmsumd 23, 23, 2 - vxor 27, 27, 24 - vxor 23, 23, 27 - - xxlor 32, 23+32, 23+32 # update hash - +.macro RESTORE_VRS VRS OFFSET FRAME + lxv \VRS+32, \OFFSET(\FRAME) .endm - # - # Combine two 4x ghash - # v15 - v22 - input blocks - # -.macro ppc_aes_gcm_ghash2_4x - # first 4x hash - vxor 15, 15, 0 # Xi + X - - vpmsumd 23, 12, 15 # H4.L * X.L - vpmsumd 24, 9, 16 - vpmsumd 25, 6, 17 - vpmsumd 26, 3, 18 - - vxor 23, 23, 24 - vxor 23, 23, 25 - vxor 23, 23, 26 # L - - vpmsumd 24, 13, 15 # H4.L * X.H + H4.H * X.L - vpmsumd 25, 10, 16 # H3.L * X1.H + H3.H * X1.L - vpmsumd 26, 7, 17 - vpmsumd 27, 4, 18 - - vxor 24, 24, 25 - vxor 24, 24, 26 - - # sum hash and reduction with H Poly - vpmsumd 28, 23, 2 # reduction - - vxor 29, 29, 29 - - vxor 24, 24, 27 # M - vsldoi 26, 24, 29, 8 # mL - vsldoi 29, 29, 24, 8 # mH - vxor 23, 23, 26 # mL + L - - vsldoi 23, 23, 23, 8 # swap - vxor 23, 23, 28 +.macro SAVE_REGS + mflr 0 + std 0, 16(1) + stdu 1,-512(1) + + SAVE_GPR 14, 112, 1 + SAVE_GPR 15, 120, 1 + SAVE_GPR 16, 128, 1 + SAVE_GPR 17, 136, 1 + SAVE_GPR 18, 144, 1 + SAVE_GPR 19, 152, 1 + SAVE_GPR 20, 160, 1 + SAVE_GPR 21, 168, 1 + SAVE_GPR 22, 176, 1 + SAVE_GPR 23, 184, 1 + SAVE_GPR 24, 192, 1 + + addi 9, 1, 256 + SAVE_VRS 20, 0, 9 + SAVE_VRS 21, 16, 9 + SAVE_VRS 22, 32, 9 + SAVE_VRS 23, 48, 9 + SAVE_VRS 24, 64, 9 + SAVE_VRS 25, 80, 9 + SAVE_VRS 26, 96, 9 + SAVE_VRS 27, 112, 9 + SAVE_VRS 28, 128, 9 + SAVE_VRS 29, 144, 9 + SAVE_VRS 30, 160, 9 + SAVE_VRS 31, 176, 9 +.endm # SAVE_REGS - vpmsumd 24, 14, 15 # H4.H * X.H - vpmsumd 25, 11, 16 - vpmsumd 26, 8, 17 - vpmsumd 27, 5, 18 +.macro RESTORE_REGS + addi 9, 1, 256 + RESTORE_VRS 20, 0, 9 + RESTORE_VRS 21, 16, 9 + RESTORE_VRS 22, 32, 9 + RESTORE_VRS 23, 48, 9 + RESTORE_VRS 24, 64, 9 + RESTORE_VRS 25, 80, 9 + RESTORE_VRS 26, 96, 9 + RESTORE_VRS 27, 112, 9 + RESTORE_VRS 28, 128, 9 + RESTORE_VRS 29, 144, 9 + RESTORE_VRS 30, 160, 9 + RESTORE_VRS 31, 176, 9 + + RESTORE_GPR 14, 112, 1 + RESTORE_GPR 15, 120, 1 + RESTORE_GPR 16, 128, 1 + RESTORE_GPR 17, 136, 1 + RESTORE_GPR 18, 144, 1 + RESTORE_GPR 19, 152, 1 + RESTORE_GPR 20, 160, 1 + RESTORE_GPR 21, 168, 1 + RESTORE_GPR 22, 176, 1 + RESTORE_GPR 23, 184, 1 + RESTORE_GPR 24, 192, 1 + + addi 1, 1, 512 + ld 0, 16(1) + mtlr 0 +.endm # RESTORE_REGS + +# 4x loops +.macro AES_CIPHER_4x _VCIPHER ST r + \_VCIPHER \ST, \ST, \r + \_VCIPHER \ST+1, \ST+1, \r + \_VCIPHER \ST+2, \ST+2, \r + \_VCIPHER \ST+3, \ST+3, \r +.endm - vxor 24, 24, 25 - vxor 24, 24, 26 - vxor 24, 24, 27 # H +# 8x loops +.macro 
AES_CIPHER_8x _VCIPHER ST r + \_VCIPHER \ST, \ST, \r + \_VCIPHER \ST+1, \ST+1, \r + \_VCIPHER \ST+2, \ST+2, \r + \_VCIPHER \ST+3, \ST+3, \r + \_VCIPHER \ST+4, \ST+4, \r + \_VCIPHER \ST+5, \ST+5, \r + \_VCIPHER \ST+6, \ST+6, \r + \_VCIPHER \ST+7, \ST+7, \r +.endm - vxor 24, 24, 29 # H + mH +.macro LOOP_8AES_STATE + xxlor 32+23, 1, 1 + xxlor 32+24, 2, 2 + xxlor 32+25, 3, 3 + xxlor 32+26, 4, 4 + AES_CIPHER_8x vcipher, 15, 23 + AES_CIPHER_8x vcipher, 15, 24 + AES_CIPHER_8x vcipher, 15, 25 + AES_CIPHER_8x vcipher, 15, 26 + xxlor 32+23, 5, 5 + xxlor 32+24, 6, 6 + xxlor 32+25, 7, 7 + xxlor 32+26, 8, 8 + AES_CIPHER_8x vcipher, 15, 23 + AES_CIPHER_8x vcipher, 15, 24 + AES_CIPHER_8x vcipher, 15, 25 + AES_CIPHER_8x vcipher, 15, 26 +.endm - # sum hash and reduction with H Poly - vsldoi 27, 23, 23, 8 # swap - vpmsumd 23, 23, 2 - vxor 27, 27, 24 - vxor 27, 23, 27 # 1st Xi - - # 2nd 4x hash - vpmsumd 24, 9, 20 - vpmsumd 25, 6, 21 - vpmsumd 26, 3, 22 - vxor 19, 19, 27 # Xi + X - vpmsumd 23, 12, 19 # H4.L * X.L - - vxor 23, 23, 24 - vxor 23, 23, 25 - vxor 23, 23, 26 # L - - vpmsumd 24, 13, 19 # H4.L * X.H + H4.H * X.L - vpmsumd 25, 10, 20 # H3.L * X1.H + H3.H * X1.L - vpmsumd 26, 7, 21 - vpmsumd 27, 4, 22 - - vxor 24, 24, 25 - vxor 24, 24, 26 +# +# PPC_GHASH4x(H, S1, S2, S3, S4): Compute 4x hash values based on Karatsuba method. +# H: returning digest +# S#: states +# +# S1 should xor with the previous digest +# +# Xi = v0 +# H Poly = v2 +# Hash keys = v3 - v14 +# Scratch: v23 - v29 +# +.macro PPC_GHASH4x H S1 S2 S3 S4 + + vpmsumd 23, 12, \S1 # H4.L * X.L + vpmsumd 24, 9, \S2 + vpmsumd 25, 6, \S3 + vpmsumd 26, 3, \S4 + + vpmsumd 27, 13, \S1 # H4.L * X.H + H4.H * X.L + vpmsumd 28, 10, \S2 # H3.L * X1.H + H3.H * X1.L + + vxor 23, 23, 24 + vxor 23, 23, 25 + vxor 23, 23, 26 # L + + vxor 24, 27, 28 + vpmsumd 25, 7, \S3 + vpmsumd 26, 4, \S4 + + vxor 24, 24, 25 + vxor 24, 24, 26 # M # sum hash and reduction with H Poly - vpmsumd 28, 23, 2 # reduction - - vxor 29, 29, 29 + vpmsumd 28, 23, 2 # reduction - vxor 24, 24, 27 # M - vsldoi 26, 24, 29, 8 # mL - vsldoi 29, 29, 24, 8 # mH - vxor 23, 23, 26 # mL + L + vxor 1, 1, 1 + vsldoi 25, 24, 1, 8 # mL + vsldoi 1, 1, 24, 8 # mH + vxor 23, 23, 25 # mL + L - vsldoi 23, 23, 23, 8 # swap - vxor 23, 23, 28 + # This performs swap and xor like, + # vsldoi 23, 23, 23, 8 # swap + # vxor 23, 23, 28 + xxlor 32+25, 10, 10 + vpermxor 23, 23, 28, 25 - vpmsumd 24, 14, 19 # H4.H * X.H - vpmsumd 25, 11, 20 - vpmsumd 26, 8, 21 - vpmsumd 27, 5, 22 + vpmsumd 26, 14, \S1 # H4.H * X.H + vpmsumd 27, 11, \S2 + vpmsumd 28, 8, \S3 + vpmsumd 29, 5, \S4 - vxor 24, 24, 25 - vxor 24, 24, 26 - vxor 24, 24, 27 # H + vxor 24, 26, 27 + vxor 24, 24, 28 + vxor 24, 24, 29 - vxor 24, 24, 29 # H + mH + vxor 24, 24, 1 # sum hash and reduction with H Poly - vsldoi 27, 23, 23, 8 # swap - vpmsumd 23, 23, 2 - vxor 27, 27, 24 - vxor 23, 23, 27 - - xxlor 32, 23+32, 23+32 # update hash - + vsldoi 25, 23, 23, 8 # swap + vpmsumd 23, 23, 2 + vxor 27, 25, 24 + vxor \H, 23, 27 .endm - # - # Compute update single hash - # -.macro ppc_update_hash_1x - vxor 28, 28, 0 - - vxor 19, 19, 19 +# +# Compute update single ghash +# scratch: v1, v22..v27 +# +.macro PPC_GHASH1x H S1 - vpmsumd 22, 3, 28 # L - vpmsumd 23, 4, 28 # M - vpmsumd 24, 5, 28 # H + vxor 1, 1, 1 - vpmsumd 27, 22, 2 # reduction + vpmsumd 22, 3, \S1 # L + vpmsumd 23, 4, \S1 # M + vpmsumd 24, 5, \S1 # H - vsldoi 25, 23, 19, 8 # mL - vsldoi 26, 19, 23, 8 # mH - vxor 22, 22, 25 # LL + LL - vxor 24, 24, 26 # HH + HH + vpmsumd 27, 22, 2 # reduction - vsldoi 22, 22, 22, 
8 # swap - vxor 22, 22, 27 + vsldoi 25, 23, 1, 8 # mL + vsldoi 26, 1, 23, 8 # mH + vxor 22, 22, 25 # LL + LL + vxor 24, 24, 26 # HH + HH - vsldoi 20, 22, 22, 8 # swap - vpmsumd 22, 22, 2 # reduction - vxor 20, 20, 24 - vxor 22, 22, 20 + xxlor 32+25, 10, 10 + vpermxor 22, 22, 27, 25 - vmr 0, 22 # update hash - -.endm - -.macro SAVE_REGS - stdu 1,-640(1) - mflr 0 - - std 14,112(1) - std 15,120(1) - std 16,128(1) - std 17,136(1) - std 18,144(1) - std 19,152(1) - std 20,160(1) - std 21,168(1) - li 9, 256 - stvx 20, 9, 1 - addi 9, 9, 16 - stvx 21, 9, 1 - addi 9, 9, 16 - stvx 22, 9, 1 - addi 9, 9, 16 - stvx 23, 9, 1 - addi 9, 9, 16 - stvx 24, 9, 1 - addi 9, 9, 16 - stvx 25, 9, 1 - addi 9, 9, 16 - stvx 26, 9, 1 - addi 9, 9, 16 - stvx 27, 9, 1 - addi 9, 9, 16 - stvx 28, 9, 1 - addi 9, 9, 16 - stvx 29, 9, 1 - addi 9, 9, 16 - stvx 30, 9, 1 - addi 9, 9, 16 - stvx 31, 9, 1 - stxv 14, 464(1) - stxv 15, 480(1) - stxv 16, 496(1) - stxv 17, 512(1) - stxv 18, 528(1) - stxv 19, 544(1) - stxv 20, 560(1) - stxv 21, 576(1) - stxv 22, 592(1) - std 0, 656(1) -.endm - -.macro RESTORE_REGS - lxv 14, 464(1) - lxv 15, 480(1) - lxv 16, 496(1) - lxv 17, 512(1) - lxv 18, 528(1) - lxv 19, 544(1) - lxv 20, 560(1) - lxv 21, 576(1) - lxv 22, 592(1) - li 9, 256 - lvx 20, 9, 1 - addi 9, 9, 16 - lvx 21, 9, 1 - addi 9, 9, 16 - lvx 22, 9, 1 - addi 9, 9, 16 - lvx 23, 9, 1 - addi 9, 9, 16 - lvx 24, 9, 1 - addi 9, 9, 16 - lvx 25, 9, 1 - addi 9, 9, 16 - lvx 26, 9, 1 - addi 9, 9, 16 - lvx 27, 9, 1 - addi 9, 9, 16 - lvx 28, 9, 1 - addi 9, 9, 16 - lvx 29, 9, 1 - addi 9, 9, 16 - lvx 30, 9, 1 - addi 9, 9, 16 - lvx 31, 9, 1 - - ld 0, 656(1) - ld 14,112(1) - ld 15,120(1) - ld 16,128(1) - ld 17,136(1) - ld 18,144(1) - ld 19,152(1) - ld 20,160(1) - ld 21,168(1) - - mtlr 0 - addi 1, 1, 640 + vsldoi 23, 22, 22, 8 # swap + vpmsumd 22, 22, 2 # reduction + vxor 23, 23, 24 + vxor \H, 22, 23 .endm +# +# LOAD_HASH_TABLE +# Xi = v0 +# H Poly = v2 +# Hash keys = v3 - v14 +# .macro LOAD_HASH_TABLE # Load Xi lxvb16x 32, 0, 8 # load Xi @@ -557,657 +298,434 @@ lxvd2x 14+32, 10, 8 # H^4h .endm - # - # aes_p10_gcm_encrypt (const void *inp, void *out, size_t len, - # const char *rk, unsigned char iv[16], void *Xip); - # - # r3 - inp - # r4 - out - # r5 - len - # r6 - AES round keys - # r7 - iv and other data - # r8 - Xi, HPoli, hash keys - # - # rounds is at offset 240 in rk - # Xi is at 0 in gcm_table (Xip). 
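The C-side contract for this entry point is set by aes-gcm-p10-glue.c earlier in this diff; a minimal sketch of the call pattern (the helper name encrypt_blocks() is hypothetical, the prototype and call are taken from the glue code above):

	/* Sketch only: p10_aes_gcm_crypt() in the glue is the real driver. */
	asmlinkage void aes_p10_gcm_encrypt(const u8 *in, u8 *out, size_t len,
					    void *rkey, u8 *iv, void *Xi);

	static void encrypt_blocks(const u8 *src, u8 *dst, size_t nbytes,
				   struct p10_aes_gcm_ctx *ctx,
				   struct gcm_ctx *gctx, struct Hash_ctx *hash)
	{
		vsx_begin();	/* preemption and pagefaults off, VSX on */
		aes_p10_gcm_encrypt(src, dst, nbytes, &ctx->enc_key,
				    gctx->iv, hash->Htable);
		vsx_end();
	}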
- # -_GLOBAL(aes_p10_gcm_encrypt) -.align 5 - - SAVE_REGS - - LOAD_HASH_TABLE - - # initialize ICB: GHASH( IV ), IV - r7 - lxvb16x 30+32, 0, 7 # load IV - v30 - - mr 12, 5 # length - li 11, 0 # block index - - # counter 1 - vxor 31, 31, 31 - vspltisb 22, 1 - vsldoi 31, 31, 22,1 # counter 1 - - # load round key to VSR - lxv 0, 0(6) - lxv 1, 0x10(6) - lxv 2, 0x20(6) - lxv 3, 0x30(6) - lxv 4, 0x40(6) - lxv 5, 0x50(6) - lxv 6, 0x60(6) - lxv 7, 0x70(6) - lxv 8, 0x80(6) - lxv 9, 0x90(6) - lxv 10, 0xa0(6) - - # load rounds - 10 (128), 12 (192), 14 (256) - lwz 9,240(6) - - # - # vxor state, state, w # addroundkey - xxlor 32+29, 0, 0 - vxor 15, 30, 29 # IV + round key - add round key 0 - - cmpdi 9, 10 - beq Loop_aes_gcm_8x - - # load 2 more round keys (v11, v12) - lxv 11, 0xb0(6) - lxv 12, 0xc0(6) - - cmpdi 9, 12 - beq Loop_aes_gcm_8x - - # load 2 more round keys (v11, v12, v13, v14) - lxv 13, 0xd0(6) - lxv 14, 0xe0(6) - cmpdi 9, 14 - beq Loop_aes_gcm_8x - - b aes_gcm_out - -.align 5 -Loop_aes_gcm_8x: - mr 14, 3 - mr 9, 4 - - # - # check partial block - # -Continue_partial_check: - ld 15, 56(7) - cmpdi 15, 0 - beq Continue - bgt Final_block - cmpdi 15, 16 - blt Final_block - -Continue: - # n blcoks - li 10, 128 - divdu 10, 12, 10 # n 128 bytes-blocks - cmpdi 10, 0 - beq Loop_last_block - - vaddudm 30, 30, 31 # IV + counter - vxor 16, 30, 29 - vaddudm 30, 30, 31 - vxor 17, 30, 29 - vaddudm 30, 30, 31 - vxor 18, 30, 29 - vaddudm 30, 30, 31 - vxor 19, 30, 29 - vaddudm 30, 30, 31 - vxor 20, 30, 29 - vaddudm 30, 30, 31 - vxor 21, 30, 29 - vaddudm 30, 30, 31 - vxor 22, 30, 29 - - mtctr 10 - - li 15, 16 - li 16, 32 - li 17, 48 - li 18, 64 - li 19, 80 - li 20, 96 - li 21, 112 - - lwz 10, 240(6) - -Loop_8x_block: - - lxvb16x 15, 0, 14 # load block - lxvb16x 16, 15, 14 # load block - lxvb16x 17, 16, 14 # load block - lxvb16x 18, 17, 14 # load block - lxvb16x 19, 18, 14 # load block - lxvb16x 20, 19, 14 # load block - lxvb16x 21, 20, 14 # load block - lxvb16x 22, 21, 14 # load block - addi 14, 14, 128 - - Loop_aes_middle8x - - xxlor 23+32, 10, 10 - - cmpdi 10, 10 - beq Do_next_ghash - - # 192 bits - xxlor 24+32, 11, 11 - - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 - - vcipher 15, 15, 24 - vcipher 16, 16, 24 - vcipher 17, 17, 24 - vcipher 18, 18, 24 - vcipher 19, 19, 24 - vcipher 20, 20, 24 - vcipher 21, 21, 24 - vcipher 22, 22, 24 - - xxlor 23+32, 12, 12 - - cmpdi 10, 12 - beq Do_next_ghash - - # 256 bits - xxlor 24+32, 13, 13 - - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 - - vcipher 15, 15, 24 - vcipher 16, 16, 24 - vcipher 17, 17, 24 - vcipher 18, 18, 24 - vcipher 19, 19, 24 - vcipher 20, 20, 24 - vcipher 21, 21, 24 - vcipher 22, 22, 24 - - xxlor 23+32, 14, 14 - - cmpdi 10, 14 - beq Do_next_ghash - b aes_gcm_out - -Do_next_ghash: - - # - # last round - vcipherlast 15, 15, 23 - vcipherlast 16, 16, 23 - - xxlxor 47, 47, 15 - stxvb16x 47, 0, 9 # store output - xxlxor 48, 48, 16 - stxvb16x 48, 15, 9 # store output - - vcipherlast 17, 17, 23 - vcipherlast 18, 18, 23 - - xxlxor 49, 49, 17 - stxvb16x 49, 16, 9 # store output - xxlxor 50, 50, 18 - stxvb16x 50, 17, 9 # store output - - vcipherlast 19, 19, 23 - vcipherlast 20, 20, 23 - - xxlxor 51, 51, 19 - stxvb16x 51, 18, 9 # store output - xxlxor 52, 52, 20 - stxvb16x 52, 19, 9 # store output - - vcipherlast 21, 21, 23 - 
vcipherlast 22, 22, 23 - - xxlxor 53, 53, 21 - stxvb16x 53, 20, 9 # store output - xxlxor 54, 54, 22 - stxvb16x 54, 21, 9 # store output - - addi 9, 9, 128 - - # ghash here - ppc_aes_gcm_ghash2_4x - - xxlor 27+32, 0, 0 - vaddudm 30, 30, 31 # IV + counter - vmr 29, 30 - vxor 15, 30, 27 # add round key - vaddudm 30, 30, 31 - vxor 16, 30, 27 - vaddudm 30, 30, 31 - vxor 17, 30, 27 - vaddudm 30, 30, 31 - vxor 18, 30, 27 - vaddudm 30, 30, 31 - vxor 19, 30, 27 - vaddudm 30, 30, 31 - vxor 20, 30, 27 - vaddudm 30, 30, 31 - vxor 21, 30, 27 - vaddudm 30, 30, 31 - vxor 22, 30, 27 - - addi 12, 12, -128 - addi 11, 11, 128 - - bdnz Loop_8x_block - - vmr 30, 29 - stxvb16x 30+32, 0, 7 # update IV - -Loop_last_block: - cmpdi 12, 0 - beq aes_gcm_out - - # loop last few blocks +################################################################################ +# Compute AES and ghash one block at a time. +# r23: AES rounds +# v30: current IV +# vs0: roundkey 0 +# +################################################################################ +SYM_FUNC_START_LOCAL(aes_gcm_crypt_1x) + + cmpdi 5, 16 + bge __More_1x + blr +__More_1x: li 10, 16 - divdu 10, 12, 10 - - mtctr 10 - - lwz 10, 240(6) - - cmpdi 12, 16 - blt Final_block - -Next_rem_block: - lxvb16x 15, 0, 14 # load block - - Loop_aes_middle_1x - - xxlor 23+32, 10, 10 - - cmpdi 10, 10 - beq Do_next_1x - - # 192 bits - xxlor 24+32, 11, 11 - - vcipher 15, 15, 23 - vcipher 15, 15, 24 - - xxlor 23+32, 12, 12 + divdu 12, 5, 10 + + xxlxor 32+15, 32+30, 0 + + # Pre-load 8 AES rounds to scratch vectors. + xxlor 32+16, 1, 1 + xxlor 32+17, 2, 2 + xxlor 32+18, 3, 3 + xxlor 32+19, 4, 4 + xxlor 32+20, 5, 5 + xxlor 32+21, 6, 6 + xxlor 32+28, 7, 7 + xxlor 32+29, 8, 8 + lwz 23, 240(6) # n rounds + addi 22, 23, -9 # remaing AES rounds - cmpdi 10, 12 - beq Do_next_1x - - # 256 bits - xxlor 24+32, 13, 13 - - vcipher 15, 15, 23 - vcipher 15, 15, 24 - - xxlor 23+32, 14, 14 - - cmpdi 10, 14 - beq Do_next_1x - -Do_next_1x: - vcipherlast 15, 15, 23 - - xxlxor 47, 47, 15 - stxvb16x 47, 0, 9 # store output - addi 14, 14, 16 - addi 9, 9, 16 - - vmr 28, 15 - ppc_update_hash_1x - - addi 12, 12, -16 - addi 11, 11, 16 - xxlor 19+32, 0, 0 - vaddudm 30, 30, 31 # IV + counter - vxor 15, 30, 19 # add round key - - bdnz Next_rem_block - - li 15, 0 - std 15, 56(7) # clear partial? - stxvb16x 30+32, 0, 7 # update IV cmpdi 12, 0 - beq aes_gcm_out - -Final_block: - lwz 10, 240(6) - Loop_aes_middle_1x - - xxlor 23+32, 10, 10 - - cmpdi 10, 10 - beq Do_final_1x - - # 192 bits - xxlor 24+32, 11, 11 - - vcipher 15, 15, 23 - vcipher 15, 15, 24 - - xxlor 23+32, 12, 12 - - cmpdi 10, 12 - beq Do_final_1x - - # 256 bits - xxlor 24+32, 13, 13 - - vcipher 15, 15, 23 - vcipher 15, 15, 24 + bgt __Loop_1x + blr - xxlor 23+32, 14, 14 +__Loop_1x: + mtctr 22 + addi 10, 6, 144 + vcipher 15, 15, 16 + vcipher 15, 15, 17 + vcipher 15, 15, 18 + vcipher 15, 15, 19 + vcipher 15, 15, 20 + vcipher 15, 15, 21 + vcipher 15, 15, 28 + vcipher 15, 15, 29 - cmpdi 10, 14 - beq Do_final_1x +__Loop_aes_1state: + lxv 32+1, 0(10) + vcipher 15, 15, 1 + addi 10, 10, 16 + bdnz __Loop_aes_1state + lxv 32+1, 0(10) # last round key + lxvb16x 11, 0, 14 # load input block + vcipherlast 15, 15, 1 + + xxlxor 32+15, 32+15, 11 + stxvb16x 32+15, 0, 9 # store output + addi 14, 14, 16 + addi 9, 9, 16 -Do_final_1x: - vcipherlast 15, 15, 23 + cmpdi 24, 0 # decrypt? + bne __Encrypt_1x + xxlor 15+32, 11, 11 +__Encrypt_1x: + vxor 15, 15, 0 + PPC_GHASH1x 0, 15 - # check partial block - li 21, 0 # encrypt - ld 15, 56(7) # partial? 
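Both the retired open-coded path and the new helpers key the pending partial-block count off byte 56 of the context in r7, the offset documented on gcm_ctx::Plen in the glue ("offset 56 - used in aes_p10_gcm_{en/de}crypt"). A compile-time guard on the C side would make that coupling explicit (hypothetical, not part of this patch):

	/* Hypothetical guard for the layout the asm hard-codes as ld/std ...,56(r7);
	 * struct gcm_ctx is the glue's context structure. */
	static_assert(offsetof(struct gcm_ctx, Plen) == 56,
		      "aes_p10_gcm_{en,de}crypt assume Plen at byte offset 56");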
- cmpdi 15, 0 - beq Normal_block - bl Do_partial_block + addi 5, 5, -16 + addi 11, 11, 16 + vadduwm 30, 30, 31 # IV + counter + xxlxor 32+15, 32+30, 0 + addi 12, 12, -1 cmpdi 12, 0 - ble aes_gcm_out + bgt __Loop_1x - b Continue_partial_check - -Normal_block: - lxvb16x 15, 0, 14 # load last block - xxlxor 47, 47, 15 - - # create partial block mask - li 15, 16 - sub 15, 15, 12 # index to the mask - - vspltisb 16, -1 # first 16 bytes - 0xffff...ff - vspltisb 17, 0 # second 16 bytes - 0x0000...00 - li 10, 192 - stvx 16, 10, 1 + stxvb16x 32+30, 0, 7 # update IV + stxvb16x 32+0, 0, 8 # update Xi + blr +SYM_FUNC_END(aes_gcm_crypt_1x) + +################################################################################ +# Process a normal partial block when we come here. +# Compute partial mask, Load and store partial block to stack. +# Update partial_len and pblock. +# pblock is (encrypted ^ AES state) for encrypt +# and (input ^ AES state) for decrypt. +# +################################################################################ +SYM_FUNC_START_LOCAL(__Process_partial) + + # create partial mask + vspltisb 16, -1 + li 12, 16 + sub 12, 12, 5 + sldi 12, 12, 3 + mtvsrdd 32+17, 0, 12 + vslo 16, 16, 17 # partial block mask + + lxvb16x 11, 0, 14 # load partial block + xxland 11, 11, 32+16 + + # AES crypt partial + xxlxor 32+15, 32+30, 0 + lwz 23, 240(6) # n rounds + addi 22, 23, -1 # loop - 1 + mtctr 22 + addi 10, 6, 16 + +__Loop_aes_pstate: + lxv 32+1, 0(10) + vcipher 15, 15, 1 addi 10, 10, 16 - stvx 17, 10, 1 - - addi 10, 1, 192 - lxvb16x 16, 15, 10 # load partial block mask - xxland 47, 47, 16 - - vmr 28, 15 - ppc_update_hash_1x + bdnz __Loop_aes_pstate + lxv 32+1, 0(10) # last round key + vcipherlast 15, 15, 1 - # * should store only the remaining bytes. - bl Write_partial_block - - stxvb16x 30+32, 0, 7 # update IV - std 12, 56(7) # update partial? - li 16, 16 + xxlxor 32+15, 32+15, 11 + vand 15, 15, 16 - stxvb16x 32, 0, 8 # write out Xi - stxvb16x 32, 16, 8 # write out Xi - b aes_gcm_out - - # - # Compute data mask - # -.macro GEN_MASK _mask _start _end - vspltisb 16, -1 # first 16 bytes - 0xffff...ff - vspltisb 17, 0 # second 16 bytes - 0x0000...00 - li 10, 192 - stxvb16x 17+32, 10, 1 - add 10, 10, \_start - stxvb16x 16+32, 10, 1 - add 10, 10, \_end - stxvb16x 17+32, 10, 1 - - addi 10, 1, 192 - lxvb16x \_mask, 0, 10 # load partial block mask -.endm + # AES crypt output v15 + # Write partial + li 10, 224 + stxvb16x 15+32, 10, 1 # write v15 to stack + addi 10, 1, 223 + addi 12, 9, -1 + mtctr 5 # partial block len +__Write_partial: + lbzu 22, 1(10) + stbu 22, 1(12) + bdnz __Write_partial + + cmpdi 24, 0 # decrypt? + bne __Encrypt_partial + xxlor 32+15, 11, 11 # decrypt using the input block +__Encrypt_partial: + #vxor 15, 15, 0 # ^ previous hash + #PPC_GHASH1x 0, 15 + + add 14, 14, 5 + add 9, 9, 5 + std 5, 56(7) # update partial + sub 11, 11, 5 + li 5, 0 # done last byte - # - # Handle multiple partial blocks for encrypt and decrypt - # operations. - # -SYM_FUNC_START_LOCAL(Do_partial_block) - add 17, 15, 5 - cmpdi 17, 16 - bgt Big_block - GEN_MASK 18, 15, 5 - b _Partial -SYM_FUNC_END(Do_partial_block) -Big_block: + # + # Don't increase IV since this is the last partial. + # It should get updated in gcm_update if no more data blocks. 
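The mask built at the top of __Process_partial keeps only the first r5 bytes of a 16-byte block; a rough C model of the effect (the byte ordering established by lxvb16x is an assumption here):

	/* Rough model of the partial-block masking above: keep the first
	 * len bytes of the block, clear the tail (len < 16 on this path). */
	static void mask_partial_block(u8 blk[16], unsigned int len)
	{
		memset(blk + len, 0, 16 - len);
	}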
+ #vadduwm 30, 30, 31 # increase IV + stxvb16x 32+30, 0, 7 # update IV + li 10, 64 + stxvb16x 32+0, 0, 8 # Update X1 + stxvb16x 32+15, 10, 7 # Update pblock + blr +SYM_FUNC_END(__Process_partial) + +################################################################################ +# Combine partial blocks and ghash when we come here. +# +# The partial block has to be shifted to the right location to encrypt/decrypt +# and compute ghash if combining the previous partial block is needed. +# - Compute ghash for a full block. Clear Partial_len and pblock. Update IV. +# Write Xi. +# - Don't compute ghash if not a full block. gcm_update will take care of it +# if it is the last block. Update Partial_len and pblock. +# +################################################################################ +SYM_FUNC_START_LOCAL(__Combine_partial) + + ld 12, 56(7) + mr 21, 5 # these bytes to be processed + + li 17, 0 li 16, 16 + sub 22, 16, 12 # bytes to complete a block + sub 17, 22, 5 # remaining bytes in a block + cmpdi 5, 16 + ble __Inp_msg_less16 + li 17, 0 + mr 21, 22 + b __Combine_continue +__Inp_msg_less16: + cmpd 22, 5 + bgt __Combine_continue + li 17, 0 + mr 21, 22 # these bytes to be processed + +__Combine_continue: + # load msg and shift to the proper location and mask + vspltisb 16, -1 + sldi 15, 12, 3 + mtvsrdd 32+17, 0, 15 + vslo 16, 16, 17 + vsro 16, 16, 17 + sldi 15, 17, 3 + mtvsrdd 32+17, 0, 15 + vsro 16, 16, 17 + vslo 16, 16, 17 # mask + + lxvb16x 32+19, 0, 14 # load partial block + sldi 15, 12, 3 + mtvsrdd 32+17, 0, 15 + vsro 19, 19, 17 # 0x00..xxxx??..??
+ sldi 15, 17, 3 + mtvsrdd 32+17, 0, 15 + vsro 19, 19, 17 # 0x00..xxxx + vslo 19, 19, 17 # shift back to form 0x00..xxxx00..00 + + # AES crypt partial + xxlxor 32+15, 32+30, 0 + lwz 23, 240(6) # n rounds + addi 22, 23, -1 # loop - 1 + mtctr 22 + addi 10, 6, 16 + +__Loop_aes_cpstate: + lxv 32+1, 0(10) + vcipher 15, 15, 1 + addi 10, 10, 16 + bdnz __Loop_aes_cpstate + lxv 32+1, 0(10) # last round key + vcipherlast 15, 15, 1 - li 16, 16 - lxvb16x 32+29, 16, 8 - vxor 0, 0, 29 - stxvb16x 32, 0, 8 # save Xi - stxvb16x 32, 16, 8 # save Xi - - # store partial block - # loop the rest of the stream if any - sldi 16, 15, 3 - mtvsrdd 32+16, 0, 16 - vslo 15, 15, 16 - #stxvb16x 15+32, 0, 9 # last block + vxor 15, 15, 19 + vand 15, 15, 16 - li 16, 16 - sub 17, 16, 15 # 16 - partial - - add 16, 15, 5 - cmpdi 16, 16 - bgt Larger_16 - mr 17, 5 -Larger_16: - - # write partial - li 10, 192 - stxvb16x 15+32, 10, 1 # save current block - - addi 10, 9, -1 - addi 16, 1, 191 - mtctr 17 # move partial byte count - -Write_last_partial: - lbzu 18, 1(16) - stbu 18, 1(10) - bdnz Write_last_partial - # Complete loop partial - - add 14, 14, 17 - add 9, 9, 17 - sub 12, 12, 17 - add 11, 11, 17 - - add 15, 15, 5 - cmpdi 15, 16 - blt Save_partial - - vaddudm 30, 30, 31 - stxvb16x 30+32, 0, 7 # update IV - xxlor 32+29, 0, 0 - vxor 15, 30, 29 # IV + round key - add round key 0 - li 15, 0 - std 15, 56(7) # partial done - clear - b Partial_done -Save_partial: - std 15, 56(7) # partial - -Partial_done: + # AES crypt output v15 + # Write partial + li 10, 224 + stxvb16x 15+32, 10, 1 # write v15 to stack + addi 10, 1, 223 + add 10, 10, 12 # add offset + addi 15, 9, -1 + mtctr 21 # partial block len +__Write_combine_partial: + lbzu 22, 1(10) + stbu 22, 1(15) + bdnz __Write_combine_partial + + add 14, 14, 21 + add 11, 11, 21 + add 9, 9, 21 + sub 5, 5, 21 + + # Encrypt/Decrypt? + cmpdi 24, 0 # decrypt? + bne __Encrypt_combine_partial + vmr 15, 19 # decrypt using the input block + +__Encrypt_combine_partial: + # + # Update partial flag and combine ghash. +__Update_partial_ghash: + li 10, 64 + lxvb16x 32+17, 10, 7 # load previous pblock + add 12, 12, 21 # combined processed + vxor 15, 15, 17 # combined pblock + + cmpdi 12, 16 + beq __Clear_partial_flag + std 12, 56(7) # update partial len + stxvb16x 32+15, 10, 7 # Update current pblock blr - # - # Write partial block - # r9 - output - # r12 - remaining bytes - # v15 - partial input data - # -SYM_FUNC_START_LOCAL(Write_partial_block) - li 10, 192 - stxvb16x 15+32, 10, 1 # last block - - addi 10, 9, -1 - addi 16, 1, 191 - - mtctr 12 # remaining bytes - li 15, 0 - -Write_last_byte: - lbzu 14, 1(16) - stbu 14, 1(10) - bdnz Write_last_byte +__Clear_partial_flag: + li 12, 0 + std 12, 56(7) + # Update IV and ghash here + vadduwm 30, 30, 31 # increase IV + stxvb16x 32+30, 0, 7 # update IV + + # v15 is either (input block or encrypted block) ^ (AES state) + vxor 15, 15, 0 + PPC_GHASH1x 0, 15 + stxvb16x 32+0, 10, 7 # update pblock for debug?
+ stxvb16x 32+0, 0, 8 # update Xi blr -SYM_FUNC_END(Write_partial_block) +SYM_FUNC_END(__Combine_partial) -aes_gcm_out: - # out = state - stxvb16x 32, 0, 8 # write out Xi - add 3, 11, 12 # return count +################################################################################ +# gcm_update(iv, Xi) - compute last hash +# +################################################################################ +SYM_FUNC_START(gcm_update) - RESTORE_REGS - blr + ld 10, 56(3) + cmpdi 10, 0 + beq __no_update - # - # 8x Decrypt - # -_GLOBAL(aes_p10_gcm_decrypt) -.align 5 + lxvb16x 32, 0, 4 # load Xi + # load Hash - h^4, h^3, h^2, h + li 10, 32 + lxvd2x 2+32, 10, 4 # H Poli + li 10, 48 + lxvd2x 3+32, 10, 4 # Hl + li 10, 64 + lxvd2x 4+32, 10, 4 # H + li 10, 80 + lxvd2x 5+32, 10, 4 # Hh + + addis 11, 2, permx@toc@ha + addi 11, 11, permx@toc@l + lxv 10, 0(11) # vs10: vpermxor vector + + li 9, 64 + lxvb16x 32+6, 9, 3 # load pblock + vxor 6, 6, 0 + + vxor 1, 1, 1 + vpmsumd 12, 3, 6 # L + vpmsumd 13, 4, 6 # M + vpmsumd 14, 5, 6 # H + vpmsumd 17, 12, 2 # reduction + vsldoi 15, 13, 1, 8 # mL + vsldoi 16, 1, 13, 8 # mH + vxor 12, 12, 15 # LL + LL + vxor 14, 14, 16 # HH + HH + xxlor 32+15, 10, 10 + vpermxor 12, 12, 17, 15 + vsldoi 13, 12, 12, 8 # swap + vpmsumd 12, 12, 2 # reduction + vxor 13, 13, 14 + vxor 7, 12, 13 + + #vxor 0, 0, 0 + #stxvb16x 32+0, 9, 3 + li 10, 0 + std 10, 56(3) + stxvb16x 32+7, 0, 4 + +__no_update: + blr +SYM_FUNC_END(gcm_update) + +################################################################################ +# aes_p10_gcm_encrypt (const void *inp, void *out, size_t len, +# const char *rk, unsigned char iv[16], void *Xip); +# +# r3 - inp +# r4 - out +# r5 - len +# r6 - AES round keys +# r7 - iv and other data +# r8 - Xi, HPoli, hash keys +# +# rounds is at offset 240 in rk +# Xi is at 0 in gcm_table (Xip). 
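gcm_update() above is what keeps the partial-block handling lazy: the glue invokes it exactly once per request, immediately before the tag is formed, so a deferred pblock is folded into Xi no more than once. The call site, as it appears in p10_aes_gcm_crypt() earlier in this diff:

	/* Finalize hash: fold any pending pblock into Xi, then compute the tag. */
	vsx_begin();
	gcm_update(gctx->iv, hash->Htable);
	finish_tag(gctx, hash, total_processed);
	vsx_end();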
+# +################################################################################ +SYM_FUNC_START(aes_p10_gcm_encrypt) + + cmpdi 5, 0 + ble __Invalid_msg_len SAVE_REGS - LOAD_HASH_TABLE # initialize ICB: GHASH( IV ), IV - r7 lxvb16x 30+32, 0, 7 # load IV - v30 - mr 12, 5 # length - li 11, 0 # block index + mr 14, 3 + mr 9, 4 # counter 1 vxor 31, 31, 31 vspltisb 22, 1 vsldoi 31, 31, 22,1 # counter 1 - # load round key to VSR - lxv 0, 0(6) - lxv 1, 0x10(6) - lxv 2, 0x20(6) - lxv 3, 0x30(6) - lxv 4, 0x40(6) - lxv 5, 0x50(6) - lxv 6, 0x60(6) - lxv 7, 0x70(6) - lxv 8, 0x80(6) - lxv 9, 0x90(6) - lxv 10, 0xa0(6) + addis 11, 2, permx@toc@ha + addi 11, 11, permx@toc@l + lxv 10, 0(11) # vs10: vpermxor vector + li 11, 0 + + # load 9 round keys to VSR + lxv 0, 0(6) # round key 0 + lxv 1, 16(6) # round key 1 + lxv 2, 32(6) # round key 2 + lxv 3, 48(6) # round key 3 + lxv 4, 64(6) # round key 4 + lxv 5, 80(6) # round key 5 + lxv 6, 96(6) # round key 6 + lxv 7, 112(6) # round key 7 + lxv 8, 128(6) # round key 8 # load rounds - 10 (128), 12 (192), 14 (256) - lwz 9,240(6) + lwz 23, 240(6) # n rounds + li 24, 1 # encrypt +__Process_encrypt: # - # vxor state, state, w # addroundkey - xxlor 32+29, 0, 0 - vxor 15, 30, 29 # IV + round key - add round key 0 - - cmpdi 9, 10 - beq Loop_aes_gcm_8x_dec - - # load 2 more round keys (v11, v12) - lxv 11, 0xb0(6) - lxv 12, 0xc0(6) - - cmpdi 9, 12 - beq Loop_aes_gcm_8x_dec - - # load 2 more round keys (v11, v12, v13, v14) - lxv 13, 0xd0(6) - lxv 14, 0xe0(6) - cmpdi 9, 14 - beq Loop_aes_gcm_8x_dec + # Process different blocks + # + ld 12, 56(7) + cmpdi 12, 0 + bgt __Do_combine_enc + cmpdi 5, 128 + blt __Process_more_enc + +# +# Process 8x AES/GCM blocks +# +__Process_8x_enc: + # 8x blcoks + li 10, 128 + divdu 12, 5, 10 # n 128 bytes-blocks - b aes_gcm_out + addi 12, 12, -1 # loop - 1 -.align 5 -Loop_aes_gcm_8x_dec: - mr 14, 3 - mr 9, 4 + vmr 15, 30 # first state: IV + vadduwm 16, 15, 31 # state + counter + vadduwm 17, 16, 31 + vadduwm 18, 17, 31 + vadduwm 19, 18, 31 + vadduwm 20, 19, 31 + vadduwm 21, 20, 31 + vadduwm 22, 21, 31 + xxlor 9, 32+22, 32+22 # save last state - # - # check partial block - # -Continue_partial_check_dec: - ld 15, 56(7) - cmpdi 15, 0 - beq Continue_dec - bgt Final_block_dec - cmpdi 15, 16 - blt Final_block_dec - -Continue_dec: - # n blcoks - li 10, 128 - divdu 10, 12, 10 # n 128 bytes-blocks - cmpdi 10, 0 - beq Loop_last_block_dec - - vaddudm 30, 30, 31 # IV + counter - vxor 16, 30, 29 - vaddudm 30, 30, 31 - vxor 17, 30, 29 - vaddudm 30, 30, 31 - vxor 18, 30, 29 - vaddudm 30, 30, 31 - vxor 19, 30, 29 - vaddudm 30, 30, 31 - vxor 20, 30, 29 - vaddudm 30, 30, 31 - vxor 21, 30, 29 - vaddudm 30, 30, 31 - vxor 22, 30, 29 - - mtctr 10 + # vxor state, state, w # addroundkey + xxlor 32+29, 0, 0 + vxor 15, 15, 29 # IV + round key - add round key 0 + vxor 16, 16, 29 + vxor 17, 17, 29 + vxor 18, 18, 29 + vxor 19, 19, 29 + vxor 20, 20, 29 + vxor 21, 21, 29 + vxor 22, 22, 29 li 15, 16 li 16, 32 @@ -1217,305 +735,502 @@ Continue_dec: li 20, 96 li 21, 112 - lwz 10, 240(6) - -Loop_8x_block_dec: - - lxvb16x 15, 0, 14 # load block - lxvb16x 16, 15, 14 # load block - lxvb16x 17, 16, 14 # load block - lxvb16x 18, 17, 14 # load block - lxvb16x 19, 18, 14 # load block - lxvb16x 20, 19, 14 # load block - lxvb16x 21, 20, 14 # load block - lxvb16x 22, 21, 14 # load block - addi 14, 14, 128 - - Loop_aes_middle8x - - xxlor 23+32, 10, 10 - - cmpdi 10, 10 - beq Do_next_ghash_dec - - # 192 bits - xxlor 24+32, 11, 11 - - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 
17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 - - vcipher 15, 15, 24 - vcipher 16, 16, 24 - vcipher 17, 17, 24 - vcipher 18, 18, 24 - vcipher 19, 19, 24 - vcipher 20, 20, 24 - vcipher 21, 21, 24 - vcipher 22, 22, 24 - - xxlor 23+32, 12, 12 - - cmpdi 10, 12 - beq Do_next_ghash_dec - - # 256 bits - xxlor 24+32, 13, 13 - - vcipher 15, 15, 23 - vcipher 16, 16, 23 - vcipher 17, 17, 23 - vcipher 18, 18, 23 - vcipher 19, 19, 23 - vcipher 20, 20, 23 - vcipher 21, 21, 23 - vcipher 22, 22, 23 - - vcipher 15, 15, 24 - vcipher 16, 16, 24 - vcipher 17, 17, 24 - vcipher 18, 18, 24 - vcipher 19, 19, 24 - vcipher 20, 20, 24 - vcipher 21, 21, 24 - vcipher 22, 22, 24 - - xxlor 23+32, 14, 14 - - cmpdi 10, 14 - beq Do_next_ghash_dec - b aes_gcm_out + # + # Pre-compute first 8 AES state and leave 1/3/5 more rounds + # for the loop. + # + addi 22, 23, -9 # process 8 keys + mtctr 22 # AES key loop + addi 10, 6, 144 -Do_next_ghash_dec: + LOOP_8AES_STATE # process 8 AES keys - # - # last round - vcipherlast 15, 15, 23 - vcipherlast 16, 16, 23 - - xxlxor 47, 47, 15 - stxvb16x 47, 0, 9 # store output - xxlxor 48, 48, 16 - stxvb16x 48, 15, 9 # store output - - vcipherlast 17, 17, 23 - vcipherlast 18, 18, 23 - - xxlxor 49, 49, 17 - stxvb16x 49, 16, 9 # store output - xxlxor 50, 50, 18 - stxvb16x 50, 17, 9 # store output - - vcipherlast 19, 19, 23 - vcipherlast 20, 20, 23 - - xxlxor 51, 51, 19 - stxvb16x 51, 18, 9 # store output - xxlxor 52, 52, 20 - stxvb16x 52, 19, 9 # store output - - vcipherlast 21, 21, 23 - vcipherlast 22, 22, 23 - - xxlxor 53, 53, 21 - stxvb16x 53, 20, 9 # store output - xxlxor 54, 54, 22 - stxvb16x 54, 21, 9 # store output - - addi 9, 9, 128 - - xxlor 15+32, 15, 15 - xxlor 16+32, 16, 16 - xxlor 17+32, 17, 17 - xxlor 18+32, 18, 18 - xxlor 19+32, 19, 19 - xxlor 20+32, 20, 20 - xxlor 21+32, 21, 21 - xxlor 22+32, 22, 22 +__PreLoop_aes_state: + lxv 32+1, 0(10) # round key + AES_CIPHER_8x vcipher 15 1 + addi 10, 10, 16 + bdnz __PreLoop_aes_state + lxv 32+1, 0(10) # last round key (v1) + + cmpdi 12, 0 # Only one loop (8 block) + beq __Finish_ghash + +# +# Loop 8x blocks and compute ghash +# +__Loop_8x_block_enc: + vcipherlast 15, 15, 1 + vcipherlast 16, 16, 1 + vcipherlast 17, 17, 1 + vcipherlast 18, 18, 1 + vcipherlast 19, 19, 1 + vcipherlast 20, 20, 1 + vcipherlast 21, 21, 1 + vcipherlast 22, 22, 1 + + lxvb16x 32+23, 0, 14 # load block + lxvb16x 32+24, 15, 14 # load block + lxvb16x 32+25, 16, 14 # load block + lxvb16x 32+26, 17, 14 # load block + lxvb16x 32+27, 18, 14 # load block + lxvb16x 32+28, 19, 14 # load block + lxvb16x 32+29, 20, 14 # load block + lxvb16x 32+30, 21, 14 # load block + addi 14, 14, 128 + + vxor 15, 15, 23 + vxor 16, 16, 24 + vxor 17, 17, 25 + vxor 18, 18, 26 + vxor 19, 19, 27 + vxor 20, 20, 28 + vxor 21, 21, 29 + vxor 22, 22, 30 + + stxvb16x 47, 0, 9 # store output + stxvb16x 48, 15, 9 # store output + stxvb16x 49, 16, 9 # store output + stxvb16x 50, 17, 9 # store output + stxvb16x 51, 18, 9 # store output + stxvb16x 52, 19, 9 # store output + stxvb16x 53, 20, 9 # store output + stxvb16x 54, 21, 9 # store output + addi 9, 9, 128 # ghash here - ppc_aes_gcm_ghash2_4x - - xxlor 27+32, 0, 0 - vaddudm 30, 30, 31 # IV + counter - vmr 29, 30 - vxor 15, 30, 27 # add round key - vaddudm 30, 30, 31 - vxor 16, 30, 27 - vaddudm 30, 30, 31 - vxor 17, 30, 27 - vaddudm 30, 30, 31 - vxor 18, 30, 27 - vaddudm 30, 30, 31 - vxor 19, 30, 27 - vaddudm 30, 30, 31 - vxor 20, 30, 27 - vaddudm 30, 30, 31 - vxor 21, 30, 27 - vaddudm 30, 
30, 31 - vxor 22, 30, 27 - - addi 12, 12, -128 + vxor 15, 15, 0 + PPC_GHASH4x 0, 15, 16, 17, 18 + + vxor 19, 19, 0 + PPC_GHASH4x 0, 19, 20, 21, 22 + + xxlor 32+15, 9, 9 # last state + vadduwm 15, 15, 31 # state + counter + vadduwm 16, 15, 31 + vadduwm 17, 16, 31 + vadduwm 18, 17, 31 + vadduwm 19, 18, 31 + vadduwm 20, 19, 31 + vadduwm 21, 20, 31 + vadduwm 22, 21, 31 + xxlor 9, 32+22, 32+22 # save last state + + xxlor 32+27, 0, 0 # restore roundkey 0 + vxor 15, 15, 27 # IV + round key - add round key 0 + vxor 16, 16, 27 + vxor 17, 17, 27 + vxor 18, 18, 27 + vxor 19, 19, 27 + vxor 20, 20, 27 + vxor 21, 21, 27 + vxor 22, 22, 27 + + addi 5, 5, -128 addi 11, 11, 128 - bdnz Loop_8x_block_dec - - vmr 30, 29 - stxvb16x 30+32, 0, 7 # update IV - -Loop_last_block_dec: - cmpdi 12, 0 - beq aes_gcm_out - - # loop last few blocks - li 10, 16 - divdu 10, 12, 10 - - mtctr 10 - - lwz 10, 240(6) - - cmpdi 12, 16 - blt Final_block_dec - -Next_rem_block_dec: - lxvb16x 15, 0, 14 # load block - - Loop_aes_middle_1x - - xxlor 23+32, 10, 10 + LOOP_8AES_STATE # process 8 AES keys + mtctr 22 # AES key loop + addi 10, 6, 144 +__LastLoop_aes_state: + lxv 32+1, 0(10) # round key + AES_CIPHER_8x vcipher 15 1 + addi 10, 10, 16 + bdnz __LastLoop_aes_state + lxv 32+1, 0(10) # last round key (v1) - cmpdi 10, 10 - beq Do_next_1x_dec + addi 12, 12, -1 + cmpdi 12, 0 + bne __Loop_8x_block_enc + +__Finish_ghash: + vcipherlast 15, 15, 1 + vcipherlast 16, 16, 1 + vcipherlast 17, 17, 1 + vcipherlast 18, 18, 1 + vcipherlast 19, 19, 1 + vcipherlast 20, 20, 1 + vcipherlast 21, 21, 1 + vcipherlast 22, 22, 1 + + lxvb16x 32+23, 0, 14 # load block + lxvb16x 32+24, 15, 14 # load block + lxvb16x 32+25, 16, 14 # load block + lxvb16x 32+26, 17, 14 # load block + lxvb16x 32+27, 18, 14 # load block + lxvb16x 32+28, 19, 14 # load block + lxvb16x 32+29, 20, 14 # load block + lxvb16x 32+30, 21, 14 # load block + addi 14, 14, 128 + + vxor 15, 15, 23 + vxor 16, 16, 24 + vxor 17, 17, 25 + vxor 18, 18, 26 + vxor 19, 19, 27 + vxor 20, 20, 28 + vxor 21, 21, 29 + vxor 22, 22, 30 + + stxvb16x 47, 0, 9 # store output + stxvb16x 48, 15, 9 # store output + stxvb16x 49, 16, 9 # store output + stxvb16x 50, 17, 9 # store output + stxvb16x 51, 18, 9 # store output + stxvb16x 52, 19, 9 # store output + stxvb16x 53, 20, 9 # store output + stxvb16x 54, 21, 9 # store output + addi 9, 9, 128 + + vxor 15, 15, 0 + PPC_GHASH4x 0, 15, 16, 17, 18 + + vxor 19, 19, 0 + PPC_GHASH4x 0, 19, 20, 21, 22 + + xxlor 30+32, 9, 9 # last ctr + vadduwm 30, 30, 31 # increase ctr + stxvb16x 32+30, 0, 7 # update IV + stxvb16x 32+0, 0, 8 # update Xi + + addi 5, 5, -128 + addi 11, 11, 128 - # 192 bits - xxlor 24+32, 11, 11 + # + # Done 8x blocks + # - vcipher 15, 15, 23 - vcipher 15, 15, 24 + cmpdi 5, 0 + beq aes_gcm_out - xxlor 23+32, 12, 12 +__Process_more_enc: + li 24, 1 # encrypt + bl aes_gcm_crypt_1x + cmpdi 5, 0 + beq aes_gcm_out - cmpdi 10, 12 - beq Do_next_1x_dec + bl __Process_partial + cmpdi 5, 0 + beq aes_gcm_out +__Do_combine_enc: + bl __Combine_partial + cmpdi 5, 0 + bgt __Process_encrypt + b aes_gcm_out - # 256 bits - xxlor 24+32, 13, 13 +SYM_FUNC_END(aes_p10_gcm_encrypt) - vcipher 15, 15, 23 - vcipher 15, 15, 24 +################################################################################ +# aes_p10_gcm_decrypt (const void *inp, void *out, size_t len, +# const char *rk, unsigned char iv[16], void *Xip); +# 8x Decrypt +# +################################################################################ +SYM_FUNC_START(aes_p10_gcm_decrypt) - xxlor 23+32, 14, 14 + cmpdi 5, 0 
+ ble __Invalid_msg_len - cmpdi 10, 14 - beq Do_next_1x_dec + SAVE_REGS + LOAD_HASH_TABLE -Do_next_1x_dec: - vcipherlast 15, 15, 23 + # initialize ICB: GHASH( IV ), IV - r7 + lxvb16x 30+32, 0, 7 # load IV - v30 - xxlxor 47, 47, 15 - stxvb16x 47, 0, 9 # store output - addi 14, 14, 16 - addi 9, 9, 16 + mr 14, 3 + mr 9, 4 - xxlor 28+32, 15, 15 - #vmr 28, 15 - ppc_update_hash_1x + # counter 1 + vxor 31, 31, 31 + vspltisb 22, 1 + vsldoi 31, 31, 22,1 # counter 1 - addi 12, 12, -16 - addi 11, 11, 16 - xxlor 19+32, 0, 0 - vaddudm 30, 30, 31 # IV + counter - vxor 15, 30, 19 # add round key + addis 11, 2, permx@toc@ha + addi 11, 11, permx@toc@l + lxv 10, 0(11) # vs10: vpermxor vector + li 11, 0 + + # load 9 round keys to VSR + lxv 0, 0(6) # round key 0 + lxv 1, 16(6) # round key 1 + lxv 2, 32(6) # round key 2 + lxv 3, 48(6) # round key 3 + lxv 4, 64(6) # round key 4 + lxv 5, 80(6) # round key 5 + lxv 6, 96(6) # round key 6 + lxv 7, 112(6) # round key 7 + lxv 8, 128(6) # round key 8 - bdnz Next_rem_block_dec + # load rounds - 10 (128), 12 (192), 14 (256) + lwz 23, 240(6) # n rounds + li 24, 0 # decrypt - li 15, 0 - std 15, 56(7) # clear partial? - stxvb16x 30+32, 0, 7 # update IV +__Process_decrypt: + # + # Process different blocks + # + ld 12, 56(7) cmpdi 12, 0 - beq aes_gcm_out - -Final_block_dec: - lwz 10, 240(6) - Loop_aes_middle_1x - - xxlor 23+32, 10, 10 - - cmpdi 10, 10 - beq Do_final_1x_dec + bgt __Do_combine_dec + cmpdi 5, 128 + blt __Process_more_dec + +# +# Process 8x AES/GCM blocks +# +__Process_8x_dec: + # 8x blcoks + li 10, 128 + divdu 12, 5, 10 # n 128 bytes-blocks - # 192 bits - xxlor 24+32, 11, 11 + addi 12, 12, -1 # loop - 1 - vcipher 15, 15, 23 - vcipher 15, 15, 24 + vmr 15, 30 # first state: IV + vadduwm 16, 15, 31 # state + counter + vadduwm 17, 16, 31 + vadduwm 18, 17, 31 + vadduwm 19, 18, 31 + vadduwm 20, 19, 31 + vadduwm 21, 20, 31 + vadduwm 22, 21, 31 + xxlor 9, 32+22, 32+22 # save last state - xxlor 23+32, 12, 12 + # vxor state, state, w # addroundkey + xxlor 32+29, 0, 0 + vxor 15, 15, 29 # IV + round key - add round key 0 + vxor 16, 16, 29 + vxor 17, 17, 29 + vxor 18, 18, 29 + vxor 19, 19, 29 + vxor 20, 20, 29 + vxor 21, 21, 29 + vxor 22, 22, 29 - cmpdi 10, 12 - beq Do_final_1x_dec + li 15, 16 + li 16, 32 + li 17, 48 + li 18, 64 + li 19, 80 + li 20, 96 + li 21, 112 - # 256 bits - xxlor 24+32, 13, 13 + # + # Pre-compute first 8 AES state and leave 1/3/5 more rounds + # for the loop. 
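+	# (Round-count check: lwz 23, 240(6) yields n = 10/12/14 for AES
+	# 128/192/256; LOOP_8AES_STATE covers eight vcipher rounds and the
+	# bdnz key loop runs n - 9 = 1/3/5 times, so rounds 1..n-1 are done
+	# before the final vcipherlast.)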
+ # + addi 22, 23, -9 # process 8 keys + mtctr 22 # AES key loop + addi 10, 6, 144 - vcipher 15, 15, 23 - vcipher 15, 15, 24 + LOOP_8AES_STATE # process 8 AES keys - xxlor 23+32, 14, 14 +__PreLoop_aes_state_dec: + lxv 32+1, 0(10) # round key + AES_CIPHER_8x vcipher 15 1 + addi 10, 10, 16 + bdnz __PreLoop_aes_state_dec + lxv 32+1, 0(10) # last round key (v1) + + cmpdi 12, 0 # Only one loop (8 block) + beq __Finish_ghash_dec + +# +# Loop 8x blocks and compute ghash +# +__Loop_8x_block_dec: + vcipherlast 15, 15, 1 + vcipherlast 16, 16, 1 + vcipherlast 17, 17, 1 + vcipherlast 18, 18, 1 + vcipherlast 19, 19, 1 + vcipherlast 20, 20, 1 + vcipherlast 21, 21, 1 + vcipherlast 22, 22, 1 + + lxvb16x 32+23, 0, 14 # load block + lxvb16x 32+24, 15, 14 # load block + lxvb16x 32+25, 16, 14 # load block + lxvb16x 32+26, 17, 14 # load block + lxvb16x 32+27, 18, 14 # load block + lxvb16x 32+28, 19, 14 # load block + lxvb16x 32+29, 20, 14 # load block + lxvb16x 32+30, 21, 14 # load block + addi 14, 14, 128 + + vxor 15, 15, 23 + vxor 16, 16, 24 + vxor 17, 17, 25 + vxor 18, 18, 26 + vxor 19, 19, 27 + vxor 20, 20, 28 + vxor 21, 21, 29 + vxor 22, 22, 30 + + stxvb16x 47, 0, 9 # store output + stxvb16x 48, 15, 9 # store output + stxvb16x 49, 16, 9 # store output + stxvb16x 50, 17, 9 # store output + stxvb16x 51, 18, 9 # store output + stxvb16x 52, 19, 9 # store output + stxvb16x 53, 20, 9 # store output + stxvb16x 54, 21, 9 # store output + + addi 9, 9, 128 + + vmr 15, 23 + vmr 16, 24 + vmr 17, 25 + vmr 18, 26 + vmr 19, 27 + vmr 20, 28 + vmr 21, 29 + vmr 22, 30 - cmpdi 10, 14 - beq Do_final_1x_dec + # ghash here + vxor 15, 15, 0 + PPC_GHASH4x 0, 15, 16, 17, 18 + + vxor 19, 19, 0 + PPC_GHASH4x 0, 19, 20, 21, 22 + + xxlor 32+15, 9, 9 # last state + vadduwm 15, 15, 31 # state + counter + vadduwm 16, 15, 31 + vadduwm 17, 16, 31 + vadduwm 18, 17, 31 + vadduwm 19, 18, 31 + vadduwm 20, 19, 31 + vadduwm 21, 20, 31 + vadduwm 22, 21, 31 + xxlor 9, 32+22, 32+22 # save last state + + xxlor 32+27, 0, 0 # restore roundkey 0 + vxor 15, 15, 27 # IV + round key - add round key 0 + vxor 16, 16, 27 + vxor 17, 17, 27 + vxor 18, 18, 27 + vxor 19, 19, 27 + vxor 20, 20, 27 + vxor 21, 21, 27 + vxor 22, 22, 27 + + addi 5, 5, -128 + addi 11, 11, 128 -Do_final_1x_dec: - vcipherlast 15, 15, 23 + LOOP_8AES_STATE # process 8 AES keys + mtctr 22 # AES key loop + addi 10, 6, 144 +__LastLoop_aes_state_dec: + lxv 32+1, 0(10) # round key + AES_CIPHER_8x vcipher 15 1 + addi 10, 10, 16 + bdnz __LastLoop_aes_state_dec + lxv 32+1, 0(10) # last round key (v1) - # check partial block - li 21, 1 # decrypt - ld 15, 56(7) # partial? 
- cmpdi 15, 0 - beq Normal_block_dec - bl Do_partial_block + addi 12, 12, -1 cmpdi 12, 0 - ble aes_gcm_out - - b Continue_partial_check_dec + bne __Loop_8x_block_dec + +__Finish_ghash_dec: + vcipherlast 15, 15, 1 + vcipherlast 16, 16, 1 + vcipherlast 17, 17, 1 + vcipherlast 18, 18, 1 + vcipherlast 19, 19, 1 + vcipherlast 20, 20, 1 + vcipherlast 21, 21, 1 + vcipherlast 22, 22, 1 + + lxvb16x 32+23, 0, 14 # load block + lxvb16x 32+24, 15, 14 # load block + lxvb16x 32+25, 16, 14 # load block + lxvb16x 32+26, 17, 14 # load block + lxvb16x 32+27, 18, 14 # load block + lxvb16x 32+28, 19, 14 # load block + lxvb16x 32+29, 20, 14 # load block + lxvb16x 32+30, 21, 14 # load block + addi 14, 14, 128 + + vxor 15, 15, 23 + vxor 16, 16, 24 + vxor 17, 17, 25 + vxor 18, 18, 26 + vxor 19, 19, 27 + vxor 20, 20, 28 + vxor 21, 21, 29 + vxor 22, 22, 30 + + stxvb16x 47, 0, 9 # store output + stxvb16x 48, 15, 9 # store output + stxvb16x 49, 16, 9 # store output + stxvb16x 50, 17, 9 # store output + stxvb16x 51, 18, 9 # store output + stxvb16x 52, 19, 9 # store output + stxvb16x 53, 20, 9 # store output + stxvb16x 54, 21, 9 # store output + addi 9, 9, 128 + + #vmr 15, 23 + vxor 15, 23, 0 + vmr 16, 24 + vmr 17, 25 + vmr 18, 26 + vmr 19, 27 + vmr 20, 28 + vmr 21, 29 + vmr 22, 30 + + #vxor 15, 15, 0 + PPC_GHASH4x 0, 15, 16, 17, 18 + + vxor 19, 19, 0 + PPC_GHASH4x 0, 19, 20, 21, 22 + + xxlor 30+32, 9, 9 # last ctr + vadduwm 30, 30, 31 # increase ctr + stxvb16x 32+30, 0, 7 # update IV + stxvb16x 32+0, 0, 8 # update Xi + + addi 5, 5, -128 + addi 11, 11, 128 -Normal_block_dec: - lxvb16x 15, 0, 14 # load last block - xxlxor 47, 47, 15 + # + # Done 8x blocks + # - # create partial block mask - li 15, 16 - sub 15, 15, 12 # index to the mask + cmpdi 5, 0 + beq aes_gcm_out - vspltisb 16, -1 # first 16 bytes - 0xffff...ff - vspltisb 17, 0 # second 16 bytes - 0x0000...00 - li 10, 192 - stvx 16, 10, 1 - addi 10, 10, 16 - stvx 17, 10, 1 +__Process_more_dec: + li 24, 0 # decrypt + bl aes_gcm_crypt_1x + cmpdi 5, 0 + beq aes_gcm_out - addi 10, 1, 192 - lxvb16x 16, 15, 10 # load partial block mask - xxland 47, 47, 16 + bl __Process_partial + cmpdi 5, 0 + beq aes_gcm_out +__Do_combine_dec: + bl __Combine_partial + cmpdi 5, 0 + bgt __Process_decrypt + b aes_gcm_out +SYM_FUNC_END(aes_p10_gcm_decrypt) - xxland 32+28, 15, 16 - #vmr 28, 15 - ppc_update_hash_1x +SYM_FUNC_START_LOCAL(aes_gcm_out) - # * should store only the remaining bytes. - bl Write_partial_block + mr 3, 11 # return count - stxvb16x 30+32, 0, 7 # update IV - std 12, 56(7) # update partial? 
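A behavioral point worth noting in the rewritten decrypt loops above: the vmr copies feed the loaded ciphertext into PPC_GHASH4x rather than the xor results, because GCM authenticates ciphertext in both directions. A compact C model (ghash_update() is a hypothetical helper):

	/* GCM hashes ciphertext on both paths:
	 *   encrypt: c = p ^ ks, then hash c (the output just written);
	 *   decrypt: p = c ^ ks, then hash c (the input just read). */
	static void gcm_block(u8 *out, const u8 *in, const u8 *ks, bool enc)
	{
		u8 c[16];
		int i;

		for (i = 0; i < 16; i++)
			out[i] = in[i] ^ ks[i];
		memcpy(c, enc ? out : in, 16);
		ghash_update(c);	/* hypothetical */
	}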
- li 16, 16 + RESTORE_REGS + blr - stxvb16x 32, 0, 8 # write out Xi - stxvb16x 32, 16, 8 # write out Xi - b aes_gcm_out +__Invalid_msg_len: + li 3, 0 + blr +SYM_FUNC_END(aes_gcm_out) + +SYM_DATA_START_LOCAL(PERMX) +.align 4 +# for vector permute and xor +permx: +.long 0x4c5d6e7f, 0x08192a3b, 0xc4d5e6f7, 0x8091a2b3 +SYM_DATA_END(permx) diff --git a/arch/powerpc/crypto/aes.c b/arch/powerpc/crypto/aes.c index ec06189fbf99..3f1e5e894902 100644 --- a/arch/powerpc/crypto/aes.c +++ b/arch/powerpc/crypto/aes.c @@ -7,15 +7,15 @@ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> */ -#include <linux/types.h> -#include <linux/err.h> -#include <linux/crypto.h> -#include <linux/delay.h> #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/aes_cbc.c b/arch/powerpc/crypto/aes_cbc.c index ed0debc7acb5..5f2a4f375eef 100644 --- a/arch/powerpc/crypto/aes_cbc.c +++ b/arch/powerpc/crypto/aes_cbc.c @@ -12,6 +12,10 @@ #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c index 9a3da8cd62f3..e27c4036e711 100644 --- a/arch/powerpc/crypto/aes_ctr.c +++ b/arch/powerpc/crypto/aes_ctr.c @@ -12,6 +12,10 @@ #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" @@ -69,9 +73,9 @@ static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx, struct skcipher_walk *walk) { + const u8 *src = walk->src.virt.addr; u8 *ctrblk = walk->iv; u8 keystream[AES_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; diff --git a/arch/powerpc/crypto/aes_xts.c b/arch/powerpc/crypto/aes_xts.c index dabbccb41550..9440e771cede 100644 --- a/arch/powerpc/crypto/aes_xts.c +++ b/arch/powerpc/crypto/aes_xts.c @@ -13,6 +13,10 @@ #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c deleted file mode 100644 index 74fb86b0d209..000000000000 --- a/arch/powerpc/crypto/chacha-p10-glue.c +++ /dev/null @@ -1,221 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright 2023- IBM Corp. All rights reserved. 
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/simd.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/cpufeature.h> -#include <linux/sizes.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); - -static void vsx_begin(void) -{ - preempt_disable(); - enable_kernel_vsx(); -} - -static void vsx_end(void) -{ - disable_kernel_vsx(); - preempt_enable(); -} - -static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds) -{ - unsigned int l = bytes & ~0x0FF; - - if (l > 0) { - chacha_p10le_8x(state, dst, src, l, nrounds); - bytes -= l; - src += l; - dst += l; - state[12] += l / CHACHA_BLOCK_SIZE; - } - - if (bytes > 0) - chacha_crypt_generic(state, dst, src, bytes, nrounds); -} - -void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -{ - hchacha_block_generic(state, stream, nrounds); -} -EXPORT_SYMBOL(hchacha_block_arch); - -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, - int nrounds) -{ - if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE || - !crypto_simd_usable()) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); - - do { - unsigned int todo = min_t(unsigned int, bytes, SZ_4K); - - vsx_begin(); - chacha_p10_do_8x(state, dst, src, todo, nrounds); - vsx_end(); - - bytes -= todo; - src += todo; - dst += todo; - } while (bytes); -} -EXPORT_SYMBOL(chacha_crypt_arch); - -static int chacha_p10_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - if (err) - return err; - - chacha_init_generic(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = rounddown(nbytes, walk.stride); - - if (!crypto_simd_usable()) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); - } else { - vsx_begin(); - chacha_p10_do_8x(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); - vsx_end(); - } - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - if (err) - break; - } - - return err; -} - -static int chacha_p10(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_p10_stream_xor(req, ctx, req->iv); -} - -static int xchacha_p10(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - chacha_init_generic(state, ctx->key, req->iv); - hchacha_block_arch(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_p10_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-p10", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = 
sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_p10, - .decrypt = chacha_p10, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-p10", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = xchacha_p10, - .decrypt = xchacha_p10, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-p10", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_p10, - .decrypt = xchacha_p10, - } -}; - -static int __init chacha_p10_init(void) -{ - static_branch_enable(&have_p10); - - return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -} - -static void __exit chacha_p10_exit(void) -{ - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init); -module_exit(chacha_p10_exit); - -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)"); -MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-p10"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-p10"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-p10"); diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c deleted file mode 100644 index c61a874a3a5c..000000000000 --- a/arch/powerpc/crypto/crc-vpmsum_test.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * CRC vpmsum tester - * Copyright 2017 Daniel Axtens, IBM Corporation. 
- */ - -#include <linux/crc-t10dif.h> -#include <linux/crc32.h> -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/random.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/cpufeature.h> -#include <asm/switch_to.h> - -static unsigned long iterations = 10000; - -#define MAX_CRC_LENGTH 65535 - - -static int __init crc_test_init(void) -{ - u16 crc16 = 0, verify16 = 0; - __le32 verify32le = 0; - unsigned char *data; - u32 verify32 = 0; - unsigned long i; - __le32 crc32; - int ret; - - struct crypto_shash *crct10dif_tfm; - struct crypto_shash *crc32c_tfm; - - if (!cpu_has_feature(CPU_FTR_ARCH_207S)) - return -ENODEV; - - data = kmalloc(MAX_CRC_LENGTH, GFP_KERNEL); - if (!data) - return -ENOMEM; - - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - - if (IS_ERR(crct10dif_tfm)) { - pr_err("Error allocating crc-t10dif\n"); - goto free_buf; - } - - crc32c_tfm = crypto_alloc_shash("crc32c", 0, 0); - - if (IS_ERR(crc32c_tfm)) { - pr_err("Error allocating crc32c\n"); - goto free_16; - } - - do { - SHASH_DESC_ON_STACK(crct10dif_shash, crct10dif_tfm); - SHASH_DESC_ON_STACK(crc32c_shash, crc32c_tfm); - - crct10dif_shash->tfm = crct10dif_tfm; - ret = crypto_shash_init(crct10dif_shash); - - if (ret) { - pr_err("Error initing crc-t10dif\n"); - goto free_32; - } - - - crc32c_shash->tfm = crc32c_tfm; - ret = crypto_shash_init(crc32c_shash); - - if (ret) { - pr_err("Error initing crc32c\n"); - goto free_32; - } - - pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations); - for (i=0; i<iterations; i++) { - size_t offset = get_random_u32_below(16); - size_t len = get_random_u32_below(MAX_CRC_LENGTH); - - if (len <= offset) - continue; - get_random_bytes(data, len); - len -= offset; - - crypto_shash_update(crct10dif_shash, data+offset, len); - crypto_shash_final(crct10dif_shash, (u8 *)(&crc16)); - verify16 = crc_t10dif_generic(verify16, data+offset, len); - - - if (crc16 != verify16) { - pr_err("FAILURE in CRC16: got 0x%04x expected 0x%04x (len %lu)\n", - crc16, verify16, len); - break; - } - - crypto_shash_update(crc32c_shash, data+offset, len); - crypto_shash_final(crc32c_shash, (u8 *)(&crc32)); - verify32 = le32_to_cpu(verify32le); - verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len)); - if (crc32 != verify32le) { - pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n", - crc32, verify32, len); - break; - } - cond_resched(); - } - pr_info("crc-vpmsum_test done, completed %lu iterations\n", i); - } while (0); - -free_32: - crypto_free_shash(crc32c_tfm); - -free_16: - crypto_free_shash(crct10dif_tfm); - -free_buf: - kfree(data); - - return 0; -} - -static void __exit crc_test_exit(void) {} - -module_init(crc_test_init); -module_exit(crc_test_exit); -module_param(iterations, long, 0400); - -MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>"); -MODULE_DESCRIPTION("Vector polynomial multiply-sum CRC tester"); -MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c deleted file mode 100644 index 63760b7dbb76..000000000000 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include <linux/crc32.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/cpufeature.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -#define 
CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -#define VMX_ALIGN 16 -#define VMX_ALIGN_MASK (VMX_ALIGN-1) - -#define VECTOR_BREAKPOINT 512 - -u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len); - -static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len) -{ - unsigned int prealign; - unsigned int tail; - - if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable()) - return __crc32c_le(crc, p, len); - - if ((unsigned long)p & VMX_ALIGN_MASK) { - prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); - crc = __crc32c_le(crc, p, prealign); - len -= prealign; - p += prealign; - } - - if (len & ~VMX_ALIGN_MASK) { - preempt_disable(); - pagefault_disable(); - enable_kernel_altivec(); - crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); - disable_kernel_altivec(); - pagefault_enable(); - preempt_enable(); - } - - tail = len & VMX_ALIGN_MASK; - if (tail) { - p += len & ~VMX_ALIGN_MASK; - crc = __crc32c_le(crc, p, tail); - } - - return crc; -} - -static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = ~0; - - return 0; -} - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. - */ -static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) - return -EINVAL; - *mctx = le32_to_cpup((__le32 *)key); - return 0; -} - -static int crc32c_vpmsum_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *crcp = shash_desc_ctx(desc); - - *crcp = *mctx; - - return 0; -} - -static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *crcp = shash_desc_ctx(desc); - - *crcp = crc32c_vpmsum(*crcp, data, len); - - return 0; -} - -static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len, - u8 *out) -{ - *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len)); - - return 0; -} - -static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out); -} - -static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out) -{ - u32 *crcp = shash_desc_ctx(desc); - - *(__le32 *)out = ~cpu_to_le32p(crcp); - - return 0; -} - -static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} - -static struct shash_alg alg = { - .setkey = crc32c_vpmsum_setkey, - .init = crc32c_vpmsum_init, - .update = crc32c_vpmsum_update, - .final = crc32c_vpmsum_final, - .finup = crc32c_vpmsum_finup, - .digest = crc32c_vpmsum_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-vpmsum", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = crc32c_vpmsum_cra_init, - } -}; - -static int __init crc32c_vpmsum_mod_init(void) -{ - if (!cpu_has_feature(CPU_FTR_ARCH_207S)) - return -ENODEV; - - return crypto_register_shash(&alg); -} - -static void __exit crc32c_vpmsum_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init); 
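
(This deletion, together with crct10dif-vpmsum_glue.c below, removes the crypto_shash wrappers; the vpmsum routines instead sit behind the kernel's generic CRC library, so callers reach the accelerated path through a plain library call, roughly as sketched here. checksum_buf() is an illustrative name, not part of this diff.)

	#include <linux/crc32.h>
	#include <linux/types.h>

	/* The library entry point dispatches to the vpmsum implementation
	 * when the CPU supports it, and to the generic table-driven code
	 * otherwise; no crypto_shash allocation or fallback plumbing is
	 * needed on the caller's side. */
	static u32 checksum_buf(const void *buf, size_t len)
	{
		return crc32c(~0U, buf, len) ^ ~0U;
	}
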
-module_exit(crc32c_vpmsum_mod_fini); - -MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); -MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crc32c"); -MODULE_ALIAS_CRYPTO("crc32c-vpmsum"); diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c deleted file mode 100644 index 1dc8b6915178..000000000000 --- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c +++ /dev/null @@ -1,126 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Calculate a CRC T10-DIF with vpmsum acceleration - * - * Copyright 2017, Daniel Axtens, IBM Corporation. - * [based on crc32c-vpmsum_glue.c] - */ - -#include <linux/crc-t10dif.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/cpufeature.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -#define VMX_ALIGN 16 -#define VMX_ALIGN_MASK (VMX_ALIGN-1) - -#define VECTOR_BREAKPOINT 64 - -u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len); - -static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len) -{ - unsigned int prealign; - unsigned int tail; - u32 crc = crci; - - if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable()) - return crc_t10dif_generic(crc, p, len); - - if ((unsigned long)p & VMX_ALIGN_MASK) { - prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); - crc = crc_t10dif_generic(crc, p, prealign); - len -= prealign; - p += prealign; - } - - if (len & ~VMX_ALIGN_MASK) { - crc <<= 16; - preempt_disable(); - pagefault_disable(); - enable_kernel_altivec(); - crc = __crct10dif_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); - disable_kernel_altivec(); - pagefault_enable(); - preempt_enable(); - crc >>= 16; - } - - tail = len & VMX_ALIGN_MASK; - if (tail) { - p += len & ~VMX_ALIGN_MASK; - crc = crc_t10dif_generic(crc, p, tail); - } - - return crc & 0xffff; -} - -static int crct10dif_vpmsum_init(struct shash_desc *desc) -{ - u16 *crc = shash_desc_ctx(desc); - - *crc = 0; - return 0; -} - -static int crct10dif_vpmsum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u16 *crc = shash_desc_ctx(desc); - - *crc = crct10dif_vpmsum(*crc, data, length); - - return 0; -} - - -static int crct10dif_vpmsum_final(struct shash_desc *desc, u8 *out) -{ - u16 *crcp = shash_desc_ctx(desc); - - *(u16 *)out = *crcp; - return 0; -} - -static struct shash_alg alg = { - .init = crct10dif_vpmsum_init, - .update = crct10dif_vpmsum_update, - .final = crct10dif_vpmsum_final, - .descsize = CRC_T10DIF_DIGEST_SIZE, - .digestsize = CRC_T10DIF_DIGEST_SIZE, - .base = { - .cra_name = "crct10dif", - .cra_driver_name = "crct10dif-vpmsum", - .cra_priority = 200, - .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init crct10dif_vpmsum_mod_init(void) -{ - if (!cpu_has_feature(CPU_FTR_ARCH_207S)) - return -ENODEV; - - return crypto_register_shash(&alg); -} - -static void __exit crct10dif_vpmsum_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crct10dif_vpmsum_mod_init); -module_exit(crct10dif_vpmsum_mod_fini); - -MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>"); -MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crct10dif"); -MODULE_ALIAS_CRYPTO("crct10dif-vpmsum"); diff --git 
a/arch/powerpc/crypto/curve25519-ppc64le-core.c b/arch/powerpc/crypto/curve25519-ppc64le-core.c
new file mode 100644
index 000000000000..f7810be0b292
--- /dev/null
+++ b/arch/powerpc/crypto/curve25519-ppc64le-core.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2024- IBM Corp.
+ *
+ * X25519 scalar multiplication with 51-bit limbs for PPC64le.
+ * Based on RFC7748 and the AArch64 optimized implementation for X25519
+ * - Algorithm 1 Scalar multiplication of a variable point
+ */
+
+#include <crypto/curve25519.h>
+#include <crypto/internal/kpp.h>
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+
+#include <linux/cpufeature.h>
+#include <linux/processor.h>
+
+typedef uint64_t fe51[5];
+
+asmlinkage void x25519_fe51_mul(fe51 h, const fe51 f, const fe51 g);
+asmlinkage void x25519_fe51_sqr(fe51 h, const fe51 f);
+asmlinkage void x25519_fe51_mul121666(fe51 h, fe51 f);
+asmlinkage void x25519_fe51_sqr_times(fe51 h, const fe51 f, int n);
+asmlinkage void x25519_fe51_frombytes(fe51 h, const uint8_t *s);
+asmlinkage void x25519_fe51_tobytes(uint8_t *s, const fe51 h);
+asmlinkage void x25519_cswap(fe51 p, fe51 q, unsigned int bit);
+
+#define fmul x25519_fe51_mul
+#define fsqr x25519_fe51_sqr
+#define fmul121666 x25519_fe51_mul121666
+#define fe51_tobytes x25519_fe51_tobytes
+
+static void fadd(fe51 h, const fe51 f, const fe51 g)
+{
+	h[0] = f[0] + g[0];
+	h[1] = f[1] + g[1];
+	h[2] = f[2] + g[2];
+	h[3] = f[3] + g[3];
+	h[4] = f[4] + g[4];
+}
+
+/*
+ * Prime = 2 ** 255 - 19, 255 bits
+ * (0x7fffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffed)
+ *
+ * Prime in 5 51-bit limbs
+ */
+static fe51 prime51 = { 0x7ffffffffffed, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff};
+
+static void fsub(fe51 h, const fe51 f, const fe51 g)
+{
+	h[0] = (f[0] + ((prime51[0] * 2))) - g[0];
+	h[1] = (f[1] + ((prime51[1] * 2))) - g[1];
+	h[2] = (f[2] + ((prime51[2] * 2))) - g[2];
+	h[3] = (f[3] + ((prime51[3] * 2))) - g[3];
+	h[4] = (f[4] + ((prime51[4] * 2))) - g[4];
+}
+
+static void fe51_frombytes(fe51 h, const uint8_t *s)
+{
+	/*
+	 * Make sure 64-bit aligned.
+	 */
+	unsigned char sbuf[32+8];
+	unsigned char *sb = PTR_ALIGN((void *)sbuf, 8);
+
+	memcpy(sb, s, 32);
+	x25519_fe51_frombytes(h, sb);
+}
+
+static void finv(fe51 o, const fe51 i)
+{
+	fe51 a0, b, c, t00;
+
+	fsqr(a0, i);
+	x25519_fe51_sqr_times(t00, a0, 2);
+
+	fmul(b, t00, i);
+	fmul(a0, b, a0);
+
+	fsqr(t00, a0);
+
+	fmul(b, t00, b);
+	x25519_fe51_sqr_times(t00, b, 5);
+
+	fmul(b, t00, b);
+	x25519_fe51_sqr_times(t00, b, 10);
+
+	fmul(c, t00, b);
+	x25519_fe51_sqr_times(t00, c, 20);
+
+	fmul(t00, t00, c);
+	x25519_fe51_sqr_times(t00, t00, 10);
+
+	fmul(b, t00, b);
+	x25519_fe51_sqr_times(t00, b, 50);
+
+	fmul(c, t00, b);
+	x25519_fe51_sqr_times(t00, c, 100);
+
+	fmul(t00, t00, c);
+	x25519_fe51_sqr_times(t00, t00, 50);
+
+	fmul(t00, t00, b);
+	x25519_fe51_sqr_times(t00, t00, 5);
+
+	fmul(o, t00, a0);
+}
+
+static void curve25519_fe51(uint8_t out[32], const uint8_t scalar[32],
+			    const uint8_t point[32])
+{
+	fe51 x1, x2, z2, x3, z3;
+	uint8_t s[32];
+	unsigned int swap = 0;
+	int i;
+
+	memcpy(s, scalar, 32);
+	s[0]  &= 0xf8;
+	s[31] &= 0x7f;
+	s[31] |= 0x40;
+	fe51_frombytes(x1, point);
+
+	z2[0] = z2[1] = z2[2] = z2[3] = z2[4] = 0;
+	x3[0] = x1[0];
+	x3[1] = x1[1];
+	x3[2] = x1[2];
+	x3[3] = x1[3];
+	x3[4] = x1[4];
+
+	x2[0] = z3[0] = 1;
+	x2[1] = z3[1] = 0;
+	x2[2] = z3[2] = 0;
+	x2[3] = z3[3] = 0;
+	x2[4] = z3[4] = 0;
+
+	for (i = 254; i >= 0; --i) {
+		unsigned int k_t = 1 & (s[i / 8] >> (i & 7));
+		fe51 a, b, c, d, e;
+		fe51 da, cb, aa, bb;
+		fe51 dacb_p, dacb_m;
+
+		swap ^= k_t;
+		x25519_cswap(x2, x3, swap);
+		x25519_cswap(z2, z3, swap);
+		swap = k_t;
+
+		fsub(b, x2, z2);	// B = x_2 - z_2
+		fadd(a, x2, z2);	// A = x_2 + z_2
+		fsub(d, x3, z3);	// D = x_3 - z_3
+		fadd(c, x3, z3);	// C = x_3 + z_3
+
+		fsqr(bb, b);		// BB = B^2
+		fsqr(aa, a);		// AA = A^2
+		fmul(da, d, a);		// DA = D * A
+		fmul(cb, c, b);		// CB = C * B
+
+		fsub(e, aa, bb);	// E = AA - BB
+		fmul(x2, aa, bb);	// x2 = AA * BB
+		fadd(dacb_p, da, cb);	// DA + CB
+		fsub(dacb_m, da, cb);	// DA - CB
+
+		fmul121666(z3, e);	// 121666 * E
+		fsqr(z2, dacb_m);	// (DA - CB)^2
+		fsqr(x3, dacb_p);	// x3 = (DA + CB)^2
+		fadd(b, bb, z3);	// BB + 121666 * E
+		fmul(z3, x1, z2);	// z3 = x1 * (DA - CB)^2
+		fmul(z2, e, b);		// z2 = E * (BB + 121666 * E)
+	}
+
+	finv(z2, z2);
+	fmul(x2, x2, z2);
+	fe51_tobytes(out, x2);
+}
+
+void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE],
+		     const u8 secret[CURVE25519_KEY_SIZE],
+		     const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+	curve25519_fe51(mypublic, secret, basepoint);
+}
+EXPORT_SYMBOL(curve25519_arch);
+
+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
+			  const u8 secret[CURVE25519_KEY_SIZE])
+{
+	curve25519_fe51(pub, secret, curve25519_base_point);
+}
+EXPORT_SYMBOL(curve25519_base_arch);
+
+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+				 unsigned int len)
+{
+	u8 *secret = kpp_tfm_ctx(tfm);
+
+	if (!len)
+		curve25519_generate_secret(secret);
+	else if (len == CURVE25519_KEY_SIZE &&
+		 crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
+		memcpy(secret, buf, CURVE25519_KEY_SIZE);
+	else
+		return -EINVAL;
+	return 0;
+}
+
+static int curve25519_generate_public_key(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	const u8 *secret = kpp_tfm_ctx(tfm);
+	u8 buf[CURVE25519_KEY_SIZE];
+	int copied, nbytes;
+
+	if (req->src)
+		return -EINVAL;
+
+	curve25519_base_arch(buf, secret);
+
+	/* might want less than we've got */
+	nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
+	copied = sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, + nbytes), + buf, nbytes); + if (copied != nbytes) + return -EINVAL; + return 0; +} + +static int curve25519_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + const u8 *secret = kpp_tfm_ctx(tfm); + u8 public_key[CURVE25519_KEY_SIZE]; + u8 buf[CURVE25519_KEY_SIZE]; + int copied, nbytes; + + if (!req->src) + return -EINVAL; + + copied = sg_copy_to_buffer(req->src, + sg_nents_for_len(req->src, + CURVE25519_KEY_SIZE), + public_key, CURVE25519_KEY_SIZE); + if (copied != CURVE25519_KEY_SIZE) + return -EINVAL; + + curve25519_arch(buf, secret, public_key); + + /* might want less than we've got */ + nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); + copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, + nbytes), + buf, nbytes); + if (copied != nbytes) + return -EINVAL; + return 0; +} + +static unsigned int curve25519_max_size(struct crypto_kpp *tfm) +{ + return CURVE25519_KEY_SIZE; +} + +static struct kpp_alg curve25519_alg = { + .base.cra_name = "curve25519", + .base.cra_driver_name = "curve25519-ppc64le", + .base.cra_priority = 200, + .base.cra_module = THIS_MODULE, + .base.cra_ctxsize = CURVE25519_KEY_SIZE, + + .set_secret = curve25519_set_secret, + .generate_public_key = curve25519_generate_public_key, + .compute_shared_secret = curve25519_compute_shared_secret, + .max_size = curve25519_max_size, +}; + + +static int __init curve25519_mod_init(void) +{ + return IS_REACHABLE(CONFIG_CRYPTO_KPP) ? + crypto_register_kpp(&curve25519_alg) : 0; +} + +static void __exit curve25519_mod_exit(void) +{ + if (IS_REACHABLE(CONFIG_CRYPTO_KPP)) + crypto_unregister_kpp(&curve25519_alg); +} + +module_init(curve25519_mod_init); +module_exit(curve25519_mod_exit); + +MODULE_ALIAS_CRYPTO("curve25519"); +MODULE_ALIAS_CRYPTO("curve25519-ppc64le"); +MODULE_DESCRIPTION("PPC64le Curve25519 scalar multiplication with 51 bits limbs"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Danny Tsen <dtsen@us.ibm.com>"); diff --git a/arch/powerpc/crypto/curve25519-ppc64le_asm.S b/arch/powerpc/crypto/curve25519-ppc64le_asm.S new file mode 100644 index 000000000000..06c1febe24b9 --- /dev/null +++ b/arch/powerpc/crypto/curve25519-ppc64le_asm.S @@ -0,0 +1,671 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +# +# This code is taken from CRYPTOGAMs[1] and is included here using the option +# in the license to distribute the code under the GPL. Therefore this program +# is free software; you can redistribute it and/or modify it under the terms of +# the GNU General Public License version 2 as published by the Free Software +# Foundation. +# +# [1] https://github.com/dot-asm/cryptogams/ + +# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org> +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain copyright notices, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# * Neither the name of the CRYPTOGAMS nor the names of its +# copyright holder and contributors may be used to endorse or +# promote products derived from this software without specific +# prior written permission. 
+# +# ALTERNATIVELY, provided that this notice is retained in full, this +# product may be distributed under the terms of the GNU General Public +# License (GPL), in which case the provisions of the GPL apply INSTEAD OF +# those given above. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see https://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# +# ==================================================================== +# Written and Modified by Danny Tsen <dtsen@us.ibm.com> +# - Added x25519_fe51_sqr_times, x25519_fe51_frombytes, x25519_fe51_tobytes +# and x25519_cswap +# +# Copyright 2024- IBM Corp. +# +# X25519 lower-level primitives for PPC64. +# + +#include <linux/linkage.h> + +.text + +.align 5 +SYM_FUNC_START(x25519_fe51_mul) + + stdu 1,-144(1) + std 21,56(1) + std 22,64(1) + std 23,72(1) + std 24,80(1) + std 25,88(1) + std 26,96(1) + std 27,104(1) + std 28,112(1) + std 29,120(1) + std 30,128(1) + std 31,136(1) + + ld 6,0(5) + ld 7,0(4) + ld 8,8(4) + ld 9,16(4) + ld 10,24(4) + ld 11,32(4) + + mulld 22,7,6 + mulhdu 23,7,6 + + mulld 24,8,6 + mulhdu 25,8,6 + + mulld 30,11,6 + mulhdu 31,11,6 + ld 4,8(5) + mulli 11,11,19 + + mulld 26,9,6 + mulhdu 27,9,6 + + mulld 28,10,6 + mulhdu 29,10,6 + mulld 12,11,4 + mulhdu 21,11,4 + addc 22,22,12 + adde 23,23,21 + + mulld 12,7,4 + mulhdu 21,7,4 + addc 24,24,12 + adde 25,25,21 + + mulld 12,10,4 + mulhdu 21,10,4 + ld 6,16(5) + mulli 10,10,19 + addc 30,30,12 + adde 31,31,21 + + mulld 12,8,4 + mulhdu 21,8,4 + addc 26,26,12 + adde 27,27,21 + + mulld 12,9,4 + mulhdu 21,9,4 + addc 28,28,12 + adde 29,29,21 + mulld 12,10,6 + mulhdu 21,10,6 + addc 22,22,12 + adde 23,23,21 + + mulld 12,11,6 + mulhdu 21,11,6 + addc 24,24,12 + adde 25,25,21 + + mulld 12,9,6 + mulhdu 21,9,6 + ld 4,24(5) + mulli 9,9,19 + addc 30,30,12 + adde 31,31,21 + + mulld 12,7,6 + mulhdu 21,7,6 + addc 26,26,12 + adde 27,27,21 + + mulld 12,8,6 + mulhdu 21,8,6 + addc 28,28,12 + adde 29,29,21 + mulld 12,9,4 + mulhdu 21,9,4 + addc 22,22,12 + adde 23,23,21 + + mulld 12,10,4 + mulhdu 21,10,4 + addc 24,24,12 + adde 25,25,21 + + mulld 12,8,4 + mulhdu 21,8,4 + ld 6,32(5) + mulli 8,8,19 + addc 30,30,12 + adde 31,31,21 + + mulld 12,11,4 + mulhdu 21,11,4 + addc 26,26,12 + adde 27,27,21 + + mulld 12,7,4 + mulhdu 21,7,4 + addc 28,28,12 + adde 29,29,21 + mulld 12,8,6 + mulhdu 21,8,6 + addc 22,22,12 + adde 23,23,21 + + mulld 12,9,6 + mulhdu 21,9,6 + addc 24,24,12 + adde 25,25,21 + + mulld 12,10,6 + mulhdu 21,10,6 + addc 26,26,12 + adde 
27,27,21 + + mulld 12,11,6 + mulhdu 21,11,6 + addc 28,28,12 + adde 29,29,21 + + mulld 12,7,6 + mulhdu 21,7,6 + addc 30,30,12 + adde 31,31,21 + +.Lfe51_reduce: + li 0,-1 + srdi 0,0,13 + + srdi 12,26,51 + and 9,26,0 + insrdi 12,27,51,0 + srdi 21,22,51 + and 7,22,0 + insrdi 21,23,51,0 + addc 28,28,12 + addze 29,29 + addc 24,24,21 + addze 25,25 + + srdi 12,28,51 + and 10,28,0 + insrdi 12,29,51,0 + srdi 21,24,51 + and 8,24,0 + insrdi 21,25,51,0 + addc 30,30,12 + addze 31,31 + add 9,9,21 + + srdi 12,30,51 + and 11,30,0 + insrdi 12,31,51,0 + mulli 12,12,19 + + add 7,7,12 + + srdi 21,9,51 + and 9,9,0 + add 10,10,21 + + srdi 12,7,51 + and 7,7,0 + add 8,8,12 + + std 9,16(3) + std 10,24(3) + std 11,32(3) + std 7,0(3) + std 8,8(3) + + ld 21,56(1) + ld 22,64(1) + ld 23,72(1) + ld 24,80(1) + ld 25,88(1) + ld 26,96(1) + ld 27,104(1) + ld 28,112(1) + ld 29,120(1) + ld 30,128(1) + ld 31,136(1) + addi 1,1,144 + blr +SYM_FUNC_END(x25519_fe51_mul) + +.align 5 +SYM_FUNC_START(x25519_fe51_sqr) + + stdu 1,-144(1) + std 21,56(1) + std 22,64(1) + std 23,72(1) + std 24,80(1) + std 25,88(1) + std 26,96(1) + std 27,104(1) + std 28,112(1) + std 29,120(1) + std 30,128(1) + std 31,136(1) + + ld 7,0(4) + ld 8,8(4) + ld 9,16(4) + ld 10,24(4) + ld 11,32(4) + + add 6,7,7 + mulli 21,11,19 + + mulld 22,7,7 + mulhdu 23,7,7 + mulld 24,8,6 + mulhdu 25,8,6 + mulld 26,9,6 + mulhdu 27,9,6 + mulld 28,10,6 + mulhdu 29,10,6 + mulld 30,11,6 + mulhdu 31,11,6 + add 6,8,8 + mulld 12,11,21 + mulhdu 11,11,21 + addc 28,28,12 + adde 29,29,11 + + mulli 5,10,19 + + mulld 12,8,8 + mulhdu 11,8,8 + addc 26,26,12 + adde 27,27,11 + mulld 12,9,6 + mulhdu 11,9,6 + addc 28,28,12 + adde 29,29,11 + mulld 12,10,6 + mulhdu 11,10,6 + addc 30,30,12 + adde 31,31,11 + mulld 12,21,6 + mulhdu 11,21,6 + add 6,10,10 + addc 22,22,12 + adde 23,23,11 + mulld 12,10,5 + mulhdu 10,10,5 + addc 24,24,12 + adde 25,25,10 + mulld 12,6,21 + mulhdu 10,6,21 + add 6,9,9 + addc 26,26,12 + adde 27,27,10 + + mulld 12,9,9 + mulhdu 10,9,9 + addc 30,30,12 + adde 31,31,10 + mulld 12,5,6 + mulhdu 10,5,6 + addc 22,22,12 + adde 23,23,10 + mulld 12,21,6 + mulhdu 10,21,6 + addc 24,24,12 + adde 25,25,10 + + b .Lfe51_reduce +SYM_FUNC_END(x25519_fe51_sqr) + +.align 5 +SYM_FUNC_START(x25519_fe51_mul121666) + + stdu 1,-144(1) + std 21,56(1) + std 22,64(1) + std 23,72(1) + std 24,80(1) + std 25,88(1) + std 26,96(1) + std 27,104(1) + std 28,112(1) + std 29,120(1) + std 30,128(1) + std 31,136(1) + + lis 6,1 + ori 6,6,56130 + ld 7,0(4) + ld 8,8(4) + ld 9,16(4) + ld 10,24(4) + ld 11,32(4) + + mulld 22,7,6 + mulhdu 23,7,6 + mulld 24,8,6 + mulhdu 25,8,6 + mulld 26,9,6 + mulhdu 27,9,6 + mulld 28,10,6 + mulhdu 29,10,6 + mulld 30,11,6 + mulhdu 31,11,6 + + b .Lfe51_reduce +SYM_FUNC_END(x25519_fe51_mul121666) + +.align 5 +SYM_FUNC_START(x25519_fe51_sqr_times) + + stdu 1,-144(1) + std 21,56(1) + std 22,64(1) + std 23,72(1) + std 24,80(1) + std 25,88(1) + std 26,96(1) + std 27,104(1) + std 28,112(1) + std 29,120(1) + std 30,128(1) + std 31,136(1) + + ld 7,0(4) + ld 8,8(4) + ld 9,16(4) + ld 10,24(4) + ld 11,32(4) + + mtctr 5 + +.Lsqr_times_loop: + add 6,7,7 + mulli 21,11,19 + + mulld 22,7,7 + mulhdu 23,7,7 + mulld 24,8,6 + mulhdu 25,8,6 + mulld 26,9,6 + mulhdu 27,9,6 + mulld 28,10,6 + mulhdu 29,10,6 + mulld 30,11,6 + mulhdu 31,11,6 + add 6,8,8 + mulld 12,11,21 + mulhdu 11,11,21 + addc 28,28,12 + adde 29,29,11 + + mulli 5,10,19 + + mulld 12,8,8 + mulhdu 11,8,8 + addc 26,26,12 + adde 27,27,11 + mulld 12,9,6 + mulhdu 11,9,6 + addc 28,28,12 + adde 29,29,11 + mulld 12,10,6 + mulhdu 11,10,6 + addc 30,30,12 + adde 
31,31,11
+	mulld	12,21,6
+	mulhdu	11,21,6
+	add	6,10,10
+	addc	22,22,12
+	adde	23,23,11
+	mulld	12,10,5
+	mulhdu	10,10,5
+	addc	24,24,12
+	adde	25,25,10
+	mulld	12,6,21
+	mulhdu	10,6,21
+	add	6,9,9
+	addc	26,26,12
+	adde	27,27,10
+
+	mulld	12,9,9
+	mulhdu	10,9,9
+	addc	30,30,12
+	adde	31,31,10
+	mulld	12,5,6
+	mulhdu	10,5,6
+	addc	22,22,12
+	adde	23,23,10
+	mulld	12,21,6
+	mulhdu	10,21,6
+	addc	24,24,12
+	adde	25,25,10
+
+	# fe51_reduce
+	li	0,-1
+	srdi	0,0,13
+
+	srdi	12,26,51
+	and	9,26,0
+	insrdi	12,27,51,0
+	srdi	21,22,51
+	and	7,22,0
+	insrdi	21,23,51,0
+	addc	28,28,12
+	addze	29,29
+	addc	24,24,21
+	addze	25,25
+
+	srdi	12,28,51
+	and	10,28,0
+	insrdi	12,29,51,0
+	srdi	21,24,51
+	and	8,24,0
+	insrdi	21,25,51,0
+	addc	30,30,12
+	addze	31,31
+	add	9,9,21
+
+	srdi	12,30,51
+	and	11,30,0
+	insrdi	12,31,51,0
+	mulli	12,12,19
+
+	add	7,7,12
+
+	srdi	21,9,51
+	and	9,9,0
+	add	10,10,21
+
+	srdi	12,7,51
+	and	7,7,0
+	add	8,8,12
+
+	bdnz	.Lsqr_times_loop
+
+	std	9,16(3)
+	std	10,24(3)
+	std	11,32(3)
+	std	7,0(3)
+	std	8,8(3)
+
+	ld	21,56(1)
+	ld	22,64(1)
+	ld	23,72(1)
+	ld	24,80(1)
+	ld	25,88(1)
+	ld	26,96(1)
+	ld	27,104(1)
+	ld	28,112(1)
+	ld	29,120(1)
+	ld	30,128(1)
+	ld	31,136(1)
+	addi	1,1,144
+	blr
+SYM_FUNC_END(x25519_fe51_sqr_times)
+
+.align	5
+SYM_FUNC_START(x25519_fe51_frombytes)
+
+	li	12, -1
+	srdi	12, 12, 13	# 0x7ffffffffffff
+
+	ld	5, 0(4)
+	ld	6, 8(4)
+	ld	7, 16(4)
+	ld	8, 24(4)
+
+	srdi	10, 5, 51
+	and	5, 5, 12	# h0
+
+	sldi	11, 6, 13
+	or	11, 10, 11	# h1t
+	srdi	10, 6, 38
+	and	6, 11, 12	# h1
+
+	sldi	11, 7, 26
+	or	10, 10, 11	# h2t
+
+	srdi	11, 7, 25
+	and	7, 10, 12	# h2
+	sldi	10, 8, 39
+	or	11, 11, 10	# h3t
+
+	srdi	9, 8, 12
+	and	8, 11, 12	# h3
+	and	9, 9, 12	# h4
+
+	std	5, 0(3)
+	std	6, 8(3)
+	std	7, 16(3)
+	std	8, 24(3)
+	std	9, 32(3)
+
+	blr
+SYM_FUNC_END(x25519_fe51_frombytes)
+
+.align	5
+SYM_FUNC_START(x25519_fe51_tobytes)
+
+	ld	5, 0(4)
+	ld	6, 8(4)
+	ld	7, 16(4)
+	ld	8, 24(4)
+	ld	9, 32(4)
+
+	li	12, -1
+	srdi	12, 12, 13	# 0x7ffffffffffff
+
+	# Full reduction
+	addi	10, 5, 19
+	srdi	10, 10, 51
+	add	10, 10, 6
+	srdi	10, 10, 51
+	add	10, 10, 7
+	srdi	10, 10, 51
+	add	10, 10, 8
+	srdi	10, 10, 51
+	add	10, 10, 9
+	srdi	10, 10, 51
+
+	mulli	10, 10, 19
+	add	5, 5, 10
+	srdi	11, 5, 51
+	add	6, 6, 11
+	srdi	11, 6, 51
+	add	7, 7, 11
+	srdi	11, 7, 51
+	add	8, 8, 11
+	srdi	11, 8, 51
+	add	9, 9, 11
+
+	and	5, 5, 12
+	and	6, 6, 12
+	and	7, 7, 12
+	and	8, 8, 12
+	and	9, 9, 12
+
+	sldi	10, 6, 51
+	or	5, 5, 10	# s0
+
+	srdi	11, 6, 13
+	sldi	10, 7, 38
+	or	6, 11, 10	# s1
+
+	srdi	11, 7, 26
+	sldi	10, 8, 25
+	or	7, 11, 10	# s2
+
+	srdi	11, 8, 39
+	sldi	10, 9, 12
+	or	8, 11, 10	# s3
+
+	std	5, 0(3)
+	std	6, 8(3)
+	std	7, 16(3)
+	std	8, 24(3)
+
+	blr
+SYM_FUNC_END(x25519_fe51_tobytes)
+
+.align	5
+SYM_FUNC_START(x25519_cswap)
+
+	li	7, 5
+	neg	6, 5
+	mtctr	7
+
+.Lswap_loop:
+	ld	8, 0(3)
+	ld	9, 0(4)
+	xor	10, 8, 9
+	and	10, 10, 6
+	xor	11, 8, 10
+	xor	12, 9, 10
+	std	11, 0(3)
+	addi	3, 3, 8
+	std	12, 0(4)
+	addi	4, 4, 8
+	bdnz	.Lswap_loop
+
+	blr
+SYM_FUNC_END(x25519_cswap)
diff --git a/arch/powerpc/crypto/ghash.c b/arch/powerpc/crypto/ghash.c
index 77eca20bc7ac..7308735bdb33 100644
--- a/arch/powerpc/crypto/ghash.c
+++ b/arch/powerpc/crypto/ghash.c
@@ -11,19 +11,18 @@ * Copyright (C) 2014 - 2018 Linaro Ltd.
<ard.biesheuvel@linaro.org> */ -#include <linux/types.h> -#include <linux/err.h> -#include <linux/crypto.h> -#include <linux/delay.h> -#include <asm/simd.h> +#include "aesp8-ppc.h" #include <asm/switch_to.h> #include <crypto/aes.h> +#include <crypto/gf128mul.h> #include <crypto/ghash.h> -#include <crypto/scatterwalk.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> -#include <crypto/b128ops.h> -#include "aesp8-ppc.h" +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/uaccess.h> void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); @@ -39,15 +38,12 @@ struct p8_ghash_ctx { struct p8_ghash_desc_ctx { u64 shash[2]; - u8 buffer[GHASH_DIGEST_SIZE]; - int bytes; }; static int p8_ghash_init(struct shash_desc *desc) { struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - dctx->bytes = 0; memset(dctx->shash, 0, GHASH_DIGEST_SIZE); return 0; } @@ -74,27 +70,30 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, } static inline void __ghash_block(struct p8_ghash_ctx *ctx, - struct p8_ghash_desc_ctx *dctx) + struct p8_ghash_desc_ctx *dctx, + const u8 *src) { if (crypto_simd_usable()) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, - dctx->buffer, GHASH_DIGEST_SIZE); + gcm_ghash_p8(dctx->shash, ctx->htable, src, GHASH_BLOCK_SIZE); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } else { - crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); + crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); gf128mul_lle((be128 *)dctx->shash, &ctx->key); } } -static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, - struct p8_ghash_desc_ctx *dctx, - const u8 *src, unsigned int srclen) +static inline int __ghash_blocks(struct p8_ghash_ctx *ctx, + struct p8_ghash_desc_ctx *dctx, + const u8 *src, unsigned int srclen) { + int remain = srclen - round_down(srclen, GHASH_BLOCK_SIZE); + + srclen -= remain; if (crypto_simd_usable()) { preempt_disable(); pagefault_disable(); @@ -105,62 +104,38 @@ static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, pagefault_enable(); preempt_enable(); } else { - while (srclen >= GHASH_BLOCK_SIZE) { + do { crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); gf128mul_lle((be128 *)dctx->shash, &ctx->key); srclen -= GHASH_BLOCK_SIZE; src += GHASH_BLOCK_SIZE; - } + } while (srclen); } + + return remain; } static int p8_ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { - unsigned int len; struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, - srclen); - dctx->bytes += srclen; - return 0; - } - memcpy(dctx->buffer + dctx->bytes, src, - GHASH_DIGEST_SIZE - dctx->bytes); - - __ghash_block(ctx, dctx); - - src += GHASH_DIGEST_SIZE - dctx->bytes; - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; - dctx->bytes = 0; - } - len = srclen & ~(GHASH_DIGEST_SIZE - 1); - if (len) { - __ghash_blocks(ctx, dctx, src, len); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; - } - return 0; + return __ghash_blocks(ctx, dctx, src, srclen); } -static int p8_ghash_final(struct shash_desc *desc, u8 *out) +static int p8_ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - 
int i; struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) - dctx->buffer[i] = 0; - __ghash_block(ctx, dctx); - dctx->bytes = 0; + if (len) { + u8 buf[GHASH_BLOCK_SIZE] = {}; + + memcpy(buf, src, len); + __ghash_block(ctx, dctx, buf); + memzero_explicit(buf, sizeof(buf)); } memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); return 0; @@ -170,14 +145,14 @@ struct shash_alg p8_ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = p8_ghash_init, .update = p8_ghash_update, - .final = p8_ghash_final, + .finup = p8_ghash_finup, .setkey = p8_ghash_setkey, - .descsize = sizeof(struct p8_ghash_desc_ctx) - + sizeof(struct ghash_desc_ctx), + .descsize = sizeof(struct p8_ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", .cra_priority = 1000, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct p8_ghash_ctx), .cra_module = THIS_MODULE, diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c index c24f605033bd..204440a90cd8 100644 --- a/arch/powerpc/crypto/md5-glue.c +++ b/arch/powerpc/crypto/md5-glue.c @@ -8,25 +8,13 @@ */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/md5.h> -#include <asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks); -static inline void ppc_md5_clear_context(struct md5_state *sctx) -{ - int count = sizeof(struct md5_state) >> 2; - u32 *ptr = (u32 *)sctx; - - /* make sure we can clear the fast way */ - BUILD_BUG_ON(sizeof(struct md5_state) % 4); - do { *ptr++ = 0; } while (--count); -} - static int ppc_md5_init(struct shash_desc *desc) { struct md5_state *sctx = shash_desc_ctx(desc); @@ -44,79 +32,34 @@ static int ppc_md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->byte_count & 0x3f; - unsigned int avail = 64 - offset; - const u8 *src = data; - sctx->byte_count += len; - - if (avail > len) { - memcpy((char *)sctx->block + offset, src, len); - return 0; - } - - if (offset) { - memcpy((char *)sctx->block + offset, src, avail); - ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1); - len -= avail; - src += avail; - } - - if (len > 63) { - ppc_md5_transform(sctx->hash, src, len >> 6); - src += len & ~0x3f; - len &= 0x3f; - } - - memcpy((char *)sctx->block, src, len); - return 0; + sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE); + ppc_md5_transform(sctx->hash, data, len >> 6); + return len - round_down(len, MD5_HMAC_BLOCK_SIZE); } -static int ppc_md5_final(struct shash_desc *desc, u8 *out) +static int ppc_md5_finup(struct shash_desc *desc, const u8 *src, + unsigned int offset, u8 *out) { struct md5_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->byte_count & 0x3f; - const u8 *src = (const u8 *)sctx->block; - u8 *p = (u8 *)src + offset; - int padlen = 55 - offset; - __le64 *pbits = (__le64 *)((char *)sctx->block + 56); + __le64 block[MD5_BLOCK_WORDS] = {}; + u8 *p = memcpy(block, src, offset); __le32 *dst = (__le32 *)out; + __le64 *pbits; + src = p; + p += offset; *p++ = 0x80; - - if (padlen < 0) { - memset(p, 0x00, padlen + sizeof (u64)); - ppc_md5_transform(sctx->hash, src, 1); - p 
= (char *)sctx->block; - padlen = 56; - } - - memset(p, 0, padlen); + sctx->byte_count += offset; + pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1]; *pbits = cpu_to_le64(sctx->byte_count << 3); - ppc_md5_transform(sctx->hash, src, 1); + ppc_md5_transform(sctx->hash, src, (pbits - block + 1) / 8); + memzero_explicit(block, sizeof(block)); dst[0] = cpu_to_le32(sctx->hash[0]); dst[1] = cpu_to_le32(sctx->hash[1]); dst[2] = cpu_to_le32(sctx->hash[2]); dst[3] = cpu_to_le32(sctx->hash[3]); - - ppc_md5_clear_context(sctx); - return 0; -} - -static int ppc_md5_export(struct shash_desc *desc, void *out) -{ - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int ppc_md5_import(struct shash_desc *desc, const void *in) -{ - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); return 0; } @@ -124,15 +67,13 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = ppc_md5_init, .update = ppc_md5_update, - .final = ppc_md5_final, - .export = ppc_md5_export, - .import = ppc_md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .finup = ppc_md5_finup, + .descsize = MD5_STATE_SIZE, .base = { .cra_name = "md5", .cra_driver_name= "md5-ppc", .cra_priority = 200, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c deleted file mode 100644 index 95dd708573ee..000000000000 --- a/arch/powerpc/crypto/poly1305-p10-glue.c +++ /dev/null @@ -1,186 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Poly1305 authenticator algorithm, RFC7539. - * - * Copyright 2023- IBM Corp. All rights reserved. 
- */ - -#include <crypto/algapi.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/jump_label.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <crypto/internal/simd.h> -#include <linux/cpufeature.h> -#include <asm/unaligned.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen); -asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit); -asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst); - -static void vsx_begin(void) -{ - preempt_disable(); - enable_kernel_vsx(); -} - -static void vsx_end(void) -{ - disable_kernel_vsx(); - preempt_enable(); -} - -static int crypto_poly1305_p10_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - poly1305_core_init(&dctx->h); - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx, - const u8 *inp, unsigned int len) -{ - unsigned int acc = 0; - - if (unlikely(!dctx->sset)) { - if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) { - struct poly1305_core_key *key = &dctx->core_r; - - key->key.r64[0] = get_unaligned_le64(&inp[0]); - key->key.r64[1] = get_unaligned_le64(&inp[8]); - inp += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - acc += POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(&inp[0]); - dctx->s[1] = get_unaligned_le32(&inp[4]); - dctx->s[2] = get_unaligned_le32(&inp[8]); - dctx->s[3] = get_unaligned_le32(&inp[12]); - acc += POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return acc; -} - -static int crypto_poly1305_p10_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes, used; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - srclen -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, - POLY1305_BLOCK_SIZE))) { - vsx_begin(); - poly1305_64s(&dctx->h, dctx->buf, - POLY1305_BLOCK_SIZE, 1); - vsx_end(); - } - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { - bytes = round_down(srclen, POLY1305_BLOCK_SIZE); - used = crypto_poly1305_setdctxkey(dctx, src, bytes); - if (likely(used)) { - srclen -= used; - src += used; - } - if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) { - vsx_begin(); - poly1305_p10le_4blocks(&dctx->h, src, srclen); - vsx_end(); - src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4)); - srclen %= POLY1305_BLOCK_SIZE * 4; - } - while (srclen >= POLY1305_BLOCK_SIZE) { - vsx_begin(); - poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1); - vsx_end(); - srclen -= POLY1305_BLOCK_SIZE; - src += POLY1305_BLOCK_SIZE; - } - } - - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } - - return 0; -} - -static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - if ((dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - vsx_begin(); - poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); 
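
	/*
	 * Note on the call above: full 16-byte blocks go through
	 * poly1305_64s() with highbit=1, i.e. with an implicit 2^128 term
	 * added to each block, while the trailing partial block is first
	 * padded in place to m || 0x01 || 0x00... and therefore runs with
	 * highbit=0; the appended 0x01 byte takes over the role of the
	 * high bit. In outline (mirroring the three statements above):
	 *
	 *	buf[n++] = 1;					// append 0x01
	 *	memset(buf + n, 0, POLY1305_BLOCK_SIZE - n);	// zero-pad
	 *	poly1305_64s(h, buf, POLY1305_BLOCK_SIZE, 0);	// no 2^128 term
	 */
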
- vsx_end(); - dctx->buflen = 0; - } - - poly1305_emit_64(&dctx->h, &dctx->s, dst); - return 0; -} - -static struct shash_alg poly1305_alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_p10_init, - .update = crypto_poly1305_p10_update, - .final = crypto_poly1305_p10_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-p10", - .cra_priority = 300, - .cra_blocksize = POLY1305_BLOCK_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -static int __init poly1305_p10_init(void) -{ - return crypto_register_shash(&poly1305_alg); -} - -static void __exit poly1305_p10_exit(void) -{ - crypto_unregister_shash(&poly1305_alg); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init); -module_exit(poly1305_p10_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); -MODULE_DESCRIPTION("Optimized Poly1305 for P10"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-p10"); diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index 9170892a8557..04c88e173ce1 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -7,16 +7,13 @@ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ +#include <asm/switch_to.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/byteorder.h> -#include <asm/switch_to.h> -#include <linux/hardirq.h> +#include <linux/kernel.h> +#include <linux/preempt.h> +#include <linux/module.h> /* * MAX_BYTES defines the number of bytes that are allowed to be processed @@ -30,7 +27,7 @@ */ #define MAX_BYTES 2048 -extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks); +asmlinkage void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks); static void spe_begin(void) { @@ -46,126 +43,45 @@ static void spe_end(void) preempt_enable(); } -static inline void ppc_sha1_clear_context(struct sha1_state *sctx) +static void ppc_spe_sha1_block(struct sha1_state *sctx, const u8 *src, + int blocks) { - int count = sizeof(struct sha1_state) >> 2; - u32 *ptr = (u32 *)sctx; - - /* make sure we can clear the fast way */ - BUILD_BUG_ON(sizeof(struct sha1_state) % 4); - do { *ptr++ = 0; } while (--count); -} - -static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - const unsigned int avail = 64 - offset; - unsigned int bytes; - const u8 *src = data; - - if (avail > len) { - sctx->count += len; - memcpy((char *)sctx->buffer + offset, src, len); - return 0; - } - - sctx->count += len; - - if (offset) { - memcpy((char *)sctx->buffer + offset, src, avail); + do { + int unit = min(blocks, MAX_BYTES / SHA1_BLOCK_SIZE); spe_begin(); - ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1); + ppc_spe_sha1_transform(sctx->state, src, unit); spe_end(); - len -= avail; - src += avail; - } - - while (len > 63) { - bytes = (len > MAX_BYTES) ? 
MAX_BYTES : len; - bytes = bytes & ~0x3f; - - spe_begin(); - ppc_spe_sha1_transform(sctx->state, src, bytes >> 6); - spe_end(); - - src += bytes; - len -= bytes; - } - - memcpy((char *)sctx->buffer, src, len); - return 0; -} - -static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - char *p = (char *)sctx->buffer + offset; - int padlen; - __be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56); - __be32 *dst = (__be32 *)out; - - padlen = 55 - offset; - *p++ = 0x80; - - spe_begin(); - - if (padlen < 0) { - memset(p, 0x00, padlen + sizeof (u64)); - ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1); - p = (char *)sctx->buffer; - padlen = 56; - } - - memset(p, 0, padlen); - *pbits = cpu_to_be64(sctx->count << 3); - ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1); - - spe_end(); - - dst[0] = cpu_to_be32(sctx->state[0]); - dst[1] = cpu_to_be32(sctx->state[1]); - dst[2] = cpu_to_be32(sctx->state[2]); - dst[3] = cpu_to_be32(sctx->state[3]); - dst[4] = cpu_to_be32(sctx->state[4]); - - ppc_sha1_clear_context(sctx); - return 0; + src += unit * SHA1_BLOCK_SIZE; + blocks -= unit; + } while (blocks); } -static int ppc_spe_sha1_export(struct shash_desc *desc, void *out) +static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; + return sha1_base_do_update_blocks(desc, data, len, ppc_spe_sha1_block); } -static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in) +static int ppc_spe_sha1_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; + sha1_base_do_finup(desc, src, len, ppc_spe_sha1_block); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = ppc_spe_sha1_update, - .final = ppc_spe_sha1_final, - .export = ppc_spe_sha1_export, - .import = ppc_spe_sha1_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = ppc_spe_sha1_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-ppc-spe", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c index f283bbd3f121..4593946aa9b3 100644 --- a/arch/powerpc/crypto/sha1.c +++ b/arch/powerpc/crypto/sha1.c @@ -13,107 +13,46 @@ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/byteorder.h> - -void powerpc_sha_transform(u32 *state, const u8 *src); - -static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial, done; - const u8 *src; - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) > 63) { - - if (partial) { - done = -partial; - memcpy(sctx->buffer + partial, data, done + 64); - src = sctx->buffer; - } - - do { - powerpc_sha_transform(sctx->state, src); - done += 64; - src = data + done; - } while (done + 63 < 
len); - - partial = 0; - } - memcpy(sctx->buffer + partial, src, len - done); - - return 0; -} +#include <linux/kernel.h> +#include <linux/module.h> +asmlinkage void powerpc_sha_transform(u32 *state, const u8 *src); -/* Add padding and return the message digest. */ -static int powerpc_sha1_final(struct shash_desc *desc, u8 *out) +static void powerpc_sha_block(struct sha1_state *sctx, const u8 *data, + int blocks) { - struct sha1_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - u32 i, index, padlen; - __be64 bits; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64 */ - index = sctx->count & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64+56) - index); - powerpc_sha1_update(desc, padding, padlen); - - /* Append length */ - powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); - - /* Store state in digest */ - for (i = 0; i < 5; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof *sctx); - - return 0; + do { + powerpc_sha_transform(sctx->state, data); + data += 64; + } while (--blocks); } -static int powerpc_sha1_export(struct shash_desc *desc, void *out) +static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; + return sha1_base_do_update_blocks(desc, data, len, powerpc_sha_block); } -static int powerpc_sha1_import(struct shash_desc *desc, const void *in) +/* Add padding and return the message digest. */ +static int powerpc_sha1_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; + sha1_base_do_finup(desc, src, len, powerpc_sha_block); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = powerpc_sha1_update, - .final = powerpc_sha1_final, - .export = powerpc_sha1_export, - .import = powerpc_sha1_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = powerpc_sha1_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-powerpc", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c deleted file mode 100644 index 2997d13236e0..000000000000 --- a/arch/powerpc/crypto/sha256-spe-glue.c +++ /dev/null @@ -1,235 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Glue code for SHA-256 implementation for SPE instructions (PPC) - * - * Based on generic implementation. The assembler module takes care - * about the SPE registers so it can run from interrupt context. - * - * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> - */ - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/byteorder.h> -#include <asm/switch_to.h> -#include <linux/hardirq.h> - -/* - * MAX_BYTES defines the number of bytes that are allowed to be processed - * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000 - * operations per 64 bytes. 
e500 cores can issue two arithmetic instructions - * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2). - * Thus 1KB of input data will need an estimated maximum of 18,000 cycles. - * Headroom for cache misses included. Even with the low end model clocked - * at 667 MHz this equals to a critical time window of less than 27us. - * - */ -#define MAX_BYTES 1024 - -extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks); - -static void spe_begin(void) -{ - /* We just start SPE operations and will save SPE registers later. */ - preempt_disable(); - enable_kernel_spe(); -} - -static void spe_end(void) -{ - disable_kernel_spe(); - /* reenable preemption */ - preempt_enable(); -} - -static inline void ppc_sha256_clear_context(struct sha256_state *sctx) -{ - int count = sizeof(struct sha256_state) >> 2; - u32 *ptr = (u32 *)sctx; - - /* make sure we can clear the fast way */ - BUILD_BUG_ON(sizeof(struct sha256_state) % 4); - do { *ptr++ = 0; } while (--count); -} - -static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - const unsigned int avail = 64 - offset; - unsigned int bytes; - const u8 *src = data; - - if (avail > len) { - sctx->count += len; - memcpy((char *)sctx->buf + offset, src, len); - return 0; - } - - sctx->count += len; - - if (offset) { - memcpy((char *)sctx->buf + offset, src, avail); - - spe_begin(); - ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1); - spe_end(); - - len -= avail; - src += avail; - } - - while (len > 63) { - /* cut input data into smaller blocks */ - bytes = (len > MAX_BYTES) ? MAX_BYTES : len; - bytes = bytes & ~0x3f; - - spe_begin(); - ppc_spe_sha256_transform(sctx->state, src, bytes >> 6); - spe_end(); - - src += bytes; - len -= bytes; - } - - memcpy((char *)sctx->buf, src, len); - return 0; -} - -static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - char *p = (char *)sctx->buf + offset; - int padlen; - __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56); - __be32 *dst = (__be32 *)out; - - padlen = 55 - offset; - *p++ = 0x80; - - spe_begin(); - - if (padlen < 0) { - memset(p, 0x00, padlen + sizeof (u64)); - ppc_spe_sha256_transform(sctx->state, sctx->buf, 1); - p = (char *)sctx->buf; - padlen = 56; - } - - memset(p, 0, padlen); - *pbits = cpu_to_be64(sctx->count << 3); - ppc_spe_sha256_transform(sctx->state, sctx->buf, 1); - - spe_end(); - - dst[0] = cpu_to_be32(sctx->state[0]); - dst[1] = cpu_to_be32(sctx->state[1]); - dst[2] = cpu_to_be32(sctx->state[2]); - dst[3] = cpu_to_be32(sctx->state[3]); - dst[4] = cpu_to_be32(sctx->state[4]); - dst[5] = cpu_to_be32(sctx->state[5]); - dst[6] = cpu_to_be32(sctx->state[6]); - dst[7] = cpu_to_be32(sctx->state[7]); - - ppc_sha256_clear_context(sctx); - return 0; -} - -static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out) -{ - __be32 D[SHA256_DIGEST_SIZE >> 2]; - __be32 *dst = (__be32 *)out; - - ppc_spe_sha256_final(desc, (u8 *)D); - - /* avoid bytewise memcpy */ - dst[0] = D[0]; - dst[1] = D[1]; - dst[2] = D[2]; - dst[3] = D[3]; - dst[4] = D[4]; - dst[5] = D[5]; - dst[6] = D[6]; - - /* clear sensitive data */ - memzero_explicit(D, SHA256_DIGEST_SIZE); - return 0; -} - -static int ppc_spe_sha256_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - 
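A note on the SPE glue being retired in this hunk: the MAX_BYTES comment above captures the contract that SPE registers may only be touched between enable_kernel_spe() and disable_kernel_spe(), with preemption off for the whole window, so input has to be fed to the transform in bounded chunks. A minimal sketch of that pattern, reusing names from the deleted driver (illustrative only, not part of the patch):

static void spe_hash_chunks(u32 *state, const u8 *src, unsigned int len)
{
	while (len > 63) {
		/* cap each preempt-off window at MAX_BYTES (~27us on e500) */
		unsigned int bytes = (len > 1024 ? 1024 : len) & ~0x3fU;

		preempt_disable();
		enable_kernel_spe();	/* SPE usable from here ... */
		ppc_spe_sha256_transform(state, src, bytes >> 6);
		disable_kernel_spe();
		preempt_enable();	/* ... to here */

		src += bytes;
		len -= bytes;
	}
}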
- memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; -} - -static struct shash_alg algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = ppc_spe_sha256_update, - .final = ppc_spe_sha256_final, - .export = ppc_spe_sha256_export, - .import = ppc_spe_sha256_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-ppc-spe", - .cra_priority = 300, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = ppc_spe_sha256_update, - .final = ppc_spe_sha224_final, - .export = ppc_spe_sha256_export, - .import = ppc_spe_sha256_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-ppc-spe", - .cra_priority = 300, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init ppc_spe_sha256_mod_init(void) -{ - return crypto_register_shashes(algs, ARRAY_SIZE(algs)); -} - -static void __exit ppc_spe_sha256_mod_fini(void) -{ - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -module_init(ppc_spe_sha256_mod_init); -module_exit(ppc_spe_sha256_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha224-ppc-spe"); -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha256-ppc-spe"); diff --git a/arch/powerpc/crypto/vmx.c b/arch/powerpc/crypto/vmx.c index 7eb713cc87c8..0b725e826388 100644 --- a/arch/powerpc/crypto/vmx.c +++ b/arch/powerpc/crypto/vmx.c @@ -74,4 +74,4 @@ MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions " "support on Power 8"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); -MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 61a8d5555cd7..e5fdc336c9b2 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -6,5 +6,4 @@ generic-y += agp.h generic-y += kvm_types.h generic-y += mcs_spinlock.h generic-y += qrwlock.h -generic-y += vtime.h generic-y += early_ioremap.h diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index 2bc53c646ccd..f48e644900a2 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -26,19 +26,23 @@ #define PPC_MIN_STKFRM 112 #ifdef __BIG_ENDIAN__ -#define LHZX_BE stringify_in_c(lhzx) #define LWZX_BE stringify_in_c(lwzx) #define LDX_BE stringify_in_c(ldx) #define STWX_BE stringify_in_c(stwx) #define STDX_BE stringify_in_c(stdx) #else -#define LHZX_BE stringify_in_c(lhbrx) #define LWZX_BE stringify_in_c(lwbrx) #define LDX_BE stringify_in_c(ldbrx) #define STWX_BE stringify_in_c(stwbrx) #define STDX_BE stringify_in_c(stdbrx) #endif +#ifdef CONFIG_CC_IS_CLANG +#define DS_FORM_CONSTRAINT "Z<>" +#else +#define DS_FORM_CONSTRAINT "YZ<>" +#endif + #else /* 32-bit */ /* operations for longs and pointers */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 5bf6a4d49268..d1ea554c33ed 100644 --- a/arch/powerpc/include/asm/atomic.h +++ 
b/arch/powerpc/include/asm/atomic.h @@ -11,6 +11,7 @@ #include <asm/cmpxchg.h> #include <asm/barrier.h> #include <asm/asm-const.h> +#include <asm/asm-compat.h> /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with @@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); else - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter)); return t; } @@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); else - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); + __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i)); } #define ATOMIC64_OP(op, asm_op) \ diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index dc5c039eb28e..dd4eb3063175 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -47,8 +47,6 @@ static inline void pgtable_free(void *table, unsigned index_size) } } -#define get_hugepd_cache_index(x) (x) - static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 52971ee30717..92d21c6faf1e 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -196,7 +196,8 @@ void unmap_kernel_page(unsigned long va); #endif #define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M) -#define MODULES_VADDR (MODULES_END - SZ_256M) +#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) +#define MODULES_VADDR (MODULES_END - MODULES_SIZE) #ifndef __ASSEMBLY__ #include <linux/sched.h> @@ -364,7 +365,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 6472b08fa1b0..aa90a048f319 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -74,20 +74,33 @@ #define remap_4k_pfn(vma, addr, pfn, prot) \ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) -#ifdef CONFIG_HUGETLB_PAGE -static inline int hash__hugepd_ok(hugepd_t hpd) +/* + * With 4K page size the real_pte machinery is all nops. 
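One detail worth unpacking from the asm-compat.h hunk above and the atomic.h hunk that uses it: ld and std are DS-form instructions, whose displacement field drops the two low bits, so the offset must be a multiple of four. GCC's "Y" constraint describes a DS-form-compatible memory operand, but clang does not implement "Y", which is why DS_FORM_CONSTRAINT degrades to plain "Z<>" there. A hedged sketch of how the macro composes into an inline-asm operand (the accessor itself is invented for illustration):

static inline u64 example_load_ds(const u64 *p)
{
	u64 val;

	/* "YZ<>" under gcc, "Z<>" under clang; keeps the address DS-form legal */
	__asm__ __volatile__("ld%U1%X1 %0,%1"
			     : "=r" (val)
			     : DS_FORM_CONSTRAINT (*p));
	return val;
}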
+ */ +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset) { - unsigned long hpdval = hpd_val(hpd); - /* - * if it is not a pte and have hugepd shift mask - * set, then it is a hugepd directory pointer - */ - if (!(hpdval & _PAGE_PTE) && (hpdval & _PAGE_PRESENT) && - ((hpdval & HUGEPD_SHIFT_MASK) != 0)) - return true; - return false; + return (real_pte_t){pte}; } -#endif + +#define __rpte_to_pte(r) ((r).pte) + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT; +} + +#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ + do { \ + index = 0; \ + shift = mmu_psize_defs[psize].shift; \ + +#define pte_iterate_hashed_end() } while(0) + +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */ +#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K /* * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index faf3e3b4e4b2..0755f2567021 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -4,6 +4,7 @@ #ifdef __KERNEL__ #include <asm/asm-const.h> +#include <asm/book3s/64/slice.h> /* * Common bits between 4K and 64K pages in a linux-style PTE. @@ -161,14 +162,10 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge); unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags); /* Atomic PTE updates */ -static inline unsigned long hash__pte_update(struct mm_struct *mm, - unsigned long addr, - pte_t *ptep, unsigned long clr, - unsigned long set, - int huge) +static inline unsigned long hash__pte_update_one(pte_t *ptep, unsigned long clr, + unsigned long set) { __be64 old_be, tmp_be; - unsigned long old; __asm__ __volatile__( "1: ldarx %0,0,%3 # pte_update\n\ @@ -182,11 +179,40 @@ static inline unsigned long hash__pte_update(struct mm_struct *mm, : "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set)) : "cc" ); + + return be64_to_cpu(old_be); +} + +static inline unsigned long hash__pte_update(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, unsigned long clr, + unsigned long set, + int huge) +{ + unsigned long old; + + old = hash__pte_update_one(ptep, clr, set); + + if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && huge) { + unsigned int psize = get_slice_psize(mm, addr); + int nb, i; + + if (psize == MMU_PAGE_16M) + nb = SZ_16M / PMD_SIZE; + else if (psize == MMU_PAGE_16G) + nb = SZ_16G / PUD_SIZE; + else + nb = 1; + + WARN_ON_ONCE(nb == 1); /* Should never happen */ + + for (i = 1; i < nb; i++) + hash__pte_update_one(ptep + i, clr, set); + } /* huge pages use the old page table lock */ if (!huge) assert_pte_locked(mm, addr); - old = be64_to_cpu(old_be); if (old & H_PAGE_HASHPTE) hpte_need_flush(mm, addr, ptep, old, huge); diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index aa1c67c8bfc8..bb786694dd26 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -49,9 +49,6 @@ static inline bool gigantic_page_runtime_supported(void) return true; } -/* hugepd entry valid bit */ -#define HUGEPD_VAL_BITS (0x8000000000000000UL) - #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start extern 
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); @@ -60,29 +57,7 @@ extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t new_pte); -/* - * This should work for other subarchs too. But right now we use the - * new format only for 64bit book3s - */ -static inline pte_t *hugepd_page(hugepd_t hpd) -{ - BUG_ON(!hugepd_ok(hpd)); - /* - * We have only four bits to encode, MMU page size - */ - BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf); - return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK); -} -static inline unsigned int hugepd_mmu_psize(hugepd_t hpd) -{ - return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2; -} - -static inline unsigned int hugepd_shift(hugepd_t hpd) -{ - return mmu_psize_to_shift(hugepd_mmu_psize(hpd)); -} static inline void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { @@ -90,19 +65,6 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, return radix__flush_hugetlb_page(vma, vmaddr); } -static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, - unsigned int pdshift) -{ - unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd); - - return hugepd_page(hpd) + idx; -} - -static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) -{ - *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2)); -} - void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); static inline int check_and_get_huge_psize(int shift) @@ -132,4 +94,10 @@ static inline int check_and_get_huge_psize(int shift) return mmu_psize; } +#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc + +static inline bool arch_has_huge_bootmem_alloc(void) +{ + return (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()); +} #endif diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h deleted file mode 100644 index 48f21820afe2..000000000000 --- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h +++ /dev/null @@ -1,67 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H -#define _ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H -/* - * hash 4k can't share hugetlb and also doesn't support THP - */ -#ifndef __ASSEMBLY__ -#ifdef CONFIG_HUGETLB_PAGE -static inline int pmd_huge(pmd_t pmd) -{ - /* - * leaf pte for huge page - */ - if (radix_enabled()) - return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); - return 0; -} - -static inline int pud_huge(pud_t pud) -{ - /* - * leaf pte for huge page - */ - if (radix_enabled()) - return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); - return 0; -} - -/* - * With radix , we have hugepage ptes in the pud and pmd entries. We don't - * need to setup hugepage directory for them. Our pte and page directory format - * enable us to have this enabled. 
- */ -static inline int hugepd_ok(hugepd_t hpd) -{ - if (radix_enabled()) - return 0; - return hash__hugepd_ok(hpd); -} -#define is_hugepd(hpd) (hugepd_ok(hpd)) - -/* - * 16M and 16G huge page directory tables are allocated from slab cache - * - */ -#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24) -#define H_16G_CACHE_INDEX \ - (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34) - -static inline int get_hugepd_cache_index(int index) -{ - switch (index) { - case H_16M_CACHE_INDEX: - return HTLB_16M_INDEX; - case H_16G_CACHE_INDEX: - return HTLB_16G_INDEX; - default: - BUG(); - } - /* should not reach */ -} - -#endif /* CONFIG_HUGETLB_PAGE */ - -#endif /* __ASSEMBLY__ */ - -#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h index ced7ee8b42fc..4d8d7b4ea16b 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h @@ -4,51 +4,6 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_HUGETLB_PAGE -/* - * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have - * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; - * - * Defined in such a way that we can optimize away code block at build time - * if CONFIG_HUGETLB_PAGE=n. - * - * returns true for pmd migration entries, THP, devmap, hugetlb - * But compile time dependent on CONFIG_HUGETLB_PAGE - */ -static inline int pmd_huge(pmd_t pmd) -{ - /* - * leaf pte for huge page - */ - return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); -} - -static inline int pud_huge(pud_t pud) -{ - /* - * leaf pte for huge page - */ - return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); -} - -/* - * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't - * need to setup hugepage directory for them. Our pte and page directory format - * enable us to have this enabled. 
- */ -static inline int hugepd_ok(hugepd_t hpd) -{ - return 0; -} - -#define is_hugepd(pdep) 0 - -/* - * This should never get called - */ -static __always_inline int get_hugepd_cache_index(int index) -{ - BUILD_BUG(); -} #endif /* CONFIG_HUGETLB_PAGE */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index fac5615e6bc5..a2ddcbb3fcb9 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -262,6 +262,36 @@ extern unsigned long __kernel_io_end; extern struct page *vmemmap; extern unsigned long pci_io_base; + +#define pmd_leaf pmd_leaf +static inline bool pmd_leaf(pmd_t pmd) +{ + return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); +} + +#define pud_leaf pud_leaf +static inline bool pud_leaf(pud_t pud) +{ + return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); +} + +#define pmd_leaf_size pmd_leaf_size +static inline unsigned long pmd_leaf_size(pmd_t pmd) +{ + if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !radix_enabled()) + return SZ_16M; + else + return PMD_SIZE; +} + +#define pud_leaf_size pud_leaf_size +static inline unsigned long pud_leaf_size(pud_t pud) +{ + if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !radix_enabled()) + return SZ_16G; + else + return PUD_SIZE; +} #endif /* __ASSEMBLY__ */ #include <asm/book3s/64/hash.h> @@ -273,11 +303,9 @@ extern unsigned long pci_io_base; #define MAX_PHYSMEM_BITS R_MAX_PHYSMEM_BITS #endif - +/* hash 4k can't share hugetlb and also doesn't support THP */ #ifdef CONFIG_PPC_64K_PAGES #include <asm/book3s/64/pgtable-64k.h> -#else -#include <asm/book3s/64/pgtable-4k.h> #endif #include <asm/barrier.h> @@ -302,32 +330,6 @@ extern unsigned long pci_io_base; #ifndef __ASSEMBLY__ -/* - * This is the default implementation of various PTE accessors, it's - * used in all cases except Book3S with 64K pages where we have a - * concept of sub-pages - */ -#ifndef __real_pte - -#define __real_pte(e, p, o) ((real_pte_t){(e)}) -#define __rpte_to_pte(r) ((r).pte) -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) - -#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ - do { \ - index = 0; \ - shift = mmu_psize_defs[psize].shift; \ - -#define pte_iterate_hashed_end() } while(0) - -/* - * We expect this to be called only for user addresses or kernel virtual - * addresses other than the linear mapping. 
- */ -#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K - -#endif /* __real_pte */ - static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, unsigned long set, int huge) @@ -691,7 +693,7 @@ static inline pte_t pte_swp_mkexclusive(pte_t pte) return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE)); } -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE)); } @@ -1094,8 +1096,8 @@ static inline bool pmd_access_permitted(pmd_t pmd, bool write) #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot); -extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); +extern pud_t pud_modify(pud_t pud, pgprot_t newprot); extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); extern void set_pud_at(struct mm_struct *mm, unsigned long addr, @@ -1356,6 +1358,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, #define __HAVE_ARCH_PMDP_INVALIDATE extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp); #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; @@ -1426,20 +1430,5 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va return false; } -/* - * Like pmd_huge(), but works regardless of config options - */ -#define pmd_leaf pmd_leaf -static inline bool pmd_leaf(pmd_t pmd) -{ - return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); -} - -#define pud_leaf pud_leaf -static inline bool pud_leaf(pud_t pud) -{ - return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); -} - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index ef7d2de33b89..f2656774aaa9 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -121,7 +121,7 @@ static inline void invalidate_dcache_range(unsigned long start, mb(); /* sync */ } -#ifdef CONFIG_4xx +#ifdef CONFIG_44x static inline void flush_instruction_cache(void) { iccci((void *)KERNELBASE); diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h index 6a79b5d1c44f..7fbefd64b4fb 100644 --- a/arch/powerpc/include/asm/cell-pmu.h +++ b/arch/powerpc/include/asm/cell-pmu.h @@ -20,36 +20,9 @@ /* Macros for the pm_control register. */ #define CBE_PM_16BIT_CTR(ctr) (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1)))) -#define CBE_PM_ENABLE_PERF_MON 0x80000000 -#define CBE_PM_STOP_AT_MAX 0x40000000 -#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3) -#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28) -#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17) -#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18) -#define CBE_PM_FREEZE_ALL_CTRS 0x00100000 -#define CBE_PM_ENABLE_EXT_TRACE 0x00008000 -#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9) /* Macros for the trace_address register. */ -#define CBE_PM_TRACE_BUF_FULL 0x00000800 #define CBE_PM_TRACE_BUF_EMPTY 0x00000400 -#define CBE_PM_TRACE_BUF_DATA_COUNT(ta) ((ta) & 0x3ff) -#define CBE_PM_TRACE_BUF_MAX_COUNT 0x400 - -/* Macros for the pm07_control registers. 
*/ -#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f) -#define CBE_PM_CTR_INPUT_CONTROL 0x02000000 -#define CBE_PM_CTR_POLARITY 0x01000000 -#define CBE_PM_CTR_COUNT_CYCLES 0x00800000 -#define CBE_PM_CTR_ENABLE 0x00400000 -#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26) -#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25) -#define PM07_CTR_POLARITY(x) (((x) & 1) << 24) -#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23) -#define PM07_CTR_ENABLE(x) (((x) & 1) << 22) - -/* Macros for the pm_status register. */ -#define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7))) enum pm_reg_name { group_control, @@ -62,33 +35,4 @@ enum pm_reg_name { pm_start_stop, }; -/* Routines for reading/writing the PMU registers. */ -extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr); -extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val); -extern u32 cbe_read_ctr(u32 cpu, u32 ctr); -extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val); - -extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr); -extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val); -extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg); -extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val); - -extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr); -extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size); - -extern void cbe_enable_pm(u32 cpu); -extern void cbe_disable_pm(u32 cpu); - -extern void cbe_read_trace_buffer(u32 cpu, u64 *buf); - -extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask); -extern void cbe_disable_pm_interrupts(u32 cpu); -extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu); -extern void cbe_sync_irq(int node); - -#define CBE_COUNT_SUPERVISOR_MODE 0 -#define CBE_COUNT_HYPERVISOR_MODE 1 -#define CBE_COUNT_PROBLEM_MODE 2 -#define CBE_COUNT_ALL_MODES 3 - #endif /* __ASM_CELL_PMU_H__ */ diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h index e1c431ef30e0..20f7339a3d4a 100644 --- a/arch/powerpc/include/asm/cell-regs.h +++ b/arch/powerpc/include/asm/cell-regs.h @@ -18,293 +18,6 @@ #include <asm/cell-pmu.h> -/* - * - * Some HID register definitions - * - */ - -/* CBE specific HID0 bits */ -#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul -#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul -#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul -#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul - -#define MAX_CBE 2 - -/* - * - * Pervasive unit register definitions - * - */ - -union spe_reg { - u64 val; - u8 spe[8]; -}; - -union ppe_spe_reg { - u64 val; - struct { - u32 ppe; - u32 spe; - }; -}; - - -struct cbe_pmd_regs { - /* Debug Bus Control */ - u64 pad_0x0000; /* 0x0000 */ - - u64 group_control; /* 0x0008 */ - - u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */ - - u64 debug_bus_control; /* 0x00a8 */ - - u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */ - - u64 trace_aux_data; /* 0x0100 */ - u64 trace_buffer_0_63; /* 0x0108 */ - u64 trace_buffer_64_127; /* 0x0110 */ - u64 trace_address; /* 0x0118 */ - u64 ext_tr_timer; /* 0x0120 */ - - u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */ - - /* Performance Monitor */ - u64 pm_status; /* 0x0400 */ - u64 pm_control; /* 0x0408 */ - u64 pm_interval; /* 0x0410 */ - u64 pm_ctr[4]; /* 0x0418 */ - u64 pm_start_stop; /* 0x0438 */ - u64 pm07_control[8]; /* 0x0440 */ - - u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */ - - /* Thermal Sensor Registers */ - union spe_reg ts_ctsr1; /* 0x0800 */ - u64 ts_ctsr2; /* 0x0808 */ - union spe_reg ts_mtsr1; /* 0x0810 */ - u64 
ts_mtsr2; /* 0x0818 */ - union spe_reg ts_itr1; /* 0x0820 */ - u64 ts_itr2; /* 0x0828 */ - u64 ts_gitr; /* 0x0830 */ - u64 ts_isr; /* 0x0838 */ - u64 ts_imr; /* 0x0840 */ - union spe_reg tm_cr1; /* 0x0848 */ - u64 tm_cr2; /* 0x0850 */ - u64 tm_simr; /* 0x0858 */ - union ppe_spe_reg tm_tpr; /* 0x0860 */ - union spe_reg tm_str1; /* 0x0868 */ - u64 tm_str2; /* 0x0870 */ - union ppe_spe_reg tm_tsr; /* 0x0878 */ - - /* Power Management */ - u64 pmcr; /* 0x0880 */ -#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000 - u64 pmsr; /* 0x0888 */ - - /* Time Base Register */ - u64 tbr; /* 0x0890 */ - - u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */ - - /* Fault Isolation Registers */ - u64 checkstop_fir; /* 0x0c00 */ - u64 recoverable_fir; /* 0x0c08 */ - u64 spec_att_mchk_fir; /* 0x0c10 */ - u32 fir_mode_reg; /* 0x0c18 */ - u8 pad_0x0c1c_0x0c20 [4]; /* 0x0c1c */ -#define CBE_PMD_FIR_MODE_M8 0x00800 - u64 fir_enable_mask; /* 0x0c20 */ - - u8 pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28]; /* 0x0c28 */ - u64 ras_esc_0; /* 0x0ca8 */ - u8 pad_0x0cb0_0x1000 [0x1000 - 0x0cb0]; /* 0x0cb0 */ -}; - -extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np); -extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu); - -/* - * PMU shadow registers - * - * Many of the registers in the performance monitoring unit are write-only, - * so we need to save a copy of what we write to those registers. - * - * The actual data counters are read/write. However, writing to the counters - * only takes effect if the PMU is enabled. Otherwise the value is stored in - * a hardware latch until the next time the PMU is enabled. So we save a copy - * of the counter values if we need to read them back while the PMU is - * disabled. The counter_value_in_latch field is a bitmap indicating which - * counters currently have a value waiting to be written. 
- */ - -struct cbe_pmd_shadow_regs { - u32 group_control; - u32 debug_bus_control; - u32 trace_address; - u32 ext_tr_timer; - u32 pm_status; - u32 pm_control; - u32 pm_interval; - u32 pm_start_stop; - u32 pm07_control[NR_CTRS]; - - u32 pm_ctr[NR_PHYS_CTRS]; - u32 counter_value_in_latch; -}; - -extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np); -extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu); - -/* - * - * IIC unit register definitions - * - */ - -struct cbe_iic_pending_bits { - u32 data; - u8 flags; - u8 class; - u8 source; - u8 prio; -}; - -#define CBE_IIC_IRQ_VALID 0x80 -#define CBE_IIC_IRQ_IPI 0x40 - -struct cbe_iic_thread_regs { - struct cbe_iic_pending_bits pending; - struct cbe_iic_pending_bits pending_destr; - u64 generate; - u64 prio; -}; - -struct cbe_iic_regs { - u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */ - - /* IIC interrupt registers */ - struct cbe_iic_thread_regs thread[2]; /* 0x0400 */ - - u64 iic_ir; /* 0x0440 */ -#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12) -#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4) -#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf) -#define CBE_IIC_IR_IOC_0 0x0 -#define CBE_IIC_IR_IOC_1S 0xb -#define CBE_IIC_IR_PT_0 0xe -#define CBE_IIC_IR_PT_1 0xf - - u64 iic_is; /* 0x0448 */ -#define CBE_IIC_IS_PMI 0x2 - - u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */ - - /* IOC FIR */ - u64 ioc_fir_reset; /* 0x0500 */ - u64 ioc_fir_set; /* 0x0508 */ - u64 ioc_checkstop_enable; /* 0x0510 */ - u64 ioc_fir_error_mask; /* 0x0518 */ - u64 ioc_syserr_enable; /* 0x0520 */ - u64 ioc_fir; /* 0x0528 */ - - u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */ -}; - -extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np); -extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu); - - -struct cbe_mic_tm_regs { - u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */ - - u64 mic_ctl_cnfg2; /* 0x0040 */ -#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL -#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL -#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL -#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL - - u64 pad_0x0048; /* 0x0048 */ - - u64 mic_aux_trc_base; /* 0x0050 */ - u64 mic_aux_trc_max_addr; /* 0x0058 */ - u64 mic_aux_trc_cur_addr; /* 0x0060 */ - u64 mic_aux_trc_grf_addr; /* 0x0068 */ - u64 mic_aux_trc_grf_data; /* 0x0070 */ - - u64 pad_0x0078; /* 0x0078 */ - - u64 mic_ctl_cnfg_0; /* 0x0080 */ -#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL - - u64 pad_0x0088; /* 0x0088 */ - - u64 slow_fast_timer_0; /* 0x0090 */ - u64 slow_next_timer_0; /* 0x0098 */ - - u8 pad_0x00a0_0x00f8[0x00f8 - 0x00a0]; /* 0x00a0 */ - u64 mic_df_ecc_address_0; /* 0x00f8 */ - - u8 pad_0x0100_0x01b8[0x01b8 - 0x0100]; /* 0x0100 */ - u64 mic_df_ecc_address_1; /* 0x01b8 */ - - u64 mic_ctl_cnfg_1; /* 0x01c0 */ -#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL - - u64 pad_0x01c8; /* 0x01c8 */ - - u64 slow_fast_timer_1; /* 0x01d0 */ - u64 slow_next_timer_1; /* 0x01d8 */ - - u8 pad_0x01e0_0x0208[0x0208 - 0x01e0]; /* 0x01e0 */ - u64 mic_exc; /* 0x0208 */ -#define CBE_MIC_EXC_BLOCK_SCRUB 0x0800000000000000ULL -#define CBE_MIC_EXC_FAST_SCRUB 0x0100000000000000ULL - - u64 mic_mnt_cfg; /* 0x0210 */ -#define CBE_MIC_MNT_CFG_CHAN_0_POP 0x0002000000000000ULL -#define CBE_MIC_MNT_CFG_CHAN_1_POP 0x0004000000000000ULL - - u64 mic_df_config; /* 0x0218 */ -#define CBE_MIC_ECC_DISABLE_0 0x4000000000000000ULL -#define CBE_MIC_ECC_REP_SINGLE_0 0x2000000000000000ULL -#define 
CBE_MIC_ECC_DISABLE_1 0x0080000000000000ULL -#define CBE_MIC_ECC_REP_SINGLE_1 0x0040000000000000ULL - - u8 pad_0x0220_0x0230[0x0230 - 0x0220]; /* 0x0220 */ - u64 mic_fir; /* 0x0230 */ -#define CBE_MIC_FIR_ECC_SINGLE_0_ERR 0x0200000000000000ULL -#define CBE_MIC_FIR_ECC_MULTI_0_ERR 0x0100000000000000ULL -#define CBE_MIC_FIR_ECC_SINGLE_1_ERR 0x0080000000000000ULL -#define CBE_MIC_FIR_ECC_MULTI_1_ERR 0x0040000000000000ULL -#define CBE_MIC_FIR_ECC_ERR_MASK 0xffff000000000000ULL -#define CBE_MIC_FIR_ECC_SINGLE_0_CTE 0x0000020000000000ULL -#define CBE_MIC_FIR_ECC_MULTI_0_CTE 0x0000010000000000ULL -#define CBE_MIC_FIR_ECC_SINGLE_1_CTE 0x0000008000000000ULL -#define CBE_MIC_FIR_ECC_MULTI_1_CTE 0x0000004000000000ULL -#define CBE_MIC_FIR_ECC_CTE_MASK 0x0000ffff00000000ULL -#define CBE_MIC_FIR_ECC_SINGLE_0_RESET 0x0000000002000000ULL -#define CBE_MIC_FIR_ECC_MULTI_0_RESET 0x0000000001000000ULL -#define CBE_MIC_FIR_ECC_SINGLE_1_RESET 0x0000000000800000ULL -#define CBE_MIC_FIR_ECC_MULTI_1_RESET 0x0000000000400000ULL -#define CBE_MIC_FIR_ECC_RESET_MASK 0x00000000ffff0000ULL -#define CBE_MIC_FIR_ECC_SINGLE_0_SET 0x0000000000000200ULL -#define CBE_MIC_FIR_ECC_MULTI_0_SET 0x0000000000000100ULL -#define CBE_MIC_FIR_ECC_SINGLE_1_SET 0x0000000000000080ULL -#define CBE_MIC_FIR_ECC_MULTI_1_SET 0x0000000000000040ULL -#define CBE_MIC_FIR_ECC_SET_MASK 0x000000000000ffffULL - u64 mic_fir_debug; /* 0x0238 */ - - u8 pad_0x0240_0x1000[0x1000 - 0x0240]; /* 0x0240 */ -}; - -extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np); -extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu); - - /* Cell page table entries */ #define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */ #define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */ @@ -315,13 +28,4 @@ extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu); #define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */ #define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */ -/* some utility functions to deal with SMT */ -extern u32 cbe_get_hw_thread_id(int cpu); -extern u32 cbe_cpu_to_node(int cpu); -extern u32 cbe_node_to_cpu(int node); - -/* Init this module early */ -extern void cbe_regs_init(void); - - #endif /* CBE_REGS_H */ diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h index fd2e166ea02a..81bd176203ab 100644 --- a/arch/powerpc/include/asm/copro.h +++ b/arch/powerpc/include/asm/copro.h @@ -18,10 +18,4 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb); - -#ifdef CONFIG_PPC_COPRO_BASE -void copro_flush_all_slbs(struct mm_struct *mm); -#else -static inline void copro_flush_all_slbs(struct mm_struct *mm) {} -#endif #endif /* _ASM_POWERPC_COPRO_H */ diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 727d4b321937..bf8a228229fa 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -24,12 +24,11 @@ static __always_inline bool cpu_has_feature(unsigned long feature) { int i; -#ifndef __clang__ /* clang can't cope with this */ BUILD_BUG_ON(!__builtin_constant_p(feature)); -#endif + BUILD_BUG_ON(__builtin_popcountl(feature) > 1); #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG - if (!static_key_initialized) { + if (!static_key_feature_checks_initialized) { printk("Warning! 
cpu_has_feature() used prior to jump label init!\n"); dump_stack(); return early_cpu_has_feature(feature); diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 07a204d21034..29a529d2ab8b 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -193,6 +193,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_ARCH_31 LONG_ASM_CONST(0x0004000000000000) #define CPU_FTR_DAWR1 LONG_ASM_CONST(0x0008000000000000) #define CPU_FTR_DEXCR_NPHIE LONG_ASM_CONST(0x0010000000000000) +#define CPU_FTR_P11_PVR LONG_ASM_CONST(0x0020000000000000) #ifndef __ASSEMBLY__ @@ -353,7 +354,6 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON) #define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE) -#define CPU_FTRS_40X (CPU_FTR_NOEXECUTE) #define CPU_FTRS_44X (CPU_FTR_NOEXECUTE) #define CPU_FTRS_440x6 (CPU_FTR_NOEXECUTE | \ CPU_FTR_INDEXED_DCR) @@ -455,7 +455,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_DAWR | CPU_FTR_DAWR1 | \ CPU_FTR_DEXCR_NPHIE) -#define CPU_FTRS_POWER11 CPU_FTRS_POWER10 +#define CPU_FTRS_POWER11 (CPU_FTRS_POWER10 | CPU_FTR_P11_PVR) #define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ @@ -476,7 +476,7 @@ static inline void cpu_feature_keys_init(void) { } (CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \ - CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10) + CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10 | CPU_FTRS_POWER11) #else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ @@ -484,7 +484,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \ - CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10) + CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10 | CPU_FTRS_POWER11) #endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else @@ -507,9 +507,6 @@ enum { #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX | #endif -#ifdef CONFIG_40x - CPU_FTRS_40X | -#endif #ifdef CONFIG_PPC_47x CPU_FTRS_47X | CPU_FTR_476_DD2 | #elif defined(CONFIG_44x) @@ -551,7 +548,7 @@ enum { (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & ~CPU_FTR_DBELL & \ CPU_FTRS_POWER7 & CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & \ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \ - CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE) + CPU_FTRS_POWER10 & CPU_FTRS_POWER11 & CPU_FTRS_DT_CPU_BASE) #else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ @@ -559,7 +556,7 @@ enum { CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ ~CPU_FTR_HVMODE & ~CPU_FTR_DBELL & CPU_FTRS_POSSIBLE & \ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \ - CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE) + CPU_FTRS_POWER10 & CPU_FTRS_POWER11 & CPU_FTRS_DT_CPU_BASE) #endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else @@ -582,9 +579,6 @@ enum { #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX & #endif -#ifdef CONFIG_40x - CPU_FTRS_40X & -#endif #ifdef CONFIG_PPC_47x CPU_FTRS_47X & #elif defined(CONFIG_44x) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 4961fb38e438..aff858ca99c0 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -32,23 +32,10 @@ #ifdef CONFIG_PPC64 #define get_accounting(tsk) 
(&get_paca()->accounting) #define raw_get_accounting(tsk) (&local_paca->accounting) -static inline void arch_vtime_task_switch(struct task_struct *tsk) { } #else #define get_accounting(tsk) (&task_thread_info(tsk)->accounting) #define raw_get_accounting(tsk) get_accounting(tsk) -/* - * Called from the context switch with interrupts disabled, to charge all - * accumulated times to the current process, and to prepare accounting on - * the next process. - */ -static inline void arch_vtime_task_switch(struct task_struct *prev) -{ - struct cpu_accounting_data *acct = get_accounting(current); - struct cpu_accounting_data *acct0 = get_accounting(prev); - - acct->starttime = acct0->starttime; -} #endif /* diff --git a/arch/powerpc/include/asm/crash_reserve.h b/arch/powerpc/include/asm/crash_reserve.h new file mode 100644 index 000000000000..6467ce29b1fa --- /dev/null +++ b/arch/powerpc/include/asm/crash_reserve.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_CRASH_RESERVE_H +#define _ASM_POWERPC_CRASH_RESERVE_H + +/* crash kernel regions are page size aligned */ +#define CRASH_ALIGN PAGE_SIZE + +#endif /* _ASM_POWERPC_CRASH_RESERVE_H */ diff --git a/arch/powerpc/include/asm/dcr-generic.h b/arch/powerpc/include/asm/dcr-generic.h deleted file mode 100644 index 099c28dd40b9..000000000000 --- a/arch/powerpc/include/asm/dcr-generic.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. 
- * <benh@kernel.crashing.org> - */ - -#ifndef _ASM_POWERPC_DCR_MMIO_H -#define _ASM_POWERPC_DCR_MMIO_H -#ifdef __KERNEL__ - -#include <asm/io.h> - -typedef struct { - void __iomem *token; - unsigned int stride; - unsigned int base; -} dcr_host_mmio_t; - -static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host) -{ - return host.token != NULL; -} - -extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, - unsigned int dcr_n, - unsigned int dcr_c); -extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c); - -static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n) -{ - return in_be32(host.token + ((host.base + dcr_n) * host.stride)); -} - -static inline void dcr_write_mmio(dcr_host_mmio_t host, - unsigned int dcr_n, - u32 value) -{ - out_be32(host.token + ((host.base + dcr_n) * host.stride), value); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_DCR_MMIO_H */ - - diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h index 64030e3a1f30..180021cd0b30 100644 --- a/arch/powerpc/include/asm/dcr.h +++ b/arch/powerpc/include/asm/dcr.h @@ -10,46 +10,14 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_PPC_DCR -#ifdef CONFIG_PPC_DCR_NATIVE #include <asm/dcr-native.h> -#endif -#ifdef CONFIG_PPC_DCR_MMIO -#include <asm/dcr-mmio.h> -#endif - - -/* Indirection layer for providing both NATIVE and MMIO support. */ - -#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) - -#include <asm/dcr-generic.h> - -#define DCR_MAP_OK(host) dcr_map_ok_generic(host) -#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c) -#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c) -#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n) -#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value) - -#else - -#ifdef CONFIG_PPC_DCR_NATIVE typedef dcr_host_native_t dcr_host_t; #define DCR_MAP_OK(host) dcr_map_ok_native(host) #define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c) #define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c) #define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n) #define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value) -#else -typedef dcr_host_mmio_t dcr_host_t; -#define DCR_MAP_OK(host) dcr_map_ok_mmio(host) -#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c) -#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c) -#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n) -#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value) -#endif - -#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ /* * additional helpers to read the DCR * base from the device-tree diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 47ed639f3b8f..a4dc27655b3e 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -38,9 +38,6 @@ struct dev_archdata { #ifdef CONFIG_FAIL_IOMMU int fail_iommu; #endif -#ifdef CONFIG_CXL_BASE - struct cxl_context *cxl_ctx; -#endif #ifdef CONFIG_PCI_IOV void *iov_data; #endif diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h index d6f43d149f8d..a5c21bc623cb 100644 --- a/arch/powerpc/include/asm/dtl.h +++ b/arch/powerpc/include/asm/dtl.h @@ -1,8 +1,8 @@ #ifndef _ASM_POWERPC_DTL_H #define _ASM_POWERPC_DTL_H +#include <linux/rwsem.h> #include <asm/lppaca.h> -#include <linux/spinlock_types.h> /* * Layout of entries in the hypervisor's dispatch trace log buffer. 
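The dtl.h hunks on either side of this point convert dtl_access_lock from an rwlock_t to an rw_semaphore, i.e. from a spinning to a sleeping lock, which allows holders to block while registering or tearing down dispatch trace log buffers. A rough sketch of what a writer-side caller would now look like (the function is hypothetical; only the lock and the rwsem primitives are real):

static int dtl_register_example(void)
{
	if (!down_write_trylock(&dtl_access_lock))
		return -EBUSY;

	/* ... allocate and register DTL buffers; sleeping is now allowed ... */

	up_write(&dtl_access_lock);
	return 0;
}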
@@ -35,7 +35,7 @@ struct dtl_entry { #define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT) extern struct kmem_cache *dtl_cache; -extern rwlock_t dtl_access_lock; +extern struct rw_semaphore dtl_access_lock; extern void register_dtl_buffer(int cpu); extern void alloc_dtl_buffers(unsigned long *time_limit); diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 514dd056c2c8..5e34611de9ef 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -82,7 +82,7 @@ struct eeh_pe { int false_positives; /* Times of reported #ff's */ atomic_t pass_dev_cnt; /* Count of passed through devs */ struct eeh_pe *parent; /* Parent PE */ - void *data; /* PE auxillary data */ + void *data; /* PE auxiliary data */ struct list_head child_list; /* List of PEs below this PE */ struct list_head child; /* Memb. child_list/eeh_phb_pe */ struct list_head edevs; /* List of eeh_dev in this PE */ @@ -308,6 +308,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed); int eeh_pe_configure(struct eeh_pe *pe); int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, unsigned long addr, unsigned long mask); +int eeh_pe_inject_mmio_error(struct pci_dev *pdev); /** * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 79f1c480b5eb..bb4b94444d3e 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -127,8 +127,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ #define NT_SPU 1 -#define ARCH_HAVE_EXTRA_ELF_NOTES - #endif /* CONFIG_SPU_BASE */ #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h index 27f9e11eda28..e83869a4eb6a 100644 --- a/arch/powerpc/include/asm/fadump-internal.h +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -42,13 +42,38 @@ static inline u64 fadump_str_to_u64(const char *str) #define FADUMP_CPU_UNKNOWN (~((u32)0)) -#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPINF") +/* + * The introduction of new fields in the fadump crash info header has + * led to a change in the magic key from `FADMPINF` to `FADMPSIG` for + * identifying a kernel crash from an old kernel. + * + * To prevent the need for further changes to the magic number in the + * event of future modifications to the fadump crash info header, a + * version field has been introduced to track the fadump crash info + * header version. + * + * Consider a few points before adding new members to the fadump crash info + * header structure: + * + * - Append new members; avoid adding them in between. + * - Non-primitive members should have a size member as well. + * - For every change in the fadump header, increment the + * fadump header version. This helps the updated kernel decide how to + * handle kernel dumps from older kernels. 
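Given the compatibility rules laid out in this fadump-internal.h comment (the matching magic, version and header definitions follow just below), a dump-capture kernel would be expected to gate on both the magic and the version before trusting newer fields. A sketch of such a check, assuming a reader accepts any producer version up to its own:

static bool fadump_header_usable(const struct fadump_crash_info_header *fdh)
{
	/* an old kernel wrote FADMPINF and a layout without a version field */
	if (fdh->magic_number == FADUMP_CRASH_INFO_MAGIC_OLD)
		return false;

	/* not a fadump crash boot at all */
	if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC)
		return false;

	/* fields are append-only, so older producer versions remain readable */
	return fdh->version <= FADUMP_HEADER_VERSION;
}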
+ */ +#define FADUMP_CRASH_INFO_MAGIC_OLD fadump_str_to_u64("FADMPINF") +#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPSIG") +#define FADUMP_HEADER_VERSION 1 /* fadump crash info structure */ struct fadump_crash_info_header { u64 magic_number; - u64 elfcorehdr_addr; + u32 version; u32 crashing_cpu; + u64 vmcoreinfo_raddr; + u64 vmcoreinfo_size; + u32 pt_regs_sz; + u32 cpu_mask_sz; struct pt_regs regs; struct cpumask cpu_mask; }; @@ -94,9 +119,13 @@ struct fw_dump { u64 boot_mem_regs_cnt; unsigned long fadumphdr_addr; + u64 elfcorehdr_addr; + u64 elfcorehdr_size; unsigned long cpu_notes_buf_vaddr; unsigned long cpu_notes_buf_size; + unsigned long param_area; + /* * Maximum size supported by firmware to copy from source to * destination address per entry. @@ -111,6 +140,7 @@ struct fw_dump { unsigned long dump_active:1; unsigned long dump_registered:1; unsigned long nocma:1; + unsigned long param_area_supported:1; struct fadump_ops *ops; }; @@ -129,6 +159,7 @@ struct fadump_ops { struct seq_file *m); void (*fadump_trigger)(struct fadump_crash_info_header *fdh, const char *msg); + int (*fadump_max_boot_mem_rgns)(void); }; /* Helper functions */ @@ -136,7 +167,6 @@ s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus); void fadump_free_cpu_notes_buf(void); u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs); void __init fadump_update_elfcore_header(char *bufp); -bool is_fadump_boot_mem_contiguous(void); bool is_fadump_reserved_mem_contiguous(void); #else /* !CONFIG_PRESERVE_FA_DUMP */ diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 526a6a647312..a48f54dde4f6 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -19,12 +19,16 @@ extern int is_fadump_active(void); extern int should_fadump_crash(void); extern void crash_fadump(struct pt_regs *, const char *); extern void fadump_cleanup(void); +void fadump_setup_param_area(void); +extern void fadump_append_bootargs(void); #else /* CONFIG_FA_DUMP */ static inline int is_fadump_active(void) { return 0; } static inline int should_fadump_crash(void) { return 0; } static inline void crash_fadump(struct pt_regs *regs, const char *str) { } static inline void fadump_cleanup(void) { } +static inline void fadump_setup_param_area(void) { } +static inline void fadump_append_bootargs(void) { } #endif /* !CONFIG_FA_DUMP */ #if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) @@ -32,4 +36,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data); extern int fadump_reserve_mem(void); #endif + +#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA) +void fadump_cma_init(void); +#else +static inline void fadump_cma_init(void) { } +#endif + #endif /* _ASM_POWERPC_FADUMP_H */ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 77824bd289a3..17d168dd8b49 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -291,6 +291,8 @@ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; +extern bool static_key_feature_checks_initialized; + void apply_feature_fixups(void); void update_mmu_feature_fixups(unsigned long mask); void setup_feature_keys(void); diff --git a/arch/powerpc/include/asm/fpu.h b/arch/powerpc/include/asm/fpu.h new file mode 100644 index 
000000000000..ca584e4bc40f --- /dev/null +++ b/arch/powerpc/include/asm/fpu.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 SiFive + */ + +#ifndef _ASM_POWERPC_FPU_H +#define _ASM_POWERPC_FPU_H + +#include <linux/preempt.h> + +#include <asm/cpu_has_feature.h> +#include <asm/switch_to.h> + +#define kernel_fpu_available() (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) + +static inline void kernel_fpu_begin(void) +{ + preempt_disable(); + enable_kernel_fp(); +} + +static inline void kernel_fpu_end(void) +{ + disable_kernel_fp(); + preempt_enable(); +} + +#endif /* ! _ASM_POWERPC_FPU_H */ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 107fc5a48456..82da7c7a1d12 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -8,8 +8,6 @@ #define MCOUNT_ADDR ((unsigned long)(_mcount)) #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ -#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR - /* Ignore unused weak functions which will have larger offsets */ #if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) #define FTRACE_MCOUNT_MAX_OFFSET 16 @@ -26,7 +24,10 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, struct module; struct dyn_ftrace; struct dyn_arch_ftrace { - struct module *mod; +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + /* pointer to the associated out-of-line stub */ + unsigned long ool_stub; +#endif }; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS @@ -34,42 +35,34 @@ struct dyn_arch_ftrace { int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop -struct ftrace_regs { - struct pt_regs regs; -}; +#include <linux/ftrace_regs.h> static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) { /* We clear regs.msr in ftrace_call */ - return fregs->regs.msr ? &fregs->regs : NULL; + return arch_ftrace_regs(fregs)->regs.msr ? 
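The new asm/fpu.h above plugs powerpc into the generic ARCH_HAS_KERNEL_FPU_SUPPORT interface. A consumer, compiled in a translation unit that adds CC_FLAGS_FPU, would bracket its floating-point section roughly like this (the scaling function is invented for illustration):

#include <asm/fpu.h>

static void scale_samples(float *buf, int n, float gain)
{
	int i;

	if (!kernel_fpu_available())
		return;			/* e.g. take a fixed-point fallback */

	kernel_fpu_begin();		/* preemption off, FP enabled */
	for (i = 0; i < n; i++)
		buf[i] *= gain;
	kernel_fpu_end();
}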
&arch_ftrace_regs(fregs)->regs : NULL; } +#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \ + (_regs)->result = 0; \ + (_regs)->nip = arch_ftrace_regs(fregs)->regs.nip; \ + (_regs)->gpr[1] = arch_ftrace_regs(fregs)->regs.gpr[1]; \ + asm volatile("mfmsr %0" : "=r" ((_regs)->msr)); \ + } while (0) + static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip) { - regs_set_return_ip(&fregs->regs, ip); + regs_set_return_ip(&arch_ftrace_regs(fregs)->regs, ip); } static __always_inline unsigned long -ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs) +ftrace_regs_get_return_address(struct ftrace_regs *fregs) { - return instruction_pointer(&fregs->regs); + return arch_ftrace_regs(fregs)->regs.link; } -#define ftrace_regs_get_argument(fregs, n) \ - regs_get_kernel_argument(&(fregs)->regs, n) -#define ftrace_regs_get_stack_pointer(fregs) \ - kernel_stack_pointer(&(fregs)->regs) -#define ftrace_regs_return_value(fregs) \ - regs_return_value(&(fregs)->regs) -#define ftrace_regs_set_return_value(fregs, ret) \ - regs_set_return_value(&(fregs)->regs, ret) -#define ftrace_override_function_with_return(fregs) \ - override_function_with_return(&(fregs)->regs) -#define ftrace_regs_query_register_offset(name) \ - regs_query_register_offset(name) - struct ftrace_ops; #define ftrace_graph_func ftrace_graph_func @@ -133,8 +126,36 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; } #ifdef CONFIG_FUNCTION_TRACER extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[]; +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE +struct ftrace_ool_stub { +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + struct ftrace_ops *ftrace_op; +#endif + u32 insn[4]; +} __aligned(sizeof(unsigned long)); +extern struct ftrace_ool_stub ftrace_ool_stub_text_end[], ftrace_ool_stub_text[], + ftrace_ool_stub_inittext[]; +extern unsigned int ftrace_ool_stub_text_end_count, ftrace_ool_stub_text_count, + ftrace_ool_stub_inittext_count; +#endif void ftrace_free_init_tramp(void); unsigned long ftrace_call_adjust(unsigned long addr); + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +/* + * When an ftrace registered caller is tracing a function that is also set by a + * register_ftrace_direct() call, it needs to be differentiated in the + * ftrace_caller trampoline so that the direct call can be invoked after the + * other ftrace ops. To do this, place the direct caller in the orig_gpr3 field + * of pt_regs. This tells ftrace_caller that there's a direct caller. 
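(For orientation only, a minimal sketch of the generic-ftrace usage that ends up on this path; ftrace_set_filter_ip() and register_ftrace_direct() are core ftrace APIs, not part of this powerpc change, and the addresses are invented. The trampoline registered here is the "direct caller" that the code below stashes in orig_gpr3.)

#include <linux/ftrace.h>

static struct ftrace_ops direct_ops;

/* Attach a trampoline at my_tramp as a direct call on func_ip. */
static int attach_direct(unsigned long func_ip, unsigned long my_tramp)
{
	int err;

	err = ftrace_set_filter_ip(&direct_ops, func_ip, 0, 0);
	if (err)
		return err;

	return register_ftrace_direct(&direct_ops, my_tramp);
}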
+ */ +static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr) +{ + struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs; + + regs->orig_gpr3 = addr; +} +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ #else static inline void ftrace_free_init_tramp(void) { } static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; } diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h index 808149f31576..acd61eb36d59 100644 --- a/arch/powerpc/include/asm/guest-state-buffer.h +++ b/arch/powerpc/include/asm/guest-state-buffer.h @@ -28,6 +28,21 @@ /* Process Table Info */ #define KVMPPC_GSID_PROCESS_TABLE 0x0006 +/* Guest Management Heap Size */ +#define KVMPPC_GSID_L0_GUEST_HEAP 0x0800 + +/* Guest Management Heap Max Size */ +#define KVMPPC_GSID_L0_GUEST_HEAP_MAX 0x0801 + +/* Guest Pagetable Size */ +#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE 0x0802 + +/* Guest Pagetable Max Size */ +#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX 0x0803 + +/* Guest Pagetable Reclaim in bytes */ +#define KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM 0x0804 + /* H_GUEST_RUN_VCPU input buffer Info */ #define KVMPPC_GSID_RUN_INPUT 0x0C00 /* H_GUEST_RUN_VCPU output buffer Info */ @@ -81,6 +96,7 @@ #define KVMPPC_GSID_HASHKEYR 0x1050 #define KVMPPC_GSID_HASHPKEYR 0x1051 #define KVMPPC_GSID_CTRL 0x1052 +#define KVMPPC_GSID_DPDES 0x1053 #define KVMPPC_GSID_CR 0x2000 #define KVMPPC_GSID_PIDR 0x2001 @@ -105,12 +121,17 @@ #define KVMPPC_GSE_GUESTWIDE_COUNT \ (KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1) +#define KVMPPC_GSE_HOSTWIDE_START KVMPPC_GSID_L0_GUEST_HEAP +#define KVMPPC_GSE_HOSTWIDE_END KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM +#define KVMPPC_GSE_HOSTWIDE_COUNT \ + (KVMPPC_GSE_HOSTWIDE_END - KVMPPC_GSE_HOSTWIDE_START + 1) + #define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT #define KVMPPC_GSE_META_END KVMPPC_GSID_VPA #define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1) #define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0) -#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL +#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES #define KVMPPC_GSE_DW_REGS_COUNT \ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1) @@ -129,7 +150,8 @@ (KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1) #define KVMPPC_GSE_IDEN_COUNT \ - (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \ + (KVMPPC_GSE_HOSTWIDE_COUNT + \ + KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \ KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \ KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT) @@ -138,10 +160,11 @@ */ enum { KVMPPC_GS_CLASS_GUESTWIDE = 0x01, - KVMPPC_GS_CLASS_META = 0x02, - KVMPPC_GS_CLASS_DWORD_REG = 0x04, - KVMPPC_GS_CLASS_WORD_REG = 0x08, - KVMPPC_GS_CLASS_VECTOR = 0x10, + KVMPPC_GS_CLASS_HOSTWIDE = 0x02, + KVMPPC_GS_CLASS_META = 0x04, + KVMPPC_GS_CLASS_DWORD_REG = 0x08, + KVMPPC_GS_CLASS_WORD_REG = 0x10, + KVMPPC_GS_CLASS_VECTOR = 0x18, KVMPPC_GS_CLASS_INTR = 0x20, }; @@ -163,6 +186,7 @@ enum { */ enum { KVMPPC_GS_FLAGS_WIDE = 0x01, + KVMPPC_GS_FLAGS_HOST_WIDE = 0x02, }; /** @@ -286,7 +310,7 @@ struct kvmppc_gs_msg_ops { * struct kvmppc_gs_msg - a guest state message * @bitmap: the guest state ids that should be included * @ops: modify message behavior for reading and writing to buffers - * @flags: guest wide or thread wide + * @flags: host wide, guest wide or thread wide * @data: location where buffer data will be written to or from. 
* * A guest state message allows flexibility in sending and receiving data diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index ea71f7245a63..86326587e58d 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -15,6 +15,15 @@ extern bool hugetlb_disabled; +static inline bool hugepages_supported(void) +{ + if (hugetlb_disabled) + return false; + + return HPAGE_SHIFT != 0; +} +#define hugepages_supported hugepages_supported + void __init hugetlbpage_init_defaultsize(void); int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, @@ -30,14 +39,14 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, } #define is_hugepage_only_range is_hugepage_only_range -#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE -void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, - unsigned long end, unsigned long floor, - unsigned long ceiling); +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); } @@ -47,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_hugetlb_page(vma, addr); return pte; } @@ -67,14 +77,6 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, { } -#define hugepd_shift(x) 0 -static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, - unsigned pdshift) -{ - return NULL; -} - - static inline void __init gigantic_hugetlb_cma_reserve(void) { } diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a41e542ba94d..6df6dbbe1e7c 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -348,6 +348,7 @@ #define H_SCM_FLUSH 0x44C #define H_GET_ENERGY_SCALE_INFO 0x450 #define H_PKS_SIGNED_UPDATE 0x454 +#define H_HTM 0x458 #define H_WATCHDOG 0x45C #define H_GUEST_GET_CAPABILITIES 0x460 #define H_GUEST_SET_CAPABILITIES 0x464 @@ -489,13 +490,48 @@ #define H_RPTI_PAGE_ALL (-1UL) /* Flags for H_GUEST_{S,G}_STATE */ -#define H_GUEST_FLAGS_WIDE (1UL<<(63-0)) +#define H_GUEST_FLAGS_WIDE (1UL << (63 - 0)) +#define H_GUEST_FLAGS_HOST_WIDE (1UL << (63 - 1)) /* Flag values used for H_{S,G}SET_GUEST_CAPABILITIES */ -#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0)) -#define H_GUEST_CAP_POWER9 (1UL<<(63-1)) -#define H_GUEST_CAP_POWER10 (1UL<<(63-2)) -#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63)) +#define H_GUEST_CAP_COPY_MEM (1UL << (63 - 0)) +#define H_GUEST_CAP_POWER9 (1UL << (63 - 1)) +#define H_GUEST_CAP_POWER10 (1UL << (63 - 2)) +#define H_GUEST_CAP_POWER11 (1UL << (63 - 3)) +#define H_GUEST_CAP_BITMAP2 (1UL << (63 - 63)) + +/* + * Defines for H_HTM - Macros for hardware trace macro (HTM) function.
+ */ +#define H_HTM_FLAGS_HARDWARE_TARGET (1ul << 63) +#define H_HTM_FLAGS_LOGICAL_TARGET (1ul << 62) +#define H_HTM_FLAGS_PROCID_TARGET (1ul << 61) +#define H_HTM_FLAGS_NOWRAP (1ul << 60) + +#define H_HTM_OP_SHIFT (63-15) +#define H_HTM_OP(x) ((unsigned long)(x)<<H_HTM_OP_SHIFT) +#define H_HTM_OP_CAPABILITIES 0x01 +#define H_HTM_OP_STATUS 0x02 +#define H_HTM_OP_SETUP 0x03 +#define H_HTM_OP_CONFIGURE 0x04 +#define H_HTM_OP_START 0x05 +#define H_HTM_OP_STOP 0x06 +#define H_HTM_OP_DECONFIGURE 0x07 +#define H_HTM_OP_DUMP_DETAILS 0x08 +#define H_HTM_OP_DUMP_DATA 0x09 +#define H_HTM_OP_DUMP_SYSMEM_CONF 0x0a +#define H_HTM_OP_DUMP_SYSPROC_CONF 0x0b + +#define H_HTM_TYPE_SHIFT (63-31) +#define H_HTM_TYPE(x) ((unsigned long)(x)<<H_HTM_TYPE_SHIFT) +#define H_HTM_TYPE_NEST 0x01 +#define H_HTM_TYPE_CORE 0x02 +#define H_HTM_TYPE_LLAT 0x03 +#define H_HTM_TYPE_GLOBAL 0xff + +#define H_HTM_TARGET_NODE_INDEX(x) ((unsigned long)(x)<<(63-15)) +#define H_HTM_TARGET_NODAL_CHIP_INDEX(x) ((unsigned long)(x)<<(63-31)) +#define H_HTM_TARGET_CORE_INDEX_ON_CHIP(x) ((unsigned long)(x)<<(63-47)) #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -524,7 +560,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...); * Used for all but the craziest of phyp interfaces (see plpar_hcall9) */ #define PLPAR_HCALL_BUFSIZE 4 -long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...); /** * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats @@ -538,7 +574,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...); * plpar_hcall, but plpar_hcall_raw works in real mode and does not * calculate hypervisor call statistics. */ -long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...); /** * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments @@ -549,8 +585,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...); * PLPAR_HCALL9_BUFSIZE to size the return argument buffer. 
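(Editor's sketch of the array-parameter contract the new prototypes formalize: with retbuf[static PLPAR_HCALL9_BUFSIZE] in the declaration, a conforming caller looks like existing pseries code such as h_get_mpp(), and a compiler may now diagnose a NULL or provably undersized buffer. Whether a warning fires depends on the compiler; the wrapper name query_mpp is invented.)

#include <asm/hvcall.h>

static long query_mpp(void)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	return plpar_hcall9(H_GET_MPP, retbuf);	/* OK: 9-element buffer */
	/*
	 * unsigned long small[4];
	 * plpar_hcall9(H_GET_MPP, small);	-- undersized, compiler may warn
	 * plpar_hcall9(H_GET_MPP, NULL);	-- [static 9] also rules out NULL
	 */
}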
*/ #define PLPAR_HCALL9_BUFSIZE 9 -long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...); -long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...); +long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...); +long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...); /* pseries hcall tracing */ extern struct static_key hcall_tracepoint_key; @@ -570,7 +606,7 @@ struct hvcall_mpp_data { unsigned long backing_mem; }; -int h_get_mpp(struct hvcall_mpp_data *); +long h_get_mpp(struct hvcall_mpp_data *mpp_data); struct hvcall_mpp_x_data { unsigned long coalesced_bytes; diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 317659fdeacf..569ac1165b06 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -63,7 +63,7 @@ static inline void __hard_irq_enable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) wrtee(MSR_EE); else if (IS_ENABLED(CONFIG_PPC_8xx)) wrtspr(SPRN_EIE); @@ -75,7 +75,7 @@ static inline void __hard_irq_enable(void) static inline void __hard_irq_disable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) wrtee(0); else if (IS_ENABLED(CONFIG_PPC_8xx)) wrtspr(SPRN_EID); @@ -87,7 +87,7 @@ static inline void __hard_irq_disable(void) static inline void __hard_EE_RI_disable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) wrtee(0); else if (IS_ENABLED(CONFIG_PPC_8xx)) wrtspr(SPRN_NRI); @@ -99,7 +99,7 @@ static inline void __hard_EE_RI_disable(void) static inline void __hard_RI_enable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) return; if (IS_ENABLED(CONFIG_PPC_8xx)) diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 7b610864b364..23638d4e73ac 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -177,7 +177,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) if (user_mode(regs)) { kuap_lock(); - CT_WARN_ON(ct_state() != CONTEXT_USER); + CT_WARN_ON(ct_state() != CT_STATE_USER); user_exit_irqoff(); account_cpu_user_entry(); @@ -189,8 +189,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * so avoid recursion. */ if (TRAP(regs) != INTERRUPT_PROGRAM) - CT_WARN_ON(ct_state() != CONTEXT_KERNEL && - ct_state() != CONTEXT_IDLE); + CT_WARN_ON(ct_state() != CT_STATE_KERNEL && + ct_state() != CT_STATE_IDLE); INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs)); INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) && search_kernel_restart_table(regs->nip)); @@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte if (IS_ENABLED(CONFIG_KASAN)) return; + /* + * Likewise, do not use it in real mode if percpu first chunk is not + * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, the + * percpu allocation can come from the vmalloc area.
+ */ + if (percpu_first_chunk_is_paged) + return; + /* Otherwise, it should be safe to call it */ nmi_enter(); } @@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter // no nmi_exit for a pseries hash guest taking a real mode exception } else if (IS_ENABLED(CONFIG_KASAN)) { // no nmi_exit for KASAN in real mode + } else if (percpu_first_chunk_is_paged) { + // no nmi_exit if percpu first chunk is not embedded } else { nmi_exit(); } diff --git a/arch/powerpc/include/asm/io-defs.h b/arch/powerpc/include/asm/io-defs.h index faf8617cc574..5c2be9b54a9d 100644 --- a/arch/powerpc/include/asm/io-defs.h +++ b/arch/powerpc/include/asm/io-defs.h @@ -1,61 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* This file is meant to be included multiple times by other headers */ -/* last 2 argments are used by platforms/cell/io-workarounds.[ch] */ -DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr), mem, addr) -DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr) -DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr) -DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr) -DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr) - -#ifdef __powerpc64__ -DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr), mem, addr) -DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr) -DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr) -#endif /* __powerpc64__ */ - -DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port) -DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port), pio, port) -DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port), pio, port) -DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port), pio, port) -DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port), pio, port) -DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port), pio, port) - -DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c), - (a, b, c), mem, a) -DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c), - (a, b, c), mem, a) -DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c), - (a, b, c), mem, a) -DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c), - (a, b, c), mem, a) -DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c), - (a, b, c), mem, a) -DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c), - (a, b, c), mem, a) - -DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), - (p, b, c), pio, p) -DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), - (p, b, c), pio, p) -DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), - (p, b, c), pio, p) -DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), - (p, b, c), pio, p) -DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), - (p, b, c), pio, p) -DEF_PCI_AC_NORET(outsl, (unsigned long p, const
void *b, unsigned long c), - (p, b, c), pio, p) - -DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n), - (a, c, n), mem, a) -DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n), - (d, s, n), mem, s) -DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR d, const void *s, unsigned long n), - (d, s, n), mem, d) +DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port)) +DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port)) +DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port)) +DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port)) +DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port)) +DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port)) +DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), (p, b, c)) +DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), (p, b, c)) +DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), (p, b, c)) +DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), (p, b, c)) +DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), (p, b, c)) +DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), (p, b, c)) diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h deleted file mode 100644 index 3cce499fbe27..000000000000 --- a/arch/powerpc/include/asm/io-workarounds.h +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Support PCI IO workaround - * - * (C) Copyright 2007-2008 TOSHIBA CORPORATION - */ - -#ifndef _IO_WORKAROUNDS_H -#define _IO_WORKAROUNDS_H - -#ifdef CONFIG_PPC_IO_WORKAROUNDS -#include <linux/io.h> -#include <asm/pci-bridge.h> - -/* Bus info */ -struct iowa_bus { - struct pci_controller *phb; - struct ppc_pci_io *ops; - void *private; -}; - -void iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, - int (*)(struct iowa_bus *, void *), void *); -struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); -struct iowa_bus *iowa_pio_find_bus(unsigned long); - -extern struct ppc_pci_io spiderpci_ops; -extern int spiderpci_iowa_init(struct iowa_bus *, void *); - -#define SPIDER_PCI_REG_BASE 0xd000 -#define SPIDER_PCI_REG_SIZE 0x1000 -#define SPIDER_PCI_VCI_CNTL_STAT 0x0110 -#define SPIDER_PCI_DUMMY_READ 0x0810 -#define SPIDER_PCI_DUMMY_READ_BASE 0x0814 - -#endif - -#if defined(CONFIG_PPC_IO_WORKAROUNDS) && defined(CONFIG_PPC_INDIRECT_MMIO) -extern bool io_workaround_inited; - -static inline bool iowa_is_active(void) -{ - return unlikely(io_workaround_inited); -} -#else -static inline bool iowa_is_active(void) -{ - return false; -} -#endif - -void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, - pgprot_t prot, void *caller); - -#endif /* _IO_WORKAROUNDS_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 08c550ed49be..7a89754842d6 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -37,7 +37,7 @@ extern struct pci_dev *isa_bridge_pcidev; * define properly based on the platform */ #ifndef CONFIG_PCI -#define _IO_BASE 0 +#define _IO_BASE POISON_POINTER_DELTA #define _ISA_MEM_BASE 0 #define PCI_DRAM_OFFSET 0 #elif defined(CONFIG_PPC32) @@ -65,8 +65,8 @@ extern resource_size_t isa_mem_base; extern bool isa_io_special; #ifdef CONFIG_PPC32 -#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO) -#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits +#ifdef CONFIG_PPC_INDIRECT_PIO +#error 
CONFIG_PPC_INDIRECT_PIO is not yet supported on 32 bits #endif #endif @@ -80,16 +80,12 @@ extern bool isa_io_special; * * in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64 * out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64 - * _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns + * _insb, _insw, _insl, _outsb, _outsw, _outsl * * Those operate directly on a kernel virtual address. Note that the prototype * for the out_* accessors has the arguments in opposite order from the usual * linux PCI accessors. Unlike those, they take the address first and the value * next. - * - * Note: I might drop the _ns suffix on the stream operations soon as it is - * simply normal for stream operations to not swap in the first place. - * */ /* -mprefixed can generate offsets beyond range, fall back hack */ @@ -228,19 +224,10 @@ static inline void out_be64(volatile u64 __iomem *addr, u64 val) */ extern void _insb(const volatile u8 __iomem *addr, void *buf, long count); extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count); -extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count); -extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count); -extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count); -extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count); - -/* The _ns naming is historical and will be removed. For now, just #define - * the non _ns equivalent names - */ -#define _insw _insw_ns -#define _insl _insl_ns -#define _outsw _outsw_ns -#define _outsl _outsl_ns - +extern void _insw(const volatile u16 __iomem *addr, void *buf, long count); +extern void _outsw(volatile u16 __iomem *addr, const void *buf, long count); +extern void _insl(const volatile u32 __iomem *addr, void *buf, long count); +extern void _outsl(volatile u32 __iomem *addr, const void *buf, long count); /* * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line @@ -261,9 +248,9 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, * for PowerPC is as close as possible to the x86 version of these, and thus * provides fairly heavy weight barriers for the non-raw versions * - * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO - * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its - * own implementation of some or all of the accessors. + * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_PIO + * is set allowing the platform to provide its own implementation of some + * of the accessors. */ /* @@ -274,116 +261,11 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, #include <asm/eeh.h> #endif -/* Shortcut to the MMIO argument pointer */ -#define PCI_IO_ADDR volatile void __iomem * - -/* Indirect IO address tokens: - * - * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks - * on all MMIOs. (Note that this is all 64 bits only for now) - * - * To help platforms who may need to differentiate MMIO addresses in - * their hooks, a bitfield is reserved for use by the platform near the - * top of MMIO addresses (not PIO, those have to cope the hard way). - * - * The highest address in the kernel virtual space are: - * - * d0003fffffffffff # with Hash MMU - * c00fffffffffffff # with Radix MMU - * - * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits - * that can be used for the field. 
- * - * The direct IO mapping operations will then mask off those bits - * before doing the actual access, though that only happen when - * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that - * mechanism - * - * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes - * all PIO functions call through a hook. - */ - -#ifdef CONFIG_PPC_INDIRECT_MMIO -#define PCI_IO_IND_TOKEN_SHIFT 52 -#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT) -#define PCI_FIX_ADDR(addr) \ - ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) -#define PCI_GET_ADDR_TOKEN(addr) \ - (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> \ - PCI_IO_IND_TOKEN_SHIFT) -#define PCI_SET_ADDR_TOKEN(addr, token) \ -do { \ - unsigned long __a = (unsigned long)(addr); \ - __a &= ~PCI_IO_IND_TOKEN_MASK; \ - __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT; \ - (addr) = (void __iomem *)__a; \ -} while(0) -#else -#define PCI_FIX_ADDR(addr) (addr) -#endif - - -/* - * Non ordered and non-swapping "raw" accessors - */ - -static inline unsigned char __raw_readb(const volatile void __iomem *addr) -{ - return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr); -} -#define __raw_readb __raw_readb - -static inline unsigned short __raw_readw(const volatile void __iomem *addr) -{ - return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr); -} -#define __raw_readw __raw_readw - -static inline unsigned int __raw_readl(const volatile void __iomem *addr) -{ - return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr); -} -#define __raw_readl __raw_readl - -static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr) -{ - *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v; -} -#define __raw_writeb __raw_writeb - -static inline void __raw_writew(unsigned short v, volatile void __iomem *addr) -{ - *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v; -} -#define __raw_writew __raw_writew - -static inline void __raw_writel(unsigned int v, volatile void __iomem *addr) -{ - *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v; -} -#define __raw_writel __raw_writel +#define _IO_PORT(port) ((volatile void __iomem *)(_IO_BASE + (port))) #ifdef __powerpc64__ -static inline unsigned long __raw_readq(const volatile void __iomem *addr) -{ - return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr); -} -#define __raw_readq __raw_readq - -static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) -{ - *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; -} -#define __raw_writeq __raw_writeq - -static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) -{ - __raw_writeq((__force unsigned long)cpu_to_be64(v), addr); -} -#define __raw_writeq_be __raw_writeq_be - /* - * Real mode versions of the above. Those instructions are only supposed + * Real mode versions of raw accessors. Those instructions are only supposed * to be used in hypervisor real mode as per the architecture spec. 
*/ static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr) @@ -551,30 +433,23 @@ __do_out_asm(_rec_outl, "stwbrx") * possible to hook directly at the toplevel PIO operation if they have to * be handled differently */ -#define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val) -#define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val) -#define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val) -#define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val) -#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val) -#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val) -#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val) #ifdef CONFIG_EEH -#define __do_readb(addr) eeh_readb(PCI_FIX_ADDR(addr)) -#define __do_readw(addr) eeh_readw(PCI_FIX_ADDR(addr)) -#define __do_readl(addr) eeh_readl(PCI_FIX_ADDR(addr)) -#define __do_readq(addr) eeh_readq(PCI_FIX_ADDR(addr)) -#define __do_readw_be(addr) eeh_readw_be(PCI_FIX_ADDR(addr)) -#define __do_readl_be(addr) eeh_readl_be(PCI_FIX_ADDR(addr)) -#define __do_readq_be(addr) eeh_readq_be(PCI_FIX_ADDR(addr)) +#define __do_readb(addr) eeh_readb(addr) +#define __do_readw(addr) eeh_readw(addr) +#define __do_readl(addr) eeh_readl(addr) +#define __do_readq(addr) eeh_readq(addr) +#define __do_readw_be(addr) eeh_readw_be(addr) +#define __do_readl_be(addr) eeh_readl_be(addr) +#define __do_readq_be(addr) eeh_readq_be(addr) #else /* CONFIG_EEH */ -#define __do_readb(addr) in_8(PCI_FIX_ADDR(addr)) -#define __do_readw(addr) in_le16(PCI_FIX_ADDR(addr)) -#define __do_readl(addr) in_le32(PCI_FIX_ADDR(addr)) -#define __do_readq(addr) in_le64(PCI_FIX_ADDR(addr)) -#define __do_readw_be(addr) in_be16(PCI_FIX_ADDR(addr)) -#define __do_readl_be(addr) in_be32(PCI_FIX_ADDR(addr)) -#define __do_readq_be(addr) in_be64(PCI_FIX_ADDR(addr)) +#define __do_readb(addr) in_8(addr) +#define __do_readw(addr) in_le16(addr) +#define __do_readl(addr) in_le32(addr) +#define __do_readq(addr) in_le64(addr) +#define __do_readw_be(addr) in_be16(addr) +#define __do_readl_be(addr) in_be32(addr) +#define __do_readq_be(addr) in_be64(addr) #endif /* !defined(CONFIG_EEH) */ #ifdef CONFIG_PPC32 @@ -585,64 +460,185 @@ __do_out_asm(_rec_outl, "stwbrx") #define __do_inw(port) _rec_inw(port) #define __do_inl(port) _rec_inl(port) #else /* CONFIG_PPC32 */ -#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port); -#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port); -#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port); -#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port); -#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port); -#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port); +#define __do_outb(val, port) writeb(val,_IO_PORT(port)); +#define __do_outw(val, port) writew(val,_IO_PORT(port)); +#define __do_outl(val, port) writel(val,_IO_PORT(port)); +#define __do_inb(port) readb(_IO_PORT(port)); +#define __do_inw(port) readw(_IO_PORT(port)); +#define __do_inl(port) readl(_IO_PORT(port)); #endif /* !CONFIG_PPC32 */ #ifdef CONFIG_EEH -#define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n)) -#define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n)) -#define __do_readsl(a, b, n) eeh_readsl(PCI_FIX_ADDR(a), (b), (n)) +#define __do_readsb(a, b, n) eeh_readsb(a, (b), (n)) +#define __do_readsw(a, b, n) eeh_readsw(a, (b), (n)) +#define __do_readsl(a, b, n) eeh_readsl(a, (b), (n)) #else /* CONFIG_EEH */ -#define __do_readsb(a, b, n) _insb(PCI_FIX_ADDR(a), (b), (n)) 
-#define __do_readsw(a, b, n) _insw(PCI_FIX_ADDR(a), (b), (n)) -#define __do_readsl(a, b, n) _insl(PCI_FIX_ADDR(a), (b), (n)) +#define __do_readsb(a, b, n) _insb(a, (b), (n)) +#define __do_readsw(a, b, n) _insw(a, (b), (n)) +#define __do_readsl(a, b, n) _insl(a, (b), (n)) #endif /* !CONFIG_EEH */ -#define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n)) -#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n)) -#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n)) - -#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) -#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) -#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) -#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) -#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) -#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) - -#define __do_memset_io(addr, c, n) \ - _memset_io(PCI_FIX_ADDR(addr), c, n) -#define __do_memcpy_toio(dst, src, n) \ - _memcpy_toio(PCI_FIX_ADDR(dst), src, n) +#define __do_writesb(a, b, n) _outsb(a, (b), (n)) +#define __do_writesw(a, b, n) _outsw(a, (b), (n)) +#define __do_writesl(a, b, n) _outsl(a, (b), (n)) + +#define __do_insb(p, b, n) readsb(_IO_PORT(p), (b), (n)) +#define __do_insw(p, b, n) readsw(_IO_PORT(p), (b), (n)) +#define __do_insl(p, b, n) readsl(_IO_PORT(p), (b), (n)) +#define __do_outsb(p, b, n) writesb(_IO_PORT(p),(b),(n)) +#define __do_outsw(p, b, n) writesw(_IO_PORT(p),(b),(n)) +#define __do_outsl(p, b, n) writesl(_IO_PORT(p),(b),(n)) #ifdef CONFIG_EEH #define __do_memcpy_fromio(dst, src, n) \ - eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n) + eeh_memcpy_fromio(dst, src, n) #else /* CONFIG_EEH */ #define __do_memcpy_fromio(dst, src, n) \ - _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) + _memcpy_fromio(dst, src, n) #endif /* !CONFIG_EEH */ -#ifdef CONFIG_PPC_INDIRECT_PIO -#define DEF_PCI_HOOK_pio(x) x -#else -#define DEF_PCI_HOOK_pio(x) NULL -#endif +static inline u8 readb(const volatile void __iomem *addr) +{ + return __do_readb(addr); +} +#define readb readb + +static inline u16 readw(const volatile void __iomem *addr) +{ + return __do_readw(addr); +} +#define readw readw + +static inline u32 readl(const volatile void __iomem *addr) +{ + return __do_readl(addr); +} +#define readl readl + +static inline u16 readw_be(const volatile void __iomem *addr) +{ + return __do_readw_be(addr); +} + +static inline u32 readl_be(const volatile void __iomem *addr) +{ + return __do_readl_be(addr); +} + +static inline void writeb(u8 val, volatile void __iomem *addr) +{ + out_8(addr, val); +} +#define writeb writeb + +static inline void writew(u16 val, volatile void __iomem *addr) +{ + out_le16(addr, val); +} +#define writew writew + +static inline void writel(u32 val, volatile void __iomem *addr) +{ + out_le32(addr, val); +} +#define writel writel + +static inline void writew_be(u16 val, volatile void __iomem *addr) +{ + out_be16(addr, val); +} + +static inline void writel_be(u32 val, volatile void __iomem *addr) +{ + out_be32(addr, val); +} + +static inline void readsb(const volatile void __iomem *a, void *b, unsigned long c) +{ + __do_readsb(a, b, c); +} +#define readsb readsb + +static inline void readsw(const volatile void __iomem *a, void *b, unsigned long c) +{ + __do_readsw(a, b, c); +} +#define readsw readsw + +static inline void readsl(const volatile void __iomem *a, void *b, unsigned long c) +{ + __do_readsl(a, b, c); +} +#define readsl readsl + +static inline void 
writesb(volatile void __iomem *a, const void *b, unsigned long c) +{ + __do_writesb(a, b, c); +} +#define writesb writesb + +static inline void writesw(volatile void __iomem *a, const void *b, unsigned long c) +{ + __do_writesw(a, b, c); +} +#define writesw writesw + +static inline void writesl(volatile void __iomem *a, const void *b, unsigned long c) +{ + __do_writesl(a, b, c); +} +#define writesl writesl + +static inline void memset_io(volatile void __iomem *a, int c, unsigned long n) +{ + _memset_io(a, c, n); +} +#define memset_io memset_io + +static inline void memcpy_fromio(void *d, const volatile void __iomem *s, unsigned long n) +{ + __do_memcpy_fromio(d, s, n); +} +#define memcpy_fromio memcpy_fromio + +static inline void memcpy_toio(volatile void __iomem *d, const void *s, unsigned long n) +{ + _memcpy_toio(d, s, n); +} +#define memcpy_toio memcpy_toio -#ifdef CONFIG_PPC_INDIRECT_MMIO -#define DEF_PCI_HOOK_mem(x) x +#ifdef __powerpc64__ +static inline u64 readq(const volatile void __iomem *addr) +{ + return __do_readq(addr); +} + +static inline u64 readq_be(const volatile void __iomem *addr) +{ + return __do_readq_be(addr); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + out_le64(addr, val); +} + +static inline void writeq_be(u64 val, volatile void __iomem *addr) +{ + out_be64(addr, val); +} +#endif /* __powerpc64__ */ + +#ifdef CONFIG_PPC_INDIRECT_PIO +#define DEF_PCI_HOOK(x) x #else -#define DEF_PCI_HOOK_mem(x) NULL +#define DEF_PCI_HOOK(x) NULL #endif /* Structure containing all the hooks */ extern struct ppc_pci_io { -#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) ret (*name) at; -#define DEF_PCI_AC_NORET(name, at, al, space, aa) void (*name) at; +#define DEF_PCI_AC_RET(name, ret, at, al) ret (*name) at; +#define DEF_PCI_AC_NORET(name, at, al) void (*name) at; #include <asm/io-defs.h> @@ -652,18 +648,18 @@ extern struct ppc_pci_io { } ppc_pci_io; /* The inline wrappers */ -#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ +#define DEF_PCI_AC_RET(name, ret, at, al) \ static inline ret name at \ { \ - if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ + if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ return ppc_pci_io.name al; \ return __do_##name al; \ } -#define DEF_PCI_AC_NORET(name, at, al, space, aa) \ +#define DEF_PCI_AC_NORET(name, at, al) \ static inline void name at \ { \ - if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ + if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ ppc_pci_io.name al; \ else \ __do_##name al; \ @@ -674,21 +670,7 @@ static inline void name at \ #undef DEF_PCI_AC_RET #undef DEF_PCI_AC_NORET -/* Some drivers check for the presence of readq & writeq with - * a #ifdef, so we make them happy here. - */ -#define readb readb -#define readw readw -#define readl readl -#define writeb writeb -#define writew writew -#define writel writel -#define readsb readsb -#define readsw readsw -#define readsl readsl -#define writesb writesb -#define writesw writesw -#define writesl writesl +// Signal to asm-generic/io.h that we have implemented these. 
#define inb inb #define inw inw #define inl inl @@ -705,9 +687,6 @@ static inline void name at \ #define readq readq #define writeq writeq #endif -#define memset_io memset_io -#define memcpy_fromio memcpy_fromio -#define memcpy_toio memcpy_toio /* * We don't do relaxed operations yet, at least not with this semantic @@ -738,35 +717,11 @@ static inline unsigned int ioread32be(const void __iomem *addr) #define ioread32be ioread32be #ifdef __powerpc64__ -static inline u64 ioread64_lo_hi(const void __iomem *addr) -{ - return readq(addr); -} -#define ioread64_lo_hi ioread64_lo_hi - -static inline u64 ioread64_hi_lo(const void __iomem *addr) -{ - return readq(addr); -} -#define ioread64_hi_lo ioread64_hi_lo - static inline u64 ioread64be(const void __iomem *addr) { return readq_be(addr); } #define ioread64be ioread64be - -static inline u64 ioread64be_lo_hi(const void __iomem *addr) -{ - return readq_be(addr); -} -#define ioread64be_lo_hi ioread64be_lo_hi - -static inline u64 ioread64be_hi_lo(const void __iomem *addr) -{ - return readq_be(addr); -} -#define ioread64be_hi_lo ioread64be_hi_lo #endif /* __powerpc64__ */ static inline void iowrite16be(u16 val, void __iomem *addr) @@ -782,35 +737,11 @@ static inline void iowrite32be(u32 val, void __iomem *addr) #define iowrite32be iowrite32be #ifdef __powerpc64__ -static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) -{ - writeq(val, addr); -} -#define iowrite64_lo_hi iowrite64_lo_hi - -static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) -{ - writeq(val, addr); -} -#define iowrite64_hi_lo iowrite64_hi_lo - static inline void iowrite64be(u64 val, void __iomem *addr) { writeq_be(val, addr); } #define iowrite64be iowrite64be - -static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) -{ - writeq_be(val, addr); -} -#define iowrite64be_lo_hi iowrite64be_lo_hi - -static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) -{ - writeq_be(val, addr); -} -#define iowrite64be_hi_lo iowrite64be_hi_lo #endif /* __powerpc64__ */ struct pci_dev; @@ -895,7 +826,7 @@ void __iomem *ioremap_wt(phys_addr_t address, unsigned long size); void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); #define ioremap_cache(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((addr), (size), PAGE_KERNEL) #define iounmap iounmap @@ -970,19 +901,7 @@ static inline void * phys_to_virt(unsigned long address) #define phys_to_virt phys_to_virt /* - * Change "struct page" to physical address. - */ -static inline phys_addr_t page_to_phys(struct page *page) -{ - unsigned long pfn = page_to_pfn(page); - - WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !pfn_valid(pfn)); - - return PFN_PHYS(pfn); -} - -/* - * 32 bits still uses virt_to_bus() for it's implementation of DMA + * 32 bits still uses virt_to_bus() for its implementation of DMA * mappings so we have to keep it defined here. We also have some old * drivers (shame shame shame) that use bus_to_virt() and haven't been * fixed yet so I need to define it here.
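(A hedged usage sketch for the 64-bit big-endian accessors kept above; the register offset 0x10, the function name, and the regs mapping are invented for illustration.)

#include <linux/io.h>

/* Read-modify-write a big-endian 64-bit device register. */
static u64 bump_be_reg(void __iomem *regs)
{
	u64 v = ioread64be(regs + 0x10);	/* byte-swapped load on LE hosts */

	iowrite64be(v + 1, regs + 0x10);
	return v;
}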
@@ -1042,6 +961,14 @@ static inline void * bus_to_virt(unsigned long address) #include <asm-generic/io.h> +#ifdef __powerpc64__ +static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) +{ + __raw_writeq((__force unsigned long)cpu_to_be64(v), addr); +} +#define __raw_writeq_be __raw_writeq_be +#endif // __powerpc64__ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_IO_H */ diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 026695943550..b410021ad4c6 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -31,6 +31,8 @@ #define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" #define DMA64_PROPNAME "linux,dma64-ddr-window-info" +#define MIN_DDW_VPMEM_DMA_WINDOW SZ_2G + /* Boot time flags */ extern int iommu_is_off; extern int iommu_force_on; @@ -156,6 +158,9 @@ extern int iommu_tce_table_put(struct iommu_table *tbl); extern struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, unsigned long res_start, unsigned long res_end); bool iommu_table_in_use(struct iommu_table *tbl); +extern void iommu_table_reserve_pages(struct iommu_table *tbl, + unsigned long res_start, unsigned long res_end); +extern void iommu_table_clear(struct iommu_table *tbl); #define IOMMU_TABLE_GROUP_MAX_TABLES 2 @@ -178,9 +183,9 @@ struct iommu_table_group_ops { long (*unset_window)(struct iommu_table_group *table_group, int num); /* Switch ownership from platform code to external user (e.g. VFIO) */ - long (*take_ownership)(struct iommu_table_group *table_group); + long (*take_ownership)(struct iommu_table_group *table_group, struct device *dev); /* Switch ownership from external user (e.g. VFIO) back to core */ - void (*release_ownership)(struct iommu_table_group *table_group); + void (*release_ownership)(struct iommu_table_group *table_group, struct device *dev); }; struct iommu_table_group_link { @@ -217,8 +222,8 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, enum dma_data_direction *direction); extern void iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages); +int dev_has_iommu_table(struct device *dev, void *data); -extern struct iommu_table_group_ops spapr_tce_table_group_ops; #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, @@ -231,6 +236,11 @@ static inline int iommu_add_device(struct iommu_table_group *table_group, { return 0; } + +static inline int dev_has_iommu_table(struct device *dev, void *data) +{ + return 0; +} #endif /* !CONFIG_IOMMU_API */ u64 dma_iommu_get_required_mask(struct device *dev); @@ -307,12 +317,6 @@ extern void iommu_flush_tce(struct iommu_table *tbl); extern enum dma_data_direction iommu_tce_direction(unsigned long tce); extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir); -#ifdef CONFIG_PPC_CELL_NATIVE -extern bool iommu_fixed_is_weak; -#else -#define iommu_fixed_is_weak false -#endif - extern const struct dma_map_ops dma_iommu_ops; #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index ba1a5974e714..aa3751960ffd 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -33,7 +33,7 @@ extern int distribute_irqs; struct pt_regs; -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE /* * Per-cpu stacks for handling critical, debug and machine check * level interrupts. 
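(A hedged sketch of driving the newly exported dev_has_iommu_table(): its signature matches the device_for_each_child() callback, and, assuming it keeps its historical behaviour of reporting a match through the data pointer, a caller can locate a child device that already has an iommu table. The wrapper name is invented.)

#include <linux/device.h>
#include <linux/pci.h>
#include <asm/iommu.h>

static struct pci_dev *find_child_with_table(struct device *parent)
{
	struct pci_dev *pdev = NULL;

	/* Iteration stops as soon as the callback returns nonzero. */
	device_for_each_child(parent, &pdev, dev_has_iommu_table);
	return pdev;	/* NULL when no child had an iommu table */
}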
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 365d2720097c..b5bbb94c51f6 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -19,7 +19,7 @@ #define KASAN_SHADOW_SCALE_SHIFT 3 -#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32) +#if defined(CONFIG_EXECMEM) && defined(CONFIG_PPC32) #define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M) #else #define KASAN_KERN_START PAGE_OFFSET diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index e1b43aa12175..70f2f0517509 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -55,59 +55,17 @@ typedef void (*crash_shutdown_t)(void); #ifdef CONFIG_KEXEC_CORE - -/* - * This function is responsible for capturing register states if coming - * via panic or invoking dump using sysrq-trigger. - */ -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - if (oldregs) - memcpy(newregs, oldregs, sizeof(*newregs)); - else - ppc_save_regs(newregs); -} +struct kimage; +struct pt_regs; extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ -extern int crashing_cpu; -extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); -extern void crash_ipi_callback(struct pt_regs *); -extern int crash_wake_offline; - -struct kimage; -struct pt_regs; extern void default_machine_kexec(struct kimage *image); -extern void default_machine_crash_shutdown(struct pt_regs *regs); -extern int crash_shutdown_register(crash_shutdown_t handler); -extern int crash_shutdown_unregister(crash_shutdown_t handler); - -extern void crash_kexec_prepare(void); -extern void crash_kexec_secondary(struct pt_regs *regs); -int __init overlaps_crashkernel(unsigned long start, unsigned long size); -extern void reserve_crashkernel(void); -extern void machine_kexec_mask_interrupts(void); - -static inline bool kdump_in_progress(void) -{ - return crashing_cpu >= 0; -} void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer, unsigned long start_address) __noreturn; - void kexec_copy_flush(struct kimage *image); -#if defined(CONFIG_CRASH_DUMP) -bool is_kdump_kernel(void); -#define is_kdump_kernel is_kdump_kernel -#if defined(CONFIG_PPC_RTAS) -void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); -#define crash_free_reserved_phys_range crash_free_reserved_phys_range -#endif /* CONFIG_PPC_RTAS */ -#endif /* CONFIG_CRASH_DUMP */ - #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_elf64_ops; @@ -136,31 +94,83 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long int arch_kimage_file_post_load_cleanup(struct kimage *image); #define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup -int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); -#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole +int arch_check_excluded_range(struct kimage *image, unsigned long start, + unsigned long end); +#define arch_check_excluded_range arch_check_excluded_range + int load_crashdump_segments_ppc64(struct kimage *image, struct kexec_buf *kbuf); int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); -unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image); -int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, - unsigned long initrd_load_addr, - 
unsigned long initrd_len, const char *cmdline); +unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem); +int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem); #endif /* CONFIG_PPC64 */ #endif /* CONFIG_KEXEC_FILE */ -#else /* !CONFIG_KEXEC_CORE */ -static inline void crash_kexec_secondary(struct pt_regs *regs) { } +#endif /* CONFIG_KEXEC_CORE */ -static inline int overlaps_crashkernel(unsigned long start, unsigned long size) +#ifdef CONFIG_CRASH_RESERVE +int __init overlaps_crashkernel(unsigned long start, unsigned long size); +extern void arch_reserve_crashkernel(void); +#else +static inline void arch_reserve_crashkernel(void) {} +static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; } +#endif + +#if defined(CONFIG_CRASH_DUMP) +/* + * This function is responsible for capturing register states if coming + * via panic or invoking dump using sysrq-trigger. + */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) { - return 0; + if (oldregs) + memcpy(newregs, oldregs, sizeof(*newregs)); + else + ppc_save_regs(newregs); +} + +#ifdef CONFIG_CRASH_HOTPLUG +void arch_crash_handle_hotplug_event(struct kimage *image, void *arg); +#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event + +int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags); +#define arch_crash_hotplug_support arch_crash_hotplug_support + +unsigned int arch_crash_get_elfcorehdr_size(void); +#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size +#endif /* CONFIG_CRASH_HOTPLUG */ + +extern int crashing_cpu; +extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); +extern void crash_ipi_callback(struct pt_regs *regs); +extern int crash_wake_offline; + +extern int crash_shutdown_register(crash_shutdown_t handler); +extern int crash_shutdown_unregister(crash_shutdown_t handler); +extern void default_machine_crash_shutdown(struct pt_regs *regs); + +extern void crash_kexec_prepare(void); +extern void crash_kexec_secondary(struct pt_regs *regs); + +static inline bool kdump_in_progress(void) +{ + return crashing_cpu >= 0; } -static inline void reserve_crashkernel(void) { ; } +bool is_kdump_kernel(void); +#define is_kdump_kernel is_kdump_kernel +#if defined(CONFIG_PPC_RTAS) +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range +#endif /* CONFIG_PPC_RTAS */ + +#else /* !CONFIG_CRASH_DUMP */ +static inline void crash_kexec_secondary(struct pt_regs *regs) { } static inline int crash_shutdown_register(crash_shutdown_t handler) { @@ -183,7 +193,11 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) { } -#endif /* CONFIG_KEXEC_CORE */ +#endif /* CONFIG_CRASH_DUMP */ + +#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP) +int update_cpus_node(void *fdt); +#endif #ifdef CONFIG_PPC_BOOK3S_64 #include <asm/book3s/64/kexec.h> diff --git a/arch/powerpc/include/asm/kexec_ranges.h b/arch/powerpc/include/asm/kexec_ranges.h index f83866a19e87..14055896cbcb 100644 --- a/arch/powerpc/include/asm/kexec_ranges.h +++ b/arch/powerpc/include/asm/kexec_ranges.h @@ -7,19 +7,9 @@ void sort_memory_ranges(struct crash_mem *mrngs, bool merge); struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges); int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size); -int add_tce_mem_ranges(struct crash_mem 
**mem_ranges); -int add_initrd_mem_range(struct crash_mem **mem_ranges); -#ifdef CONFIG_PPC_64S_HASH_MMU -int add_htab_mem_range(struct crash_mem **mem_ranges); -#else -static inline int add_htab_mem_range(struct crash_mem **mem_ranges) -{ - return 0; -} -#endif -int add_kernel_mem_range(struct crash_mem **mem_ranges); -int add_rtas_mem_range(struct crash_mem **mem_ranges); -int add_opal_mem_range(struct crash_mem **mem_ranges); -int add_reserved_mem_ranges(struct crash_mem **mem_ranges); - +int remove_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size); +int get_exclude_memory_ranges(struct crash_mem **mem_ranges); +int get_reserved_memory_ranges(struct crash_mem **mem_ranges); +int get_crash_memory_ranges(struct crash_mem **mem_ranges); +int get_usable_memory_ranges(struct crash_mem **mem_ranges); #endif /* _ASM_POWERPC_KEXEC_RANGES_H */ diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h index 424ceef82ae6..1f7cab58ab2c 100644 --- a/arch/powerpc/include/asm/kfence.h +++ b/arch/powerpc/include/asm/kfence.h @@ -15,9 +15,22 @@ #define ARCH_FUNC_PREFIX "." #endif +extern bool kfence_early_init; +extern bool kfence_disabled; + +static inline void disable_kfence(void) +{ + kfence_disabled = true; +} + static inline bool arch_kfence_init_pool(void) { - return true; + return !kfence_disabled; +} + +static inline bool kfence_early_init_enabled(void) +{ + return IS_ENABLED(CONFIG_KFENCE) && kfence_early_init; } #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 4525a9c68260..dfe2e5ad3b21 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -21,7 +21,7 @@ #include <linux/percpu.h> #include <linux/module.h> #include <asm/probes.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #ifdef CONFIG_KPROBES #define __ARCH_WANT_KPROBES_INSN_SLOT diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index ad7e8c5aec3f..2bb03d941e3e 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -20,7 +20,7 @@ static __always_inline bool kuap_is_disabled(void); #include <asm/nohash/32/kup-8xx.h> #endif -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE #include <asm/nohash/kup-booke.h> #endif diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 3e1e2a698c9e..e1ff291ba891 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -203,7 +203,7 @@ extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, - bool writing, bool kvm_ro, + bool writing, pte_t *inserted_pte, unsigned int *levelp); extern int kvmppc_init_vm_radix(struct kvm *kvm); extern void kvmppc_free_radix(struct kvm *kvm); @@ -235,7 +235,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu); extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, - bool writing, bool *writable); + bool writing, bool *writable, struct page **page); extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, unsigned long *rmap, long pte_index, int realmode); extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, @@ -594,6 +594,7 @@ static inline u##size 
kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB) +KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES) KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR) KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR) KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index d8729ec81ca0..b936e174eefd 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -684,6 +684,17 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1); int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu); int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa); +int kvmhv_counters_tracepoint_regfunc(void); +void kvmhv_counters_tracepoint_unregfunc(void); +int kvmhv_get_l2_counters_status(void); +void kvmhv_set_l2_counters_status(int cpu, bool status); +u64 kvmhv_get_l1_to_l2_cs_time(void); +u64 kvmhv_get_l2_to_l1_cs_time(void); +u64 kvmhv_get_l2_runtime_agg(void); +u64 kvmhv_get_l1_to_l2_cs_time_vcpu(void); +u64 kvmhv_get_l2_to_l1_cs_time_vcpu(void); +u64 kvmhv_get_l2_runtime_agg_vcpu(void); + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 8abac532146e..2d139c807577 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -599,6 +599,9 @@ struct kvm_vcpu_arch { ulong dawrx0; ulong dawr1; ulong dawrx1; + ulong dexcr; + ulong hashkeyr; + ulong hashpkeyr; ulong ciabr; ulong cfar; ulong ppr; @@ -868,6 +871,11 @@ struct kvm_vcpu_arch { struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */ #endif #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + u64 l1_to_l2_cs; + u64 l2_to_l1_cs; + u64 l2_runtime_agg; +#endif }; #define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET] @@ -894,10 +902,8 @@ struct kvm_vcpu_arch { #define __KVM_HAVE_ARCH_WQP #define __KVM_HAVE_CREATE_DEVICE -static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 3281215097cc..ca3829d47ab7 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -287,7 +287,6 @@ struct kvmppc_ops { bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); - bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); void (*free_memslot)(struct kvm_memory_slot *slot); int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 61ec2447dabf..f40a646bee3c 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -62,7 +62,8 @@ struct lppaca { u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */ u8 fpregs_in_use; u8 pmcregs_in_use; - u8 
reserved8[28]; + u8 l2_counters_enable; /* Enable usage of counters for KVM guest */ + u8 reserved8[27]; __be64 wait_state_cycles; /* Wait cycles for this proc */ u8 reserved9[28]; __be16 slb_count; /* # of SLBs to maintain */ @@ -92,9 +93,13 @@ struct lppaca { /* cacheline 4-5 */ __be32 page_ins; /* CMO Hint - # page ins by OS */ - u8 reserved12[148]; + u8 reserved12[28]; + volatile __be64 l1_to_l2_cs_tb; + volatile __be64 l2_to_l1_cs_tb; + volatile __be64 l2_runtime_tb; + u8 reserved13[96]; volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ - u8 reserved13[96]; + u8 reserved14[96]; } ____cacheline_aligned; #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr) diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 1862f94335ee..3298eec123a3 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -4,20 +4,24 @@ #ifdef __KERNEL__ #include <linux/compiler.h> -#include <linux/seq_file.h> #include <linux/init.h> -#include <linux/dma-mapping.h> #include <linux/export.h> +#include <linux/time64.h> + +#include <asm/page.h> struct pt_regs; struct pci_bus; +struct device; struct device_node; struct iommu_table; struct rtc_time; struct file; +struct pci_dev; struct pci_controller; struct kimage; struct pci_host_bridge; +struct seq_file; struct machdep_calls { const char *name; diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 17a77d47ed6d..42a51a993d94 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -6,7 +6,7 @@ #include <uapi/asm/mman.h> -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) && !defined(BUILD_VDSO) #include <asm/cputable.h> #include <linux/mm.h> diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 3b72c7ed24cf..4182d68d9cd1 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -16,7 +16,6 @@ */ #define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001) #define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002) -#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004) #define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) #define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) #define MMU_FTR_TYPE_47x ASM_CONST(0x00000020) @@ -153,9 +152,6 @@ enum { #ifdef CONFIG_PPC_8xx MMU_FTR_TYPE_8xx | #endif -#ifdef CONFIG_40x - MMU_FTR_TYPE_40x | -#endif #ifdef CONFIG_PPC_47x MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL | #elif defined(CONFIG_44x) @@ -202,9 +198,6 @@ enum { #ifdef CONFIG_PPC_8xx #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_8xx #endif -#ifdef CONFIG_40x -#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_40x -#endif #ifdef CONFIG_PPC_47x #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_47x #elif defined(CONFIG_44x) @@ -246,12 +239,11 @@ static __always_inline bool mmu_has_feature(unsigned long feature) { int i; -#ifndef __clang__ /* clang can't cope with this */ BUILD_BUG_ON(!__builtin_constant_p(feature)); -#endif + BUILD_BUG_ON(__builtin_popcountl(feature) > 1); #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG - if (!static_key_initialized) { + if (!static_key_feature_checks_initialized) { printk("Warning! 
mmu_has_feature() used prior to jump label init!\n"); dump_stack(); return early_mmu_has_feature(feature); @@ -406,9 +398,5 @@ extern void *abatron_pteptrs[2]; #include <asm/nohash/mmu.h> #endif -#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) -#define __HAVE_ARCH_RESERVED_KERNEL_PAGES -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MMU_H_ */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 37bffa0f7918..a157ab513347 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -116,9 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) } #endif -extern int use_cop(unsigned long acop, struct mm_struct *mm); -extern void drop_cop(unsigned long acop, struct mm_struct *mm); - #ifdef CONFIG_PPC_BOOK3S_64 static inline void inc_mm_active_cpus(struct mm_struct *mm) { @@ -260,15 +257,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, extern void arch_exit_mmap(struct mm_struct *mm); -static inline void arch_unmap(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - unsigned long vdso_base = (unsigned long)mm->context.vdso; - - if (start <= vdso_base && vdso_base < end) - mm->context.vdso = NULL; -} - #ifdef CONFIG_PPC_MEM_KEYS bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, bool execute, bool foreign); diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index da827d2d0866..049152f8d597 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h @@ -20,12 +20,6 @@ #ifdef CONFIG_NUMA -extern struct pglist_data *node_data[]; -/* - * Return a pointer to the node data for node n. - */ -#define NODE_DATA(nid) (node_data[nid]) - /* * Following are specific to this numa platform. */ @@ -35,6 +29,7 @@ extern cpumask_var_t node_to_cpumask_map[]; #ifdef CONFIG_MEMORY_HOTPLUG extern unsigned long max_pfn; u64 memory_hotplug_max(void); +u64 hot_add_drconf_memory_max(void); #else #define memory_hotplug_max() memblock_end_of_DRAM() #endif diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index a8e2e8339fb7..e1ee5026ac4a 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -35,9 +35,11 @@ struct mod_arch_specific { bool toc_fixed; /* Have we fixed up .TOC.? */ #endif +#ifdef CONFIG_PPC64_ELF_ABI_V1 /* For module function descriptor dereference */ unsigned long start_opd; unsigned long end_opd; +#endif #else /* powerpc64 */ /* Indices of PLT sections within module. 
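An aside on the mmu_has_feature() tightening above: the mask must now be a compile-time constant with at most one bit set, so the whole test can resolve to a single patched branch, with early_mmu_has_feature() as the fallback until static keys are initialized. A minimal sketch of the pattern (the array and helper names below are illustrative stand-ins, not the kernel's exact symbols):

        extern bool static_key_feature_checks_initialized;   /* set once keys work */
        extern struct static_key_true feature_keys[BITS_PER_LONG];  /* stand-in */
        extern bool early_has_feature(unsigned long feature);       /* stand-in */

        static __always_inline bool has_feature(unsigned long feature)
        {
                BUILD_BUG_ON(!__builtin_constant_p(feature));   /* literal masks only */
                BUILD_BUG_ON(__builtin_popcountl(feature) > 1); /* one feature bit */

                if (!static_key_feature_checks_initialized)     /* too early: slow path */
                        return early_has_feature(feature);

                /* The bit index selects one static key; its branch is patched once. */
                return static_branch_likely(&feature_keys[__builtin_ctzl(feature)]);
        }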
*/ unsigned int core_plt_section; @@ -47,12 +49,12 @@ struct mod_arch_specific { #ifdef CONFIG_DYNAMIC_FTRACE unsigned long tramp; unsigned long tramp_regs; +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + struct ftrace_ool_stub *ool_stubs; + unsigned int ool_stub_count; + unsigned int ool_stub_index; +#endif #endif - - /* List of BUG addresses, source line numbers and filenames */ - struct list_head bug_list; - struct bug_entry *bug_table; - unsigned int num_bugs; }; /* diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h index 92df40c6cc6b..014799557f60 100644 --- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -4,42 +4,12 @@ #define PAGE_SHIFT_8M 23 -static inline pte_t *hugepd_page(hugepd_t hpd) -{ - BUG_ON(!hugepd_ok(hpd)); - - return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK); -} - -static inline unsigned int hugepd_shift(hugepd_t hpd) -{ - return PAGE_SHIFT_8M; -} - -static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, - unsigned int pdshift) -{ - unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT; - - return hugepd_page(hpd) + idx; -} - static inline void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { flush_tlb_page(vma, vmaddr); } -static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) -{ - *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M); -} - -static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift) -{ - *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M); -} - static inline int check_and_get_huge_psize(int shift) { return shift_to_mmu_psize(shift); @@ -49,6 +19,14 @@ static inline int check_and_get_huge_psize(int shift) void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz); +#define __HAVE_ARCH_HUGE_PTEP_GET +static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + if (ptep_is_8m_pmdp(mm, addr, ptep)) + ptep = pte_offset_kernel((pmd_t *)ptep, ALIGN_DOWN(addr, SZ_8M)); + return ptep_get(ptep); +} + #define __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h deleted file mode 100644 index 8a8f13a22cf4..000000000000 --- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_MMU_40X_H_ -#define _ASM_POWERPC_MMU_40X_H_ - -/* - * PPC40x support - */ - -#define PPC40X_TLB_SIZE 64 - -/* - * TLB entries are defined by a "high" tag portion and a "low" data - * portion. On all architectures, the data portion is 32-bits. - * - * TLB entries are managed entirely under software control by reading, - * writing, and searching using the 4xx-specific tlbre, tlbwr, and tlbsx - * instructions.
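Looping back to the 8xx hugetlb hunk above: with hugepd gone, an 8M page now lives directly at PMD level, spanning two consecutive 4M PMD slots, so huge_ptep_get() may be handed a PMD pointer rather than a real PTE pointer. A commented restatement of the new helper (ptep_is_8m_pmdp() is introduced in a later hunk of this patch):

        /* Sketch: resolve the real head PTE of an 8xx 8M mapping. */
        static inline pte_t huge_ptep_get_sketch(struct mm_struct *mm,
                                                 unsigned long addr, pte_t *ptep)
        {
                /* For 8M pages, "ptep" is really the first of two PMD entries. */
                if (ptep_is_8m_pmdp(mm, addr, ptep))
                        /* Step down into that PMD's PTE table, 8M-aligned. */
                        ptep = pte_offset_kernel((pmd_t *)ptep,
                                                 ALIGN_DOWN(addr, SZ_8M));
                return ptep_get(ptep);  /* ordinary read for all other sizes */
        }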
- */ - -#define TLB_LO 1 -#define TLB_HI 0 - -#define TLB_DATA TLB_LO -#define TLB_TAG TLB_HI - -/* Tag portion */ - -#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ -#define TLB_PAGESZ_MASK 0x00000380 -#define TLB_PAGESZ(x) (((x) & 0x7) << 7) -#define PAGESZ_1K 0 -#define PAGESZ_4K 1 -#define PAGESZ_16K 2 -#define PAGESZ_64K 3 -#define PAGESZ_256K 4 -#define PAGESZ_1M 5 -#define PAGESZ_4M 6 -#define PAGESZ_16M 7 -#define TLB_VALID 0x00000040 /* Entry is valid */ - -/* Data portion */ - -#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ -#define TLB_PERM_MASK 0x00000300 -#define TLB_EX 0x00000200 /* Instruction execution allowed */ -#define TLB_WR 0x00000100 /* Writes permitted */ -#define TLB_ZSEL_MASK 0x000000F0 -#define TLB_ZSEL(x) (((x) & 0xF) << 4) -#define TLB_ATTR_MASK 0x0000000F -#define TLB_W 0x00000008 /* Caching is write-through */ -#define TLB_I 0x00000004 /* Caching is inhibited */ -#define TLB_M 0x00000002 /* Memory is coherent */ -#define TLB_G 0x00000001 /* Memory is guarded from prefetch */ - -#ifndef __ASSEMBLY__ - -typedef struct { - unsigned int id; - unsigned int active; - void __user *vdso; -} mm_context_t; - -#endif /* !__ASSEMBLY__ */ - -#define mmu_virtual_psize MMU_PAGE_4K -#define mmu_linear_psize MMU_PAGE_256M - -#endif /* _ASM_POWERPC_MMU_40X_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 141d82e249a8..2986f9ba40b8 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -170,8 +170,9 @@ #define mmu_linear_psize MMU_PAGE_8M -#define MODULES_VADDR (PAGE_OFFSET - SZ_256M) #define MODULES_END PAGE_OFFSET +#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) +#define MODULES_VADDR (MODULES_END - MODULES_SIZE) #ifndef __ASSEMBLY__ @@ -189,19 +190,14 @@ typedef struct { #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) -/* Page size definitions, common between 32 and 64-bit +/* + * Page size definitions for 8xx * * shift : is the "PAGE_SHIFT" value for that page size - * penc : is the pte encoding mask * */ struct mmu_psize_def { unsigned int shift; /* number of bits */ - unsigned int enc; /* PTE encoding */ - unsigned int ind; /* Corresponding indirect page size shift */ - unsigned int flags; -#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ -#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ }; extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9164a9e41b02..b481738c4bb5 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -52,7 +52,7 @@ #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) #define pgd_ERROR(e) \ - pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e)) /* * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary @@ -118,9 +118,7 @@ * (hardware-defined) PowerPC PTE as closely as possible. 
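A note on the module-area change in the mmu-8xx.h hunk above: the 8xx modules region is now sized by CONFIG_MODULES_SIZE (in megabytes) rather than a fixed 256M. The arithmetic is easy to sanity-check; a hypothetical configuration for illustration:

        /* Sketch: 8xx module area for a hypothetical CONFIG_MODULES_SIZE of 128. */
        #define SZ_1M           0x00100000UL
        #define PAGE_OFFSET     0xc0000000UL    /* common ppc32 layout, for example */
        #define MODULES_END     PAGE_OFFSET
        #define MODULES_SIZE    (128 * SZ_1M)
        #define MODULES_VADDR   (MODULES_END - MODULES_SIZE)
        /* MODULES_VADDR == 0xb8000000: the area sits just below the kernel. */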
*/ -#if defined(CONFIG_40x) -#include <asm/nohash/32/pte-40x.h> -#elif defined(CONFIG_44x) +#if defined(CONFIG_44x) #include <asm/nohash/32/pte-44x.h> #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) #include <asm/nohash/pte-e500.h> @@ -172,7 +170,7 @@ static inline void pmd_clear(pmd_t *pmdp) #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) #else #define pmd_page_vaddr(pmd) \ - ((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) + ((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) #define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT) #endif diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h deleted file mode 100644 index d759cfd74754..000000000000 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H -#define _ASM_POWERPC_NOHASH_32_PTE_40x_H -#ifdef __KERNEL__ - -/* - * At present, all PowerPC 400-class processors share a similar TLB - * architecture. The instruction and data sides share a unified, - * 64-entry, fully-associative TLB which is maintained totally under - * software control. In addition, the instruction side has a - * hardware-managed, 4-entry, fully-associative TLB which serves as a - * first level to the shared TLB. These two TLBs are known as the UTLB - * and ITLB, respectively (see "mmu.h" for definitions). - * - * There are several potential gotchas here. The 40x hardware TLBLO - * field looks like this: - * - * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - * RPN..................... 0 0 EX WR ZSEL....... W I M G - * - * Where possible we make the Linux PTE bits match up with this - * - * - bits 20 and 21 must be cleared, because we use 4k pages (40x can - * support down to 1k pages), this is done in the TLBMiss exception - * handler. - * - We use only zones 0 (for kernel pages) and 1 (for user pages) - * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB - * miss handler. Bit 27 is PAGE_USER, thus selecting the correct - * zone. - * - PRESENT *must* be in the bottom two bits because swap PTEs - * use the top 30 bits. Because 40x doesn't support SMP anyway, M is - * irrelevant so we borrow it for PAGE_PRESENT. Bit 30 - * is cleared in the TLB miss handler before the TLB entry is loaded. - * - All other bits of the PTE are loaded into TLBLO without - * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for - * software PTE bits. We actually use bits 21, 24, 25, and - * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and - * PRESENT. 
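Since the whole pte-40x.h description is going away, it is worth spelling out what the scheme achieved: the 40x Linux PTE deliberately aliased the hardware TLBLO fields, so the TLB miss handler could move a PTE into TLBLO after clearing only the software bits. A sketch of that retired fast path, using the bit values from the deleted header (the zone-select handling is omitted for brevity):

        /* Sketch: the retired 40x PTE -> TLBLO conversion. */
        static unsigned long pte_to_tlblo_40x(unsigned long pte)
        {
                /* Strip _PAGE_ACCESSED, _PAGE_DIRTY, _PAGE_SPECIAL,
                 * _PAGE_READ and _PAGE_PRESENT; EX/WR/WIMG already sit
                 * where the hardware expects them. */
                return pte & ~(0x400UL | 0x080UL | 0x020UL | 0x010UL | 0x002UL);
        }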
- */ - -#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ -#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ -#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ -#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ -#define _PAGE_READ 0x010 /* software: read permission */ -#define _PAGE_SPECIAL 0x020 /* software: Special page */ -#define _PAGE_DIRTY 0x080 /* software: dirty page */ -#define _PAGE_WRITE 0x100 /* hardware: WR, anded with dirty in exception */ -#define _PAGE_EXEC 0x200 /* hardware: EX permission */ -#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ - -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - -/* cache related flags non existing on 40x */ -#define _PAGE_COHERENT 0 - -#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ -#define _PMD_PRESENT_MASK _PMD_PRESENT -#define _PMD_BAD 0x802 -#define _PMD_SIZE_4M 0x0c0 -#define _PMD_SIZE_16M 0x0e0 -#define _PMD_USER 0 - -#define _PTE_NONE_MASK 0 - -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) -#define _PAGE_BASE (_PAGE_BASE_NC) - -#include <asm/pgtable-masks.h> - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h index 851813725237..da0469928273 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-44x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h @@ -75,9 +75,6 @@ #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - /* TODO: Add large page lowmem mapping support */ #define _PMD_PRESENT 0 #define _PMD_PRESENT_MASK (PAGE_MASK) diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h index 653a342d3b25..14d64b4f3f14 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-85xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h @@ -31,9 +31,6 @@ #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ #define _PAGE_SPECIAL 0x00800 /* S: Special page */ -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - #define _PMD_PRESENT 0 #define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_BAD (~PAGE_MASK) diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index 137dc3c84e45..54ebb91dbdcf 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -74,12 +74,11 @@ #define _PTE_NONE_MASK 0 #ifdef CONFIG_PPC_16K_PAGES -#define _PAGE_PSIZE _PAGE_SPS +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SPS) #else -#define _PAGE_PSIZE 0 +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) #endif -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) #define _PAGE_BASE (_PAGE_BASE_NC) #include <asm/pgtable-masks.h> @@ -120,7 +119,7 @@ static inline pte_t pte_mkhuge(pte_t pte) #define pte_mkhuge pte_mkhuge -static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, unsigned long set, int huge); static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -142,19 +141,12 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *pt } #define __ptep_set_access_flags __ptep_set_access_flags -static inline unsigned long 
pgd_leaf_size(pgd_t pgd) -{ - if (pgd_val(pgd) & _PMD_PAGE_8M) - return SZ_8M; - return SZ_4M; -} - -#define pgd_leaf_size pgd_leaf_size - -static inline unsigned long pte_leaf_size(pte_t pte) +static inline unsigned long __pte_leaf_size(pmd_t pmd, pte_t pte) { pte_basic_t val = pte_val(pte); + if (pmd_val(pmd) & _PMD_PAGE_8M) + return SZ_8M; if (val & _PAGE_HUGE) return SZ_512K; if (val & _PAGE_SPS) @@ -162,31 +154,38 @@ static inline unsigned long pte_leaf_size(pte_t pte) return SZ_4K; } -#define pte_leaf_size pte_leaf_size +#define __pte_leaf_size __pte_leaf_size /* * On the 8xx, the page tables are a bit special. For 16k pages, we have * 4 identical entries. For 512k pages, we have 128 entries as if it was * 4k pages, but they are flagged as 512k pages for the hardware. - * For other page sizes, we have a single entry in the table. + * For 8M pages, we have 1024 entries as if it was 4M pages (PMD_SIZE) + * but they are flagged as 8M pages for the hardware. + * For 4k pages, we have a single entry in the table. */ static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr); -static int hugepd_ok(hugepd_t hpd); +static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address); + +static inline bool ptep_is_8m_pmdp(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + return (pmd_t *)ptep == pmd_off(mm, ALIGN_DOWN(addr, SZ_8M)); +} static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge) { if (!huge) return PAGE_SIZE / SZ_4K; - else if (hugepd_ok(*((hugepd_t *)pmd))) - return 1; + else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M) + return SZ_4M / SZ_4K; else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE)) return SZ_16K / SZ_4K; else return SZ_512K / SZ_4K; } -static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, - unsigned long clr, unsigned long set, int huge) +static inline pte_basic_t __pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge) { pte_basic_t *entry = (pte_basic_t *)p; pte_basic_t old = pte_val(*p); @@ -198,7 +197,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) { *entry++ = new; - if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) { + if (IS_ENABLED(CONFIG_PPC_16K_PAGES)) { *entry++ = new; *entry++ = new; *entry++ = new; @@ -208,6 +207,21 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p return old; } +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + unsigned long clr, unsigned long set, int huge) +{ + pte_basic_t old; + + if (huge && ptep_is_8m_pmdp(mm, addr, ptep)) { + pmd_t *pmdp = (pmd_t *)ptep; + + old = __pte_update(mm, addr, pte_offset_kernel(pmdp, 0), clr, set, huge); + __pte_update(mm, addr, pte_offset_kernel(pmdp + 1, 0), clr, set, huge); + } else { + old = __pte_update(mm, addr, ptep, clr, set, huge); + } + return old; +} #define pte_update pte_update #ifdef CONFIG_PPC_16K_PAGES diff --git a/arch/powerpc/include/asm/nohash/hugetlb-e500.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h index 8f04ad20e040..cab0e1f1eea0 100644 --- a/arch/powerpc/include/asm/nohash/hugetlb-e500.h +++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h @@ -2,38 +2,8 @@ #ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H #define _ASM_POWERPC_NOHASH_HUGETLB_E500_H -static inline pte_t *hugepd_page(hugepd_t hpd) -{ - if (WARN_ON(!hugepd_ok(hpd))) - return NULL; - - 
return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); -} - -static inline unsigned int hugepd_shift(hugepd_t hpd) -{ - return hpd_val(hpd) & HUGEPD_SHIFT_MASK; -} - -static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, - unsigned int pdshift) -{ - /* - * On FSL BookE, we have multiple higher-level table entries that - * point to the same hugepte. Just use the first one since they're all - * identical. So for that case, idx=0. - */ - return hugepd_page(hpd); -} - void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) -{ - /* We use the old format for PPC_E500 */ - *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); -} - static inline int check_and_get_huge_psize(int shift) { if (shift & 1) /* Not a power of 4 */ @@ -42,4 +12,13 @@ static inline int check_and_get_huge_psize(int shift) return shift_to_mmu_psize(shift); } +static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + unsigned int tsize = shift - _PAGE_PSIZE_SHIFT_OFFSET; + pte_basic_t val = (tsize << _PAGE_PSIZE_SHIFT) & _PAGE_PSIZE_MSK; + + return __pte((pte_val(entry) & ~(pte_basic_t)_PAGE_PSIZE_MSK) | val); +} +#define arch_make_huge_pte arch_make_huge_pte + #endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */ diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h index 6ddced0415cb..b281d9eeaf1e 100644 --- a/arch/powerpc/include/asm/nohash/mmu-e500.h +++ b/arch/powerpc/include/asm/nohash/mmu-e500.h @@ -244,14 +244,11 @@ typedef struct { /* Page size definitions, common between 32 and 64-bit * * shift : is the "PAGE_SHIFT" value for that page size - * penc : is the pte encoding mask * */ struct mmu_psize_def { unsigned int shift; /* number of bits */ - unsigned int enc; /* PTE encoding */ - unsigned int ind; /* Corresponding indirect page size shift */ unsigned int flags; #define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ #define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ @@ -303,8 +300,7 @@ extern unsigned long linear_map_top; extern int book3e_htw_mode; #define PPC_HTW_NONE 0 -#define PPC_HTW_IBM 1 -#define PPC_HTW_E6500 2 +#define PPC_HTW_E6500 1 /* * 64-bit booke platforms don't load the tlb in the tlb miss handler code. 
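On the e500 side above, the dedicated _PAGE_PSIZE_* constants are replaced by arithmetic: arch_make_huge_pte() encodes (shift - 10) into the four PSIZE bits, and the pte_huge_size() helper added a few hunks later inverts it. The round trip is easy to verify:

        /* Sketch: round-tripping the new e500 page-size encoding. */
        #define _PAGE_PSIZE_MSK                 0x000f00
        #define _PAGE_PSIZE_SHIFT               7
        #define _PAGE_PSIZE_SHIFT_OFFSET        10

        static unsigned long encode_psize(unsigned int shift)
        {
                unsigned long tsize = shift - _PAGE_PSIZE_SHIFT_OFFSET;

                return (tsize << _PAGE_PSIZE_SHIFT) & _PAGE_PSIZE_MSK;
        }

        static unsigned long decode_size(unsigned long val)
        {
                return 1UL << (((val & _PAGE_PSIZE_MSK) >> _PAGE_PSIZE_SHIFT)
                               + _PAGE_PSIZE_SHIFT_OFFSET);
        }
        /* encode_psize(12) == 0x100 == _PAGE_TSIZE_4K; decode_size(0x100) == 4K.
         * encode_psize(22) == 0x600; decode_size(0x600) == 4M. */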
diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h index e264be219fdb..4cc795044103 100644 --- a/arch/powerpc/include/asm/nohash/mmu.h +++ b/arch/powerpc/include/asm/nohash/mmu.h @@ -2,10 +2,7 @@ #ifndef _ASM_POWERPC_NOHASH_MMU_H_ #define _ASM_POWERPC_NOHASH_MMU_H_ -#if defined(CONFIG_40x) -/* 40x-style software loaded TLB */ -#include <asm/nohash/32/mmu-40x.h> -#elif defined(CONFIG_44x) +#if defined(CONFIG_44x) /* 44x-style software loaded TLB */ #include <asm/nohash/32/mmu-44x.h> #elif defined(CONFIG_PPC_E500) diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h index 4b62376318e1..bb5f3e8ea912 100644 --- a/arch/powerpc/include/asm/nohash/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -19,8 +19,14 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), + pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), pgtable_gfp_flags(mm, GFP_KERNEL)); + +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603) + memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, + (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); +#endif + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) @@ -44,8 +50,6 @@ static inline void pgtable_free(void *table, int shift) } } -#define get_hugepd_cache_index(x) (x) - static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { unsigned long pgf = (unsigned long)table; diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 427db14292c9..7d6b9e5b286e 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -31,6 +31,13 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p extern int icache_44x_need_flush; +#ifndef pte_huge_size +static inline unsigned long pte_huge_size(pte_t pte) +{ + return PAGE_SIZE; +} +#endif + /* * PTE updates. This function is called whenever an existing * valid PTE is updated. 
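Before the pte_update() rework just below, note the pgd_alloc() change in the pgalloc.h hunk above: on 8xx and 603, a freshly allocated PGD now has its kernel half copied from swapper_pg_dir up front instead of being populated lazily. The copy covers exactly the kernel-address slots:

        /* Sketch: the pgd_alloc() prefill, re-commented. */
        memcpy(pgd + USER_PTRS_PER_PGD,            /* first kernel-half slot */
               swapper_pg_dir + USER_PTRS_PER_PGD, /* master kernel mappings */
               (MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));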
This does -not- include set_pte_at() @@ -52,11 +59,34 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p { pte_basic_t old = pte_val(*p); pte_basic_t new = (old & ~(pte_basic_t)clr) | set; + unsigned long sz; + unsigned long pdsize; + int i; if (new == old) return old; - *p = __pte(new); + if (huge) + sz = pte_huge_size(__pte(old)); + else + sz = PAGE_SIZE; + + if (sz < PMD_SIZE) + pdsize = PAGE_SIZE; + else if (sz < PUD_SIZE) + pdsize = PMD_SIZE; + else if (sz < P4D_SIZE) + pdsize = PUD_SIZE; + else if (sz < PGDIR_SIZE) + pdsize = P4D_SIZE; + else + pdsize = PGDIR_SIZE; + + for (i = 0; i < sz / pdsize; i++, p++) { + *p = __pte(new); + if (new) + new += (unsigned long long)(pdsize / PAGE_SIZE) << PTE_RPN_SHIFT; + } if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC)) icache_44x_need_flush = 1; @@ -256,7 +286,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); } -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } @@ -340,30 +370,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, #define pgprot_writecombine pgprot_noncached_wc -#ifdef CONFIG_HUGETLB_PAGE -static inline int hugepd_ok(hugepd_t hpd) -{ -#ifdef CONFIG_PPC_8xx - return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M); -#else - /* We clear the top bit to indicate hugepd */ - return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); -#endif -} - -static inline int pmd_huge(pmd_t pmd) -{ - return 0; -} - -static inline int pud_huge(pud_t pud) -{ - return 0; -} - -#define is_hugepd(hpd) (hugepd_ok(hpd)) -#endif - int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); void unmap_kernel_page(unsigned long va); diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h index f516f0b5b7a8..cb78392494da 100644 --- a/arch/powerpc/include/asm/nohash/pte-e500.h +++ b/arch/powerpc/include/asm/nohash/pte-e500.h @@ -19,20 +19,7 @@ #define _PAGE_BAP_SX 0x000040 #define _PAGE_BAP_UX 0x000080 #define _PAGE_PSIZE_MSK 0x000f00 -#define _PAGE_PSIZE_4K 0x000200 -#define _PAGE_PSIZE_8K 0x000300 -#define _PAGE_PSIZE_16K 0x000400 -#define _PAGE_PSIZE_32K 0x000500 -#define _PAGE_PSIZE_64K 0x000600 -#define _PAGE_PSIZE_128K 0x000700 -#define _PAGE_PSIZE_256K 0x000800 -#define _PAGE_PSIZE_512K 0x000900 -#define _PAGE_PSIZE_1M 0x000a00 -#define _PAGE_PSIZE_2M 0x000b00 -#define _PAGE_PSIZE_4M 0x000c00 -#define _PAGE_PSIZE_8M 0x000d00 -#define _PAGE_PSIZE_16M 0x000e00 -#define _PAGE_PSIZE_32M 0x000f00 +#define _PAGE_TSIZE_4K 0x000100 #define _PAGE_DIRTY 0x001000 /* C: page changed */ #define _PAGE_SW0 0x002000 #define _PAGE_U3 0x004000 @@ -46,6 +33,9 @@ #define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ #define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ +#define _PAGE_PSIZE_SHIFT 7 +#define _PAGE_PSIZE_SHIFT_OFFSET 10 + /* "Higher level" linux bit combinations */ #define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */ #define _PAGE_READ (_PAGE_BAP_SR | _PAGE_BAP_UR) /* User read permission */ @@ -65,8 +55,6 @@ #define _PAGE_SPECIAL _PAGE_SW0 -/* Base page size */ -#define _PAGE_PSIZE _PAGE_PSIZE_4K #define PTE_RPN_SHIFT (24) #define PTE_WIMGE_SHIFT (19) @@ -89,7 +77,7 @@ * pages. We always set _PAGE_COHERENT when SMP is enabled or * the processor might need it for DMA coherency. 
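The generalized pte_update() above fans a single logical update out over every entry that backs a huge mapping, stepping the RPN so the replicated entries describe consecutive frames. As a concrete, illustrative case, a 256K huge page carried by 4K PTE entries gives sz = SZ_256K and pdsize = PAGE_SIZE, so 64 slots are rewritten:

        /* Sketch: the loop above, specialized to a 256K huge page on 4K pages.
         * "p" and "new" are the PTE cursor and value from pte_update(). */
        unsigned long sz = SZ_256K;       /* from pte_huge_size() */
        unsigned long pdsize = PAGE_SIZE; /* sz < PMD_SIZE, so 4K steps */
        int i;

        for (i = 0; i < sz / pdsize; i++, p++) {        /* 64 iterations */
                *p = __pte(new);
                if (new)        /* keep the replicated RPNs consecutive */
                        new += (unsigned long long)(pdsize / PAGE_SIZE)
                                << PTE_RPN_SHIFT;
        }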
*/ -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_TSIZE_4K) #if defined(CONFIG_SMP) #define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) #else @@ -105,6 +93,47 @@ static inline pte_t pte_mkexec(pte_t pte) } #define pte_mkexec pte_mkexec +static inline unsigned long pte_huge_size(pte_t pte) +{ + pte_basic_t val = pte_val(pte); + + return 1UL << (((val & _PAGE_PSIZE_MSK) >> _PAGE_PSIZE_SHIFT) + _PAGE_PSIZE_SHIFT_OFFSET); +} +#define pte_huge_size pte_huge_size + +static inline int pmd_leaf(pmd_t pmd) +{ + if (IS_ENABLED(CONFIG_PPC64)) + return (long)pmd_val(pmd) > 0; + else + return pmd_val(pmd) & _PAGE_PSIZE_MSK; +} +#define pmd_leaf pmd_leaf + +static inline unsigned long pmd_leaf_size(pmd_t pmd) +{ + return pte_huge_size(__pte(pmd_val(pmd))); +} +#define pmd_leaf_size pmd_leaf_size + +#ifdef CONFIG_PPC64 +static inline int pud_leaf(pud_t pud) +{ + if (IS_ENABLED(CONFIG_PPC64)) + return (long)pud_val(pud) > 0; + else + return pud_val(pud) & _PAGE_PSIZE_MSK; +} +#define pud_leaf pud_leaf + +static inline unsigned long pud_leaf_size(pud_t pud) +{ + return pte_huge_size(__pte(pud_val(pud))); +} +#define pud_leaf_size pud_leaf_size + +#endif + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index a2bc4b95e703..8c9d4b26bf57 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1027,10 +1027,10 @@ struct opal_i2c_request { * The host will pass on OPAL, a buffer of length OPAL_SYSEPOW_MAX * with individual elements being 16 bits wide to fetch the system * wide EPOW status. Each element in the buffer will contain the - * EPOW status in it's bit representation for a particular EPOW sub + * EPOW status in its bit representation for a particular EPOW sub * class as defined here. So multiple detailed EPOW status bits * specific for any sub class can be represented in a single buffer - * element as it's bit representation. + * element as its bit representation. */ /* System EPOW type */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index e411e5a70ea3..af9a2628d1df 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -21,8 +21,7 @@ * page size. When using 64K pages however, whether we are really supporting * 64K pages in HW or not is irrelevant to those definitions. */ -#define PAGE_SHIFT CONFIG_PAGE_SHIFT -#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) +#include <vdso/page.h> #ifndef __ASSEMBLY__ #ifndef CONFIG_HUGETLB_PAGE @@ -42,13 +41,6 @@ extern unsigned int hpage_shift; #endif /* - * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we - * assign PAGE_MASK to a larger type it gets extended the way we want - * (i.e. with 1s in the high bits) - */ -#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) - -/* * KERNELBASE is the virtual address of the start of the kernel, it's often * the same as PAGE_OFFSET, but _might not be_. * @@ -269,38 +261,6 @@ static inline const void *pfn_to_kaddr(unsigned long pfn) #define is_kernel_addr(x) ((x) >= TASK_SIZE) #endif -#ifndef CONFIG_PPC_BOOK3S_64 -/* - * Use the top bit of the higher-level page table entries to indicate whether - * the entries we point to contain hugepages. This works because we know that - * the page tables live in kernel space. If we ever decide to support having - * page tables at arbitrary addresses, this breaks and will have to change. 
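The comment block being deleted above records the trick the hugepd scheme depended on: page tables live at kernel virtual addresses, which always have the top address bit set, so a directory entry with that bit cleared could be repurposed to mean "points at hugepage PTEs". The non-8xx nohash test removed a few hunks earlier reduced to:

        /* Sketch: the retired top-bit hugepd check (non-8xx nohash). */
        #define PD_HUGE 0x80000000UL      /* 0x8000000000000000UL on ppc64 */

        static bool is_hugepd_val(unsigned long pd)
        {
                /* A populated entry whose kernel-pointer top bit is clear. */
                return pd != 0 && (pd & PD_HUGE) == 0;
        }

Note that the new e500 pmd_leaf()/pud_leaf() tests above lean on the same property: "(long)pmd_val(pmd) > 0" is true only for a non-empty entry whose top bit is clear, i.e. a leaf rather than a pointer to a lower table.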
- */ -#ifdef CONFIG_PPC64 -#define PD_HUGE 0x8000000000000000UL -#else -#define PD_HUGE 0x80000000 -#endif - -#else /* CONFIG_PPC_BOOK3S_64 */ -/* - * Book3S 64 stores real addresses in the hugepd entries to - * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE. - */ -#define HUGEPD_ADDR_MASK (0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK) -#endif /* CONFIG_PPC_BOOK3S_64 */ - -/* - * Some number of bits at the level of the page table that points to - * a hugepte are used to encode the size. This masks those bits. - * On 8xx, HW assistance requires 4k alignment for the hugepte. - */ -#ifdef CONFIG_PPC_8xx -#define HUGEPD_SHIFT_MASK 0xfff -#else -#define HUGEPD_SHIFT_MASK 0x3f -#endif - #ifndef __ASSEMBLY__ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index 8e5b7d0b851c..ecf5ac70cfae 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -15,6 +15,16 @@ #endif /* CONFIG_SMP */ #endif /* __powerpc64__ */ +#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP) +#include <linux/jump_label.h> +DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged); + +#define percpu_first_chunk_is_paged \ + (static_key_enabled(&__percpu_first_chunk_is_paged.key)) +#else +#define percpu_first_chunk_is_paged false +#endif + #include <asm-generic/percpu.h> #include <asm/paca.h> diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index e2221d29fdf9..af0f46e2373b 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -89,7 +89,8 @@ struct power_pmu { #define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ #define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */ #define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */ -#define PPMU_HAS_ATTR_CONFIG1 0x00000800 /* Using config1 attribute */ +#define PPMU_P10 0x00000800 /* For power10 pmu */ +#define PPMU_HAS_ATTR_CONFIG1 0x00001000 /* Using config1 attribute */ /* * Values for flags to get_alternatives() @@ -101,8 +102,8 @@ struct power_pmu { int __init register_power_pmu(struct power_pmu *pmu); struct pt_regs; -extern unsigned long perf_misc_flags(struct pt_regs *regs); -extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_arch_misc_flags(struct pt_regs *regs); +extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); extern unsigned long int read_bhrb(int n); /* @@ -110,7 +111,7 @@ extern unsigned long int read_bhrb(int n); * if we have hardware PMU support. 
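The perf_misc_flags() -> perf_arch_misc_flags() rename above (completed by the self-referential #define just below) follows the kernel's usual override idiom: the architecture defines a macro with the same name as its function, and the generic header tests #ifndef to supply a default otherwise. Schematically (the generic-side fallback is a paraphrase, not the exact core code):

        /* arch side: the self-referential define advertises the override. */
        #ifdef CONFIG_PPC_PERF_CTRS
        #define perf_arch_misc_flags(regs)      perf_arch_misc_flags(regs)
        #endif

        /* generic side (sketch): used only when no arch hook is defined. */
        #ifndef perf_arch_misc_flags
        #define perf_arch_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
        #endif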
*/ #ifdef CONFIG_PPC_PERF_CTRS -#define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs) #endif /* diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h index 82633200b500..6bd8f89b25dc 100644 --- a/arch/powerpc/include/asm/pgtable-be-types.h +++ b/arch/powerpc/include/asm/pgtable-be-types.h @@ -101,14 +101,4 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new) return pmd_raw(old) == prev; } -#ifdef CONFIG_ARCH_HAS_HUGEPD -typedef struct { __be64 pdbe; } hugepd_t; -#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) }) - -static inline unsigned long hpd_val(hugepd_t x) -{ - return be64_to_cpu(x.pdbe); -} -#endif - #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index 082c85cc09b1..f3086e39e7d2 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -49,12 +49,22 @@ static inline unsigned long pud_val(pud_t x) #endif /* CONFIG_PPC64 */ /* PGD level */ +#if defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) +typedef struct { unsigned long long pgd; } pgd_t; + +static inline unsigned long long pgd_val(pgd_t x) +{ + return x.pgd; +} +#else typedef struct { unsigned long pgd; } pgd_t; -#define __pgd(x) ((pgd_t) { (x) }) + static inline unsigned long pgd_val(pgd_t x) { return x.pgd; } +#endif +#define __pgd(x) ((pgd_t) { (x) }) /* Page protection bits */ typedef struct { unsigned long pgprot; } pgprot_t; @@ -83,13 +93,4 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) } #endif -#ifdef CONFIG_ARCH_HAS_HUGEPD -typedef struct { unsigned long pd; } hugepd_t; -#define __hugepd(x) ((hugepd_t) { (x) }) -static inline unsigned long hpd_val(hugepd_t x) -{ - return x.pd; -} -#endif - #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 239709a2f68e..93d77ad5a92f 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -53,9 +53,8 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, #define MAX_PTRS_PER_PGD PTRS_PER_PGD #endif -/* Keep these as a macros to avoid include dependency mess */ +/* Keep this as a macro to avoid include dependency mess */ #define pte_page(x) pfn_to_page(pte_pfn(x)) -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline unsigned long pte_pfn(pte_t pte) { @@ -65,6 +64,7 @@ static inline unsigned long pte_pfn(pte_t pte) /* * Select all bits except the pfn */ +#define pte_pgprot pte_pgprot static inline pgprot_t pte_pgprot(pte_t pte) { unsigned long pte_flags; @@ -106,6 +106,9 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr); void pgtable_cache_add(unsigned int shift); +#ifdef CONFIG_PPC32 +void __init *early_alloc_pgtable(unsigned long size); +#endif pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va); #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32) diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index b3ee44a40c2f..f2b6cc4341bb 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -18,16 +18,6 @@ static inline long poll_pending(void) return plpar_hcall_norets(H_POLL_PENDING); } -static inline u8 get_cede_latency_hint(void) -{ - return get_lppaca()->cede_latency_hint; -} - -static inline void set_cede_latency_hint(u8 
latency_hint) -{ - get_lppaca()->cede_latency_hint = latency_hint; -} - static inline long cede_processor(void) { /* @@ -37,24 +27,6 @@ static inline long cede_processor(void) return plpar_hcall_norets_notrace(H_CEDE); } -static inline long extended_cede_processor(unsigned long latency_hint) -{ - long rc; - u8 old_latency_hint = get_cede_latency_hint(); - - set_cede_latency_hint(latency_hint); - - rc = cede_processor(); - - /* Ensure that H_CEDE returns with IRQs on */ - if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE))) - __hard_irq_enable(); - - set_cede_latency_hint(old_latency_hint); - - return rc; -} - static inline long vpa_call(unsigned long flags, unsigned long cpu, unsigned long vpa) { @@ -93,6 +65,35 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa) return vpa_call(H_VPA_REG_DTL, cpu, vpa); } +/* + * Invokes H_HTM hcall with parameters passed from htm_hcall_wrapper. + * flags: Set to hardwareTarget. + * target: Specifies target using node index, nodal chip index and core index. + * operation : action to perform ie configure, start, stop, deconfigure, trace + * based on the HTM type. + * param1, param2, param3: parameters for each action. + */ +static inline long htm_call(unsigned long flags, unsigned long target, + unsigned long operation, unsigned long param1, + unsigned long param2, unsigned long param3) +{ + return plpar_hcall_norets(H_HTM, flags, target, operation, + param1, param2, param3); +} + +static inline long htm_hcall_wrapper(unsigned long flags, unsigned long nodeindex, + unsigned long nodalchipindex, unsigned long coreindexonchip, + unsigned long type, unsigned long htm_op, unsigned long param1, unsigned long param2, + unsigned long param3) +{ + return htm_call(H_HTM_FLAGS_HARDWARE_TARGET | flags, + H_HTM_TARGET_NODE_INDEX(nodeindex) | + H_HTM_TARGET_NODAL_CHIP_INDEX(nodalchipindex) | + H_HTM_TARGET_CORE_INDEX_ON_CHIP(coreindexonchip), + H_HTM_OP(htm_op) | H_HTM_TYPE(type), + param1, param2, param3); +} + extern void vpa_init(int cpu); static inline long plpar_pte_enter(unsigned long flags, diff --git a/arch/powerpc/include/asm/plpks.h b/arch/powerpc/include/asm/plpks.h index 23b77027c916..7a84069759b0 100644 --- a/arch/powerpc/include/asm/plpks.h +++ b/arch/powerpc/include/asm/plpks.h @@ -44,9 +44,8 @@ #define PLPKS_MAX_DATA_SIZE 4000 // Timeouts for PLPKS operations -#define PLPKS_MAX_TIMEOUT 5000 // msec -#define PLPKS_FLUSH_SLEEP 10 // msec -#define PLPKS_FLUSH_SLEEP_RANGE 400 +#define PLPKS_MAX_TIMEOUT (5 * USEC_PER_SEC) +#define PLPKS_FLUSH_SLEEP 10000 // usec struct plpks_var { char *component; diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h index 2495866f2e97..420e2878ae67 100644 --- a/arch/powerpc/include/asm/pmac_feature.h +++ b/arch/powerpc/include/asm/pmac_feature.h @@ -192,7 +192,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node, /* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value) * enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive - * it's reset line + * its reset line */ #define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6) diff --git a/arch/powerpc/include/asm/pmi.h b/arch/powerpc/include/asm/pmi.h deleted file mode 100644 index 478f0a2fe7f4..000000000000 --- a/arch/powerpc/include/asm/pmi.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _POWERPC_PMI_H -#define _POWERPC_PMI_H - -/* - * Definitions for talking with PMI device on PowerPC - * - * PMI (Platform 
Management Interrupt) is a way to communicate - * with the BMC (Baseboard Management Controller) via interrupts. - * Unlike IPMI it is bidirectional and has a low latency. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#ifdef __KERNEL__ - -#define PMI_TYPE_FREQ_CHANGE 0x01 -#define PMI_TYPE_POWER_BUTTON 0x02 -#define PMI_READ_TYPE 0 -#define PMI_READ_DATA0 1 -#define PMI_READ_DATA1 2 -#define PMI_READ_DATA2 3 -#define PMI_WRITE_TYPE 4 -#define PMI_WRITE_DATA0 5 -#define PMI_WRITE_DATA1 6 -#define PMI_WRITE_DATA2 7 - -#define PMI_ACK 0x80 - -#define PMI_TIMEOUT 100 - -typedef struct { - u8 type; - u8 data0; - u8 data1; - u8 data2; -} pmi_message_t; - -struct pmi_handler { - struct list_head node; - u8 type; - void (*handle_pmi_message) (pmi_message_t); -}; - -int pmi_register_handler(struct pmi_handler *); -void pmi_unregister_handler(struct pmi_handler *); - -int pmi_send_message(pmi_message_t); - -#endif /* __KERNEL__ */ -#endif /* _POWERPC_PMI_H */ diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index 8afc92860dbb..7e9a479951a3 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -10,7 +10,6 @@ #include <linux/pci_hotplug.h> #include <linux/irq.h> #include <linux/of.h> -#include <misc/cxl-base.h> #include <asm/opal-api.h> #define PCI_SLOT_ID_PREFIX (1UL << 63) @@ -25,25 +24,9 @@ extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state); extern int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg); -extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr, - int enable); -int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode); -int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, - unsigned int virq); -int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num); -void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num); -int pnv_cxl_get_irq_count(struct pci_dev *dev); -struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev); int64_t pnv_opal_pci_msi_eoi(struct irq_data *d); bool is_pnv_opal_msi(struct irq_chip *chip); -#ifdef CONFIG_CXL_BASE -int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev, int num); -void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev); -#endif - struct pnv_php_slot { struct hotplug_slot slot; uint64_t id; diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 005601243dda..4312bcb913a4 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -471,6 +471,7 @@ #define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \ (0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) #define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) +#define PPC_RAW_LWA(r, base, i) (0xe8000002 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) #define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i)) @@ -510,6 +511,7 @@ #define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) 
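The new PPC_RAW_LWA/PPC_RAW_LHA/PPC_RAW_DIVW additions here (the list continues below) follow the existing pattern: OR register and immediate fields into a fixed base opcode word. Working one through by hand as a check:

        /* Sketch: hand-assembling lwa r3,16(r1) with the new macro. */
        u32 insn = PPC_RAW_LWA(3, 1, 16);
        /* 0xe8000002 | ___PPC_RT(3) = 0x00600000 | ___PPC_RA(1) = 0x00010000
         * | IMM_DS(16) = 0x0010  ==>  0xe8610012, i.e. lwa r3,16(r1)
         * (primary opcode 58, DS form, XO = 2). */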
+#define PPC_RAW_LHA(r, base, i) (0xa8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) @@ -532,7 +534,9 @@ #define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) +#define PPC_RAW_DIVW(d, a, b) (0x7c0003d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVD(d, a, b) (0x7c0003d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDE_DOT(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) @@ -550,6 +554,8 @@ #define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b)) #define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) #define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i)) +#define PPC_RAW_EXTSB(d, a) (0x7c000774 | ___PPC_RA(d) | ___PPC_RS(a)) +#define PPC_RAW_EXTSH(d, a) (0x7c000734 | ___PPC_RA(d) | ___PPC_RS(a)) #define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a)) #define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) #define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s)) @@ -581,12 +587,26 @@ #define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr)) #define PPC_RAW_EIEIO() (0x7c0006ac) +/* bcl 20,31,$+4 */ +#define PPC_RAW_BCL4() (0x429f0005) #define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset)) #define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset)) #define PPC_RAW_TW(t0, a, b) (0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_TRAP() PPC_RAW_TW(31, 0, 0) #define PPC_RAW_SETB(t, bfa) (0x7c000100 | ___PPC_RT(t) | ___PPC_RA((bfa) << 2)) +#ifdef CONFIG_PPC32 +#define PPC_RAW_STL PPC_RAW_STW +#define PPC_RAW_STLU PPC_RAW_STWU +#define PPC_RAW_LL PPC_RAW_LWZ +#define PPC_RAW_CMPLI PPC_RAW_CMPWI +#else +#define PPC_RAW_STL PPC_RAW_STD +#define PPC_RAW_STLU PPC_RAW_STDU +#define PPC_RAW_LL PPC_RAW_LD +#define PPC_RAW_CMPLI PPC_RAW_CMPDI +#endif + /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) #define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 1d1018c1e482..b891910fce8a 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -183,7 +183,7 @@ /* * Used to name C functions called from asm */ -#ifdef CONFIG_PPC_KERNEL_PCREL +#if defined(__powerpc64__) && defined(CONFIG_PPC_KERNEL_PCREL) #define CFUNC(name) name@notoc #else #define CFUNC(name) name @@ -482,7 +482,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) * and they must be used. 
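The PPC_RAW_STL/PPC_RAW_STLU/PPC_RAW_LL/PPC_RAW_CMPLI aliases added above let shared 32/64-bit code emit pointer-sized accesses without a #ifdef at every call site. A hypothetical emitter in the style of the BPF JIT (the image/idx convention here is illustrative, not the JIT's actual helpers):

        /* Sketch: one call site that becomes stw/lwz on ppc32, std/ld on ppc64. */
        static void emit_spill_fill(u32 *image, int *idx, int reg, int base, int off)
        {
                image[(*idx)++] = PPC_RAW_STL(reg, base, off); /* pointer-size store */
                image[(*idx)++] = PPC_RAW_LL(reg, base, off);  /* ...and reload */
        }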
*/ -#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx) +#if !defined(CONFIG_44x) && !defined(CONFIG_PPC_8xx) #define tlbia \ li r4,1024; \ mtctr r4; \ diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h new file mode 100644 index 000000000000..000e2b9681f3 --- /dev/null +++ b/arch/powerpc/include/asm/preempt.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_POWERPC_PREEMPT_H +#define __ASM_POWERPC_PREEMPT_H + +#include <asm-generic/preempt.h> + +#if defined(CONFIG_PREEMPT_DYNAMIC) +#include <linux/jump_label.h> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +#define need_irq_preemption() \ + (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched)) +#else +#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION)) +#endif + +#endif /* __ASM_POWERPC_PREEMPT_H */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index b2c51d337e60..6b94de17201c 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -159,7 +159,7 @@ struct thread_struct { unsigned long sr0; #endif #endif /* CONFIG_PPC32 */ -#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) +#if defined(CONFIG_BOOKE) && defined(CONFIG_PPC_KUAP) unsigned long pid; /* value written in PID reg. at interrupt exit */ #endif /* Debug Registers */ @@ -260,7 +260,8 @@ struct thread_struct { unsigned long sier2; unsigned long sier3; unsigned long hashkeyr; - + unsigned long dexcr; + unsigned long dexcr_onexec; /* Reset value to load on exec */ #endif }; @@ -333,6 +334,16 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); +#ifdef CONFIG_PPC_BOOK3S_64 + +#define PPC_GET_DEXCR_ASPECT(tsk, asp) get_dexcr_prctl((tsk), (asp)) +#define PPC_SET_DEXCR_ASPECT(tsk, asp, val) set_dexcr_prctl((tsk), (asp), (val)) + +int get_dexcr_prctl(struct task_struct *tsk, unsigned long asp); +int set_dexcr_prctl(struct task_struct *tsk, unsigned long asp, unsigned long val); + +#endif + extern void load_fp_state(struct thread_fp_state *fp); extern void store_fp_state(struct thread_fp_state *fp); extern void load_vr_state(struct thread_vr_state *vr); diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index c0107d8ddd8c..f679a11a7e7f 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -17,6 +17,8 @@ struct device_node; struct property; +#define MIN_RMA 768 /* Minimum RMA (in MB) for CAS negotiation */ + #define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ #define OF_DT_END_NODE 0x2 /* End node */ #define OF_DT_PROP 0x3 /* Property: name off, size, diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index d13d8fdc3411..987e23a2bd28 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -390,11 +390,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev); int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv); void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv); -static inline struct ps3_system_bus_driver *ps3_drv_to_system_bus_drv( - struct device_driver *_drv) -{ - return container_of(_drv, struct ps3_system_bus_driver, core); -} +#define ps3_drv_to_system_bus_drv(_drv) container_of_const(_drv, struct ps3_system_bus_driver, core) static inline struct ps3_system_bus_device 
*ps3_dev_to_system_bus_dev( const struct device *_dev) { diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index ea8f91fbc62f..7b9350756875 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -310,7 +310,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) static inline bool cpu_has_msr_ri(void) { - return !IS_ENABLED(CONFIG_BOOKE_OR_40x); + return !IS_ENABLED(CONFIG_BOOKE); } static inline bool regs_is_unrecoverable(struct pt_regs *regs) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d3d1aea009b4..0228c90bbcc7 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -18,7 +18,7 @@ #include <asm/feature-fixups.h> /* Pickup Book E specific registers. */ -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE #include <asm/reg_booke.h> #endif @@ -233,14 +233,10 @@ /* Special Purpose Registers (SPRNs)*/ -#ifdef CONFIG_40x -#define SPRN_PID 0x3B1 /* Process ID */ -#else #define SPRN_PID 0x030 /* Process ID */ #ifdef CONFIG_BOOKE #define SPRN_PID0 SPRN_PID/* Process ID Register 0 */ #endif -#endif #define SPRN_CTR 0x009 /* Count Register */ #define SPRN_DSCR 0x11 @@ -527,7 +523,7 @@ #define SPRN_TSCR 0x399 /* Thread Switch Control Register */ #define SPRN_DEC 0x016 /* Decrement Register */ -#define SPRN_PIT 0x3DB /* Programmable Interval Timer (40x/BOOKE) */ +#define SPRN_PIT 0x3DB /* Programmable Interval Timer (BOOKE) */ #define SPRN_DER 0x095 /* Debug Enable Register */ #define DER_RSTE 0x40000000 /* Reset Interrupt */ @@ -615,7 +611,7 @@ #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ #define HID1_PS (1<<16) /* 750FX PLL selection */ #endif -#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ +#define SPRN_HID2_750FX 0x3F8 /* IBM 750FX HID2 Register */ #define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */ #define SPRN_HID2_G2_LE 0x3F3 /* G2_LE HID2 Register */ #define HID2_G2_LE_HBE (1<<18) /* High BAT Enable (G2_LE) */ @@ -1116,15 +1112,6 @@ * - SPRG2 indicator that we are in RTAS * - SPRG4 (603 only) pseudo TLB LRU data * - * 32-bit 40x: - * - SPRG0 scratch for exception vectors - * - SPRG1 scratch for exception vectors - * - SPRG2 scratch for exception vectors - * - SPRG4 scratch for exception vectors (not 403) - * - SPRG5 scratch for exception vectors (not 403) - * - SPRG6 scratch for exception vectors (not 403) - * - SPRG7 scratch for exception vectors (not 403) - * * 32-bit 440 and FSL BookE: * - SPRG0 scratch for exception vectors * - SPRG1 scratch for exception vectors (*) @@ -1216,16 +1203,6 @@ #define SPRN_SPRG_603_LRU SPRN_SPRG4 #endif -#ifdef CONFIG_40x -#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 -#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 -#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 -#define SPRN_SPRG_SCRATCH3 SPRN_SPRG4 -#define SPRN_SPRG_SCRATCH4 SPRN_SPRG5 -#define SPRN_SPRG_SCRATCH5 SPRN_SPRG6 -#define SPRN_SPRG_SCRATCH6 SPRN_SPRG7 -#endif - #ifdef CONFIG_BOOKE #define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0 #define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0 diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index af56980b6cdb..656bfaf91526 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -1,10 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Contains register definitions common to the Book E PowerPC - * specification. 
Notice that while the IBM-40x series of CPUs - * are not true Book E PowerPCs, they borrowed a number of features - * before Book E was finalized, and are included here as well. Unfortunately, - * they sometimes used different locations than true Book E CPUs did. + * specification. * * Copyright 2009-2010 Freescale Semiconductor, Inc. */ @@ -42,9 +39,6 @@ #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) -#elif defined (CONFIG_40x) -#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) -#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) #else #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE) #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) @@ -157,7 +151,6 @@ #define SPRN_TLB3CFG 0x2B3 /* TLB 3 Config Register */ #define SPRN_EPR 0x2BE /* External Proxy Register */ #define SPRN_CCR1 0x378 /* Core Configuration Register 1 */ -#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */ #define SPRN_MAS7 0x3B0 /* MMU Assist Register 7 */ #define SPRN_MMUCR 0x3B2 /* MMU Control Register */ #define SPRN_CCR0 0x3B3 /* Core Configuration Register 0 */ @@ -166,7 +159,6 @@ #define SPRN_SGR 0x3B9 /* Storage Guarded Register */ #define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */ #define SPRN_SLER 0x3BB /* Little-endian real mode */ -#define SPRN_SU0R 0x3BC /* "User 0" real mode (40x) */ #define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */ #define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */ #define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */ @@ -183,10 +175,8 @@ #define SPRN_SVR 0x3FF /* System Version Register */ /* - * SPRs which have conflicting definitions on true Book E versus classic, - * or IBM 40x. + * SPRs which have conflicting definitions on true Book E versus classic. */ -#ifdef CONFIG_BOOKE #define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */ #define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */ #define SPRN_DEAR 0x03D /* Data Error Address Register */ @@ -201,22 +191,6 @@ #define SPRN_DAC2 0x13D /* Data Address Compare 2 */ #define SPRN_TSR 0x150 /* Timer Status Register */ #define SPRN_TCR 0x154 /* Timer Control Register */ -#endif /* Book E */ -#ifdef CONFIG_40x -#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */ -#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */ -#define SPRN_DEAR 0x3D5 /* Data Error Address Register */ -#define SPRN_TSR 0x3D8 /* Timer Status Register */ -#define SPRN_TCR 0x3DA /* Timer Control Register */ -#define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */ -#define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */ -#define SPRN_DBSR 0x3F0 /* Debug Status Register */ -#define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */ -#define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */ -#define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */ -#define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */ -#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */ -#endif #define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */ /* Bit definitions for CCR1. */ @@ -296,10 +270,6 @@ #endif /* Bit definitions for the DBSR. */ -/* - * DBSR bits which have conflicting definitions on true Book E versus IBM 40x. 
- */ -#ifdef CONFIG_BOOKE #define DBSR_IDE 0x80000000 /* Imprecise Debug Event */ #define DBSR_MRR 0x30000000 /* Most Recent Reset */ #define DBSR_IC 0x08000000 /* Instruction Completion */ @@ -319,21 +289,6 @@ #define DBSR_CRET 0x00000020 /* Critical Return Debug Event */ #define DBSR_IAC12ATS 0x00000002 /* Instr Address Compare 1/2 Toggle */ #define DBSR_IAC34ATS 0x00000001 /* Instr Address Compare 3/4 Toggle */ -#endif -#ifdef CONFIG_40x -#define DBSR_IC 0x80000000 /* Instruction Completion */ -#define DBSR_BT 0x40000000 /* Branch taken */ -#define DBSR_IRPT 0x20000000 /* Exception Debug Event */ -#define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */ -#define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */ -#define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */ -#define DBSR_IAC3 0x00080000 /* Instruction Address Compare 3 Event */ -#define DBSR_IAC4 0x00040000 /* Instruction Address Compare 4 Event */ -#define DBSR_DAC1R 0x01000000 /* Data Address Compare 1 Read Event */ -#define DBSR_DAC1W 0x00800000 /* Data Address Compare 1 Write Event */ -#define DBSR_DAC2R 0x00400000 /* Data Address Compare 2 Read Event */ -#define DBSR_DAC2W 0x00200000 /* Data Address Compare 2 Write Event */ -#endif /* Bit definitions related to the ESR. */ #define ESR_MCI 0x80000000 /* Machine Check - Instruction */ @@ -355,69 +310,6 @@ #define ESR_SPV 0x00000080 /* Signal Processing operation */ /* Bit definitions related to the DBCR0. */ -#if defined(CONFIG_40x) -#define DBCR0_EDM 0x80000000 /* External Debug Mode */ -#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ -#define DBCR0_RST 0x30000000 /* all the bits in the RST field */ -#define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */ -#define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */ -#define DBCR0_RST_CORE 0x10000000 /* Core Reset */ -#define DBCR0_RST_NONE 0x00000000 /* No Reset */ -#define DBCR0_IC 0x08000000 /* Instruction Completion */ -#define DBCR0_ICMP DBCR0_IC -#define DBCR0_BT 0x04000000 /* Branch Taken */ -#define DBCR0_BRT DBCR0_BT -#define DBCR0_EDE 0x02000000 /* Exception Debug Event */ -#define DBCR0_IRPT DBCR0_EDE -#define DBCR0_TDE 0x01000000 /* TRAP Debug Event */ -#define DBCR0_IA1 0x00800000 /* Instr Addr compare 1 enable */ -#define DBCR0_IAC1 DBCR0_IA1 -#define DBCR0_IA2 0x00400000 /* Instr Addr compare 2 enable */ -#define DBCR0_IAC2 DBCR0_IA2 -#define DBCR0_IA12 0x00200000 /* Instr Addr 1-2 range enable */ -#define DBCR0_IA12X 0x00100000 /* Instr Addr 1-2 range eXclusive */ -#define DBCR0_IA3 0x00080000 /* Instr Addr compare 3 enable */ -#define DBCR0_IAC3 DBCR0_IA3 -#define DBCR0_IA4 0x00040000 /* Instr Addr compare 4 enable */ -#define DBCR0_IAC4 DBCR0_IA4 -#define DBCR0_IA34 0x00020000 /* Instr Addr 3-4 range Enable */ -#define DBCR0_IA34X 0x00010000 /* Instr Addr 3-4 range eXclusive */ -#define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ -#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ -#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ - -#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0) -#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */ -#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */ -#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */ -#define DBCR_IAC34I DBCR0_IA34 /* Range Inclusive */ -#define DBCR_IAC34X (DBCR0_IA34 | DBCR0_IA34X) /* Range Exclusive */ -#define DBCR_IAC34MODE (DBCR0_IA34 | DBCR0_IA34X) /* IAC 3-4 Mode Bits */ - -/* Bit definitions related to the DBCR1. 
*/ -#define DBCR1_DAC1R 0x80000000 /* DAC1 Read Debug Event */ -#define DBCR1_DAC2R 0x40000000 /* DAC2 Read Debug Event */ -#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */ -#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */ - -#define dbcr_dac(task) ((task)->thread.debug.dbcr1) -#define DBCR_DAC1R DBCR1_DAC1R -#define DBCR_DAC1W DBCR1_DAC1W -#define DBCR_DAC2R DBCR1_DAC2R -#define DBCR_DAC2W DBCR1_DAC2W - -/* - * Are there any active Debug Events represented in the - * Debug Control Registers? - */ -#define DBCR0_ACTIVE_EVENTS (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \ - DBCR0_IAC3 | DBCR0_IAC4) -#define DBCR1_ACTIVE_EVENTS (DBCR1_DAC1R | DBCR1_DAC2R | \ - DBCR1_DAC1W | DBCR1_DAC2W) -#define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \ - ((dbcr1) & DBCR1_ACTIVE_EVENTS)) - -#elif defined(CONFIG_BOOKE) #define DBCR0_EDM 0x80000000 /* External Debug Mode */ #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ #define DBCR0_RST 0x30000000 /* all the bits in the RST field */ @@ -518,7 +410,6 @@ #define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \ ((dbcr1) & DBCR1_ACTIVE_EVENTS)) -#endif /* #elif defined(CONFIG_BOOKE) */ /* Bit definitions related to the TCR. */ #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 065ffd1b2f8a..75fa0293c508 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -397,6 +397,7 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect) #define PSERIES_HP_ELOG_RESOURCE_SLOT 3 #define PSERIES_HP_ELOG_RESOURCE_PHB 4 #define PSERIES_HP_ELOG_RESOURCE_PMEM 6 +#define PSERIES_HP_ELOG_RESOURCE_DT 7 #define PSERIES_HP_ELOG_ACTION_ADD 1 #define PSERIES_HP_ELOG_ACTION_REMOVE 2 @@ -514,6 +515,10 @@ extern char rtas_data_buf[RTAS_DATA_BUF_SIZE]; extern unsigned long rtas_rmo_buf; extern struct mutex rtas_ibm_get_vpd_lock; +extern struct mutex rtas_ibm_get_indices_lock; +extern struct mutex rtas_ibm_set_dynamic_indicator_lock; +extern struct mutex rtas_ibm_get_dynamic_sensor_state_lock; +extern struct mutex rtas_ibm_physical_attestation_lock; #define GLOBAL_INTERRUPT_QUEUE 9005 diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h index 9a025b776a4b..9c8d5747755d 100644 --- a/arch/powerpc/include/asm/set_memory.h +++ b/arch/powerpc/include/asm/set_memory.h @@ -12,37 +12,37 @@ int change_memory_attr(unsigned long addr, int numpages, long action); -static inline int set_memory_ro(unsigned long addr, int numpages) +static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return change_memory_attr(addr, numpages, SET_MEMORY_RO); } -static inline int set_memory_rw(unsigned long addr, int numpages) +static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return change_memory_attr(addr, numpages, SET_MEMORY_RW); } -static inline int set_memory_nx(unsigned long addr, int numpages) +static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return change_memory_attr(addr, numpages, SET_MEMORY_NX); } -static inline int set_memory_x(unsigned long addr, int numpages) +static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return change_memory_attr(addr, numpages, SET_MEMORY_X); } -static inline int set_memory_np(unsigned long addr, int numpages) +static inline int __must_check set_memory_np(unsigned long addr, int numpages) { return change_memory_attr(addr, 
numpages, SET_MEMORY_NP); } -static inline int set_memory_p(unsigned long addr, int numpages) +static inline int __must_check set_memory_p(unsigned long addr, int numpages) { return change_memory_attr(addr, numpages, SET_MEMORY_P); } -static inline int set_memory_rox(unsigned long addr, int numpages) +static inline int __must_check set_memory_rox(unsigned long addr, int numpages) { return change_memory_attr(addr, numpages, SET_MEMORY_ROX); } diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h index 08243338069d..391fc19f7272 100644 --- a/arch/powerpc/include/asm/simple_spinlock_types.h +++ b/arch/powerpc/include/asm/simple_spinlock_types.h @@ -3,7 +3,7 @@ #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif typedef struct { diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 40b01446cf75..569765fa16bc 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -3,7 +3,7 @@ #define _ASM_POWERPC_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif #ifdef CONFIG_PPC_QUEUED_SPINLOCKS diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h index 2167d756e6d5..66b111fa1cd1 100644 --- a/arch/powerpc/include/asm/spu_priv1.h +++ b/arch/powerpc/include/asm/spu_priv1.h @@ -215,9 +215,6 @@ spu_disable_spu (struct spu_context *ctx) * and only intended to be used by the platform setup code. */ -extern const struct spu_priv1_ops spu_priv1_mmio_ops; -extern const struct spu_priv1_ops spu_priv1_beat_ops; - extern const struct spu_management_ops spu_management_of_ops; #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 50950deedb87..e3d0e714ff28 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -173,9 +173,4 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr); */ extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op); -extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, - const void *mem, bool cross_endian); -extern void emulate_vsx_store(struct instruction_op *op, - const union vsx_reg *reg, void *mem, - bool cross_endian); extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h index de1018cc522b..e3d5d3823dac 100644 --- a/arch/powerpc/include/asm/static_call.h +++ b/arch/powerpc/include/asm/static_call.h @@ -26,4 +26,6 @@ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr") #define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) __PPC_SCT(name, "b .+20") +#define CALL_INSN_SIZE 4 + #endif /* _ASM_POWERPC_STATIC_CALL_H */ diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 3dd36c5e334a..4b3c52ed6e9d 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -39,6 +39,16 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) return -1; } +static inline void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr) +{ + /* + * Unlike syscall_get_nr(), syscall_set_nr() can be called only when + * 
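
Annotating the set_memory_*() wrappers __must_check turns a silently ignored failure into a compiler warning: change_memory_attr() can fail, and carrying on with stale permissions is a correctness and security problem rather than a nuisance. A sketch of the caller pattern the annotation now enforces, using a hypothetical helper and assuming a page-aligned region:

	#include <asm/set_memory.h>

	/* Hypothetical: seal a data region read-only and non-executable. */
	static int seal_region(unsigned long addr, int numpages)
	{
		int err;

		err = set_memory_ro(addr, numpages);
		if (err)
			return err;	/* propagate rather than continue */

		return set_memory_nx(addr, numpages);
	}
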
the target task is stopped for tracing on entering syscall, so + * there is no need to have the same check syscall_get_nr() has. + */ + regs->gpr[0] = nr; +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { @@ -110,6 +120,16 @@ static inline void syscall_get_arguments(struct task_struct *task, } } +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + memcpy(®s->gpr[3], args, 6 * sizeof(args[0])); + + /* Also copy the first argument into orig_gpr3 */ + regs->orig_gpr3 = args[0]; +} + static inline int syscall_get_arch(struct task_struct *task) { if (is_tsk_32bit_task(task)) diff --git a/arch/powerpc/include/asm/systemcfg.h b/arch/powerpc/include/asm/systemcfg.h new file mode 100644 index 000000000000..2f9b1d6a5c98 --- /dev/null +++ b/arch/powerpc/include/asm/systemcfg.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _SYSTEMCFG_H +#define _SYSTEMCFG_H + +/* + * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM + * Copyright (C) 2005 Benjamin Herrenschmidy <benh@kernel.crashing.org>, + * IBM Corp. + */ + +#ifdef CONFIG_PPC64 + +/* + * If the major version changes we are incompatible. + * Minor version changes are a hint. + */ +#define SYSTEMCFG_MAJOR 1 +#define SYSTEMCFG_MINOR 1 + +#include <linux/types.h> + +struct systemcfg { + __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */ + struct { /* Systemcfg version numbers */ + __u32 major; /* Major number 0x10 */ + __u32 minor; /* Minor number 0x14 */ + } version; + + /* Note about the platform flags: it now only contains the lpar + * bit. The actual platform number is dead and buried + */ + __u32 platform; /* Platform flags 0x18 */ + __u32 processor; /* Processor type 0x1C */ + __u64 processorCount; /* # of physical processors 0x20 */ + __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */ + __u64 tb_orig_stamp; /* (NU) Timebase at boot 0x30 */ + __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ + __u64 tb_to_xs; /* (NU) Inverse of TB to 2^20 0x40 */ + __u64 stamp_xsec; /* (NU) 0x48 */ + __u64 tb_update_count; /* (NU) Timebase atomicity ctr 0x50 */ + __u32 tz_minuteswest; /* (NU) Min. west of Greenwich 0x58 */ + __u32 tz_dsttime; /* (NU) Type of dst correction 0x5C */ + __u32 dcache_size; /* L1 d-cache size 0x60 */ + __u32 dcache_line_size; /* L1 d-cache line size 0x64 */ + __u32 icache_size; /* L1 i-cache size 0x68 */ + __u32 icache_line_size; /* L1 i-cache line size 0x6C */ +}; + +extern struct systemcfg *systemcfg; + +#endif /* CONFIG_PPC64 */ +#endif /* _SYSTEMCFG_H */ diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/text-patching.h index 0e29ccf903d0..e7f14720f630 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/text-patching.h @@ -76,6 +76,43 @@ int patch_instruction(u32 *addr, ppc_inst_t instr); int raw_patch_instruction(u32 *addr, ppc_inst_t instr); int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr); +/* + * The data patching functions patch_uint() and patch_ulong(), etc., must be + * called on aligned addresses. + * + * The instruction patching functions patch_instruction() and similar must be + * called on addresses satisfying instruction alignment requirements. 
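
syscall_set_arguments() mirrors syscall_get_arguments(): the six argument registers are gpr[3..8], and orig_gpr3 must be kept in sync because the entry code reloads the first argument from it on syscall restart. A sketch of a tracer-style user of the pair, assuming a task already stopped at syscall entry (the hook name is hypothetical):

	#include <linux/sched.h>
	#include <asm/syscall.h>

	/* Hypothetical tracer hook: force argument 0 of the syscall to 0. */
	static void zero_first_arg(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];

		syscall_get_arguments(task, regs, args);
		args[0] = 0;
		syscall_set_arguments(task, regs, args); /* updates orig_gpr3 too */
	}
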
+ */ + +#ifdef CONFIG_PPC64 + +int patch_uint(void *addr, unsigned int val); +int patch_ulong(void *addr, unsigned long val); + +#define patch_u64 patch_ulong + +#else + +static inline int patch_uint(void *addr, unsigned int val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int))) + return -EINVAL; + + return patch_instruction(addr, ppc_inst(val)); +} + +static inline int patch_ulong(void *addr, unsigned long val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long))) + return -EINVAL; + + return patch_instruction(addr, ppc_inst(val)); +} + +#endif + +#define patch_u32 patch_uint + static inline unsigned long patch_site_addr(s32 *site) { return (unsigned long)site + *site; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 15c5691dd218..2785c7462ebf 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -103,6 +103,7 @@ void arch_setup_new_exec(void); #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ +#define TIF_NEED_RESCHED_LAZY 9 /* Scheduler driven lazy preemption */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -122,6 +123,7 @@ void arch_setup_new_exec(void); #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) #define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_32BIT (1<<TIF_32BIT) @@ -142,9 +144,10 @@ void arch_setup_new_exec(void); _TIF_SYSCALL_EMU) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NEED_RESCHED_LAZY | _TIF_NOTIFY_RESUME | \ + _TIF_UPROBE | _TIF_RESTORE_TM | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL) + #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) /* Bits in local_flags */ @@ -226,6 +229,10 @@ static inline int arch_within_stack_frames(const void * const stack, return BAD_STACK; } +#ifdef CONFIG_PPC32 +extern void *emergency_ctx[]; +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 9f50766c4623..f8885586efaf 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -58,9 +58,6 @@ static inline u64 get_vtb(void) */ static inline u64 get_dec(void) { - if (IS_ENABLED(CONFIG_40x)) - return mfspr(SPRN_PIT); - return mfspr(SPRN_DEC); } @@ -71,9 +68,7 @@ static inline u64 get_dec(void) */ static inline void set_dec(u64 val) { - if (IS_ENABLED(CONFIG_40x)) - mtspr(SPRN_PIT, (u32)val); - else if (IS_ENABLED(CONFIG_BOOKE)) + if (IS_ENABLED(CONFIG_BOOKE)) mtspr(SPRN_DEC, val); else mtspr(SPRN_DEC, val - 1); @@ -91,12 +86,9 @@ static inline unsigned long tb_ticks_since(unsigned long tstamp) #define mulhdu(x,y) \ ({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;}) #else -extern u64 mulhdu(u64, u64); +#define mulhdu(x, y) mul_u64_u64_shr(x, y, 64) #endif -extern void div128_by_32(u64 dividend_high, u64 dividend_low, - unsigned divisor, struct div_result *dr); - extern void secondary_cpu_time_init(void); extern void 
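
patch_uint() and patch_ulong() exist because patching data through patch_instruction() is only safe when the value happens to be instruction-sized and instruction-aligned; on 64-bit, an unsigned long spans two instruction slots, hence the separate ppc64 implementations. A sketch of patching a 32-bit literal, assuming a naturally aligned target inside otherwise read-only text:

	#include <linux/types.h>
	#include <asm/text-patching.h>

	/* Hypothetical: update a 32-bit constant embedded in read-only memory. */
	static int update_literal(u32 *slot, u32 new_value)
	{
		/* patch_uint() rejects misaligned addresses with -EINVAL. */
		return patch_uint(slot, new_value);
	}
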
__init time_init(void); diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 1ca7d4c4b90d..2058e8d3e013 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -37,6 +37,7 @@ extern void tlb_flush(struct mmu_gather *tlb); */ #define tlb_needs_table_invalidate() radix_enabled() +#define __HAVE_ARCH_TLB_REMOVE_TABLE /* Get the generic bits... */ #include <asm-generic/tlb.h> diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index f4e6f2dd04b7..da15b5efe807 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -145,17 +145,31 @@ static inline int cpu_to_coregroup_id(int cpu) #ifdef CONFIG_HOTPLUG_SMT #include <linux/cpu_smt.h> +#include <linux/cpumask.h> #include <asm/cputhreads.h> static inline bool topology_is_primary_thread(unsigned int cpu) { return cpu == cpu_first_thread_sibling(cpu); } +#define topology_is_primary_thread topology_is_primary_thread static inline bool topology_smt_thread_allowed(unsigned int cpu) { return cpu_thread_in_core(cpu) < cpu_smt_num_threads; } + +#define topology_is_core_online topology_is_core_online +static inline bool topology_is_core_online(unsigned int cpu) +{ + int i, first_cpu = cpu_first_thread_sibling(cpu); + + for (i = first_cpu; i < first_cpu + threads_per_core; ++i) { + if (cpu_online(i)) + return true; + } + return false; +} #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index d9ac3a4f46e1..a7b69b25296b 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -137,7 +137,7 @@ TRACE_EVENT(rtas_input, TP_fast_assign( __entry->nargs = be32_to_cpu(rtas_args->nargs); - __assign_str(name, name); + __assign_str(name); be32_to_cpu_array(__get_dynamic_array(inputs), rtas_args->args, __entry->nargs); ), @@ -162,7 +162,7 @@ TRACE_EVENT(rtas_output, TP_fast_assign( __entry->nr_other = be32_to_cpu(rtas_args->nret) - 1; __entry->status = be32_to_cpu(rtas_args->rets[0]); - __assign_str(name, name); + __assign_str(name); be32_to_cpu_array(__get_dynamic_array(other_outputs), &rtas_args->rets[1], __entry->nr_other); ), diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index de10437fd206..4f5a46a77fa2 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -6,6 +6,7 @@ #include <asm/page.h> #include <asm/extable.h> #include <asm/kup.h> +#include <asm/asm-compat.h> #ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ @@ -93,8 +94,18 @@ __pu_failed: \ #endif #ifdef __powerpc64__ +#ifdef CONFIG_PPC_KERNEL_PREFIXED #define __put_user_asm2_goto(x, ptr, label) \ __put_user_asm_goto(x, ptr, label, "std") +#else +#define __put_user_asm2_goto(x, addr, label) \ + asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \ + EX_TABLE(1b, %l2) \ + : \ + : "r" (x), DS_FORM_CONSTRAINT (*addr) \ + : \ + : label) +#endif // CONFIG_PPC_KERNEL_PREFIXED #else /* __powerpc64__ */ #define __put_user_asm2_goto(x, addr, label) \ asm goto( \ @@ -165,8 +176,19 @@ do { \ #endif #ifdef __powerpc64__ +#ifdef CONFIG_PPC_KERNEL_PREFIXED #define __get_user_asm2_goto(x, addr, label) \ __get_user_asm_goto(x, addr, label, "ld") +#else +#define __get_user_asm2_goto(x, addr, label) \ + asm_goto_output( \ + "1: ld%U1%X1 %0, %1 # get_user\n" \ + EX_TABLE(1b, %l2) \ + : "=r" (x) \ + : DS_FORM_CONSTRAINT (*addr) \ + : \ + : label) +#endif // CONFIG_PPC_KERNEL_PREFIXED #else /* __powerpc64__ */ 
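
topology_is_core_online() reports a core as online if any of its SMT siblings is online, which is the granularity hotplug policy wants when it reasons about whole cores. A sketch of one possible use, assuming CONFIG_HOTPLUG_SMT so that both helpers are available (the counting function itself is hypothetical):

	#include <linux/cpumask.h>
	#include <asm/topology.h>

	/* Hypothetical: number of cores with at least one online thread. */
	static int count_online_cores(void)
	{
		int cpu, cores = 0;

		for_each_present_cpu(cpu) {
			/* Count each core once, via its primary thread. */
			if (topology_is_primary_thread(cpu) &&
			    topology_is_core_online(cpu))
				cores++;
		}
		return cores;
	}
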
#define __get_user_asm2_goto(x, addr, label) \ asm_goto_output( \ diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index b1f094728b35..a8681b12864f 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -38,13 +38,11 @@ void __init udbg_early_init(void); void __init udbg_init_debug_lpar(void); void __init udbg_init_debug_lpar_hvsi(void); void __init udbg_init_pmac_realmode(void); -void __init udbg_init_maple_realmode(void); void __init udbg_init_pas_realmode(void); void __init udbg_init_rtas_panel(void); void __init udbg_init_rtas_console(void); void __init udbg_init_btext(void); void __init udbg_init_44x_as1(void); -void __init udbg_init_40x_realmode(void); void __init udbg_init_cpm(void); void __init udbg_init_usbgecko(void); void __init udbg_init_memcons(void); diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h index e278299b9b37..6949b5daa37d 100644 --- a/arch/powerpc/include/asm/uninorth.h +++ b/arch/powerpc/include/asm/uninorth.h @@ -144,7 +144,7 @@ #define UNI_N_HWINIT_STATE_SLEEPING 0x01 #define UNI_N_HWINIT_STATE_RUNNING 0x02 /* This last bit appear to be used by the bootROM to know the second - * CPU has started and will enter it's sleep loop with IP=0 + * CPU has started and will enter its sleep loop with IP=0 */ #define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000 diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 659a996c75aa..027ef94a12fb 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -51,7 +51,6 @@ #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 7650b6ce14c8..1ca23fbfe087 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -3,6 +3,7 @@ #define _ASM_POWERPC_VDSO_H #define VDSO_VERSION_STRING LINUX_2.6.15 +#define __VDSO_PAGES 4 #ifndef __ASSEMBLY__ @@ -25,6 +26,7 @@ int vdso_getcpu_init(void); #ifdef __VDSO64__ #define V_FUNCTION_BEGIN(name) \ .globl name; \ + .type name,@function; \ name: \ #define V_FUNCTION_END(name) \ diff --git a/arch/powerpc/include/asm/vdso/arch_data.h b/arch/powerpc/include/asm/vdso/arch_data.h new file mode 100644 index 000000000000..c240a6b87518 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/arch_data.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM + * Copyright (C) 2005 Benjamin Herrenschmidy <benh@kernel.crashing.org>, + * IBM Corp. 
+ */ +#ifndef _ASM_POWERPC_VDSO_ARCH_DATA_H +#define _ASM_POWERPC_VDSO_ARCH_DATA_H + +#include <linux/unistd.h> +#include <linux/types.h> + +#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32) + +#ifdef CONFIG_PPC64 + +struct vdso_arch_data { + __u64 tb_ticks_per_sec; /* Timebase tics / sec */ + __u32 dcache_block_size; /* L1 d-cache block size */ + __u32 icache_block_size; /* L1 i-cache block size */ + __u32 dcache_log_block_size; /* L1 d-cache log block size */ + __u32 icache_log_block_size; /* L1 i-cache log block size */ + __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ + __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */ +}; + +#else /* CONFIG_PPC64 */ + +struct vdso_arch_data { + __u64 tb_ticks_per_sec; /* Timebase tics / sec */ + __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ + __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */ +}; + +#endif /* CONFIG_PPC64 */ + +#endif /* _ASM_POWERPC_VDSO_ARCH_DATA_H */ diff --git a/arch/powerpc/include/asm/vdso/getrandom.h b/arch/powerpc/include/asm/vdso/getrandom.h new file mode 100644 index 000000000000..067a5396aac6 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/getrandom.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France + */ +#ifndef _ASM_POWERPC_VDSO_GETRANDOM_H +#define _ASM_POWERPC_VDSO_GETRANDOM_H + +#ifndef __ASSEMBLY__ + +#include <asm/vdso_datapage.h> + +static __always_inline int do_syscall_3(const unsigned long _r0, const unsigned long _r3, + const unsigned long _r4, const unsigned long _r5) +{ + register long r0 asm("r0") = _r0; + register unsigned long r3 asm("r3") = _r3; + register unsigned long r4 asm("r4") = _r4; + register unsigned long r5 asm("r5") = _r5; + register int ret asm ("r3"); + + asm volatile( + " sc\n" + " bns+ 1f\n" + " neg %0, %0\n" + "1:\n" + : "=r" (ret), "+r" (r4), "+r" (r5), "+r" (r0) + : "r" (r3) + : "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr"); + + return ret; +} + +/** + * getrandom_syscall - Invoke the getrandom() syscall. + * @buffer: Destination buffer to fill with random bytes. + * @len: Size of @buffer in bytes. + * @flags: Zero or more GRND_* flags. + * Returns: The number of bytes written to @buffer, or a negative value indicating an error. 
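
SYSCALL_MAP_SIZE rounds NR_syscalls up to whole 32-bit words, one bit per syscall. Assuming the kernel populates the map most-significant-bit first within each word, as the historic vdso_setup_syscall_map() did, a vDSO-side lookup reduces to a shift and a mask; this sketch rests on that bit-order assumption:

	#include <asm/vdso/arch_data.h>

	static inline int syscall_implemented(const struct vdso_arch_data *ad,
					      unsigned int nr)
	{
		return (ad->syscall_map[nr >> 5] >> (31 - (nr & 31))) & 1;
	}
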
+ */ +static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsigned int flags) +{ + return do_syscall_3(__NR_getrandom, (unsigned long)buffer, + (unsigned long)len, (unsigned long)flags); +} + +static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(void) +{ + struct vdso_rng_data *data; + + asm ( + " bcl 20, 31, .+4 ;" + "0: mflr %0 ;" + " addis %0, %0, (vdso_u_rng_data - 0b)@ha ;" + " addi %0, %0, (vdso_u_rng_data - 0b)@l ;" + : "=r" (data) : : "lr" + ); + + return data; +} +#define __arch_get_vdso_u_rng_data __arch_get_vdso_u_rng_data + +ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, + size_t opaque_len); + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_VDSO_GETRANDOM_H */ diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h index f0a4cf01e85c..99c9d6f43fde 100644 --- a/arch/powerpc/include/asm/vdso/gettimeofday.h +++ b/arch/powerpc/include/asm/vdso/gettimeofday.h @@ -4,7 +4,6 @@ #ifndef __ASSEMBLY__ -#include <asm/page.h> #include <asm/vdso/timebase.h> #include <asm/barrier.h> #include <asm/unistd.h> @@ -14,6 +13,17 @@ #define VDSO_HAS_TIME 1 +/* + * powerpc specific delta calculation. + * + * This variant removes the masking of the subtraction because the + * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX + * which would result in a pointless operation. The compiler cannot + * optimize it away as the mask comes from the vdso data and is not compile + * time constant. + */ +#define VDSO_DELTA_NOMASK 1 + static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3, const unsigned long _r4) { @@ -84,42 +94,17 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) #endif static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return get_tb(); } -const struct vdso_data *__arch_get_vdso_data(void); - -#ifdef CONFIG_TIME_NS -static __always_inline -const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) -{ - return (void *)vd + PAGE_SIZE; -} -#endif - -static inline bool vdso_clocksource_ok(const struct vdso_data *vd) +static inline bool vdso_clocksource_ok(const struct vdso_clock *vc) { return true; } #define vdso_clocksource_ok vdso_clocksource_ok -/* - * powerpc specific delta calculation. - * - * This variant removes the masking of the subtraction because the - * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX - * which would result in a pointless operation. The compiler cannot - * optimize it away as the mask comes from the vdso data and is not compile - * time constant. 
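
__arch_get_vdso_u_rng_data() above and the get_datapage assembly macro lean on the same position-independence trick: "bcl 20, 31, .+4" is an always-taken branch-and-link to the next instruction, a form the architecture treats specially so it does not pollute the link-stack predictor; mflr then yields the current address, to which a link-time-constant offset is added. A sketch of the pattern against a made-up symbol (my_vdso_blob is hypothetical and would need to exist at link time):

	static inline void *pic_address_of_my_vdso_blob(void)
	{
		void *p;

		/* (my_vdso_blob - 0b) is a link-time constant: no relocation. */
		asm("bcl 20, 31, .+4\n"
		    "0:	mflr %0\n"
		    "	addis %0, %0, (my_vdso_blob - 0b)@ha\n"
		    "	addi %0, %0, (my_vdso_blob - 0b)@l\n"
		    : "=r" (p) : : "lr");
		return p;
	}
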
- */ -static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) -{ - return (cycles - last) * mult; -} -#define vdso_calc_delta vdso_calc_delta - #ifndef __powerpc64__ static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift) { @@ -140,21 +125,22 @@ static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift) #ifdef __powerpc64__ int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd); + const struct vdso_time_data *vd); int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res, - const struct vdso_data *vd); + const struct vdso_time_data *vd); #else int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts, - const struct vdso_data *vd); + const struct vdso_time_data *vd); int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd); + const struct vdso_time_data *vd); int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res, - const struct vdso_data *vd); + const struct vdso_time_data *vd); #endif int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz, - const struct vdso_data *vd); + const struct vdso_time_data *vd); __kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, - const struct vdso_data *vd); + const struct vdso_time_data *vd); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h index 48cf23f1e273..c2c9ae1b22e7 100644 --- a/arch/powerpc/include/asm/vdso/vsyscall.h +++ b/arch/powerpc/include/asm/vdso/vsyscall.h @@ -4,19 +4,8 @@ #ifndef __ASSEMBLY__ -#include <linux/timekeeper_internal.h> #include <asm/vdso_datapage.h> -/* - * Update the vDSO data page to keep in sync with kernel timekeeping. - */ -static __always_inline -struct vdso_data *__arch_get_k_vdso_data(void) -{ - return vdso_data->data; -} -#define __arch_get_k_vdso_data __arch_get_k_vdso_data - /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index a585c8e538ff..95d45a50355d 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -9,106 +9,18 @@ * IBM Corp. */ - -/* - * Note about this structure: - * - * This structure was historically called systemcfg and exposed to - * userland via /proc/ppc64/systemcfg. Unfortunately, this became an - * ABI issue as some proprietary software started relying on being able - * to mmap() it, thus we have to keep the base layout at least for a - * few kernel versions. - * - * However, since ppc32 doesn't suffer from this backward handicap, - * a simpler version of the data structure is used there with only the - * fields actually used by the vDSO. - * - */ - -/* - * If the major version changes we are incompatible. - * Minor version changes are a hint. 
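
The removed vdso_calc_delta() is exactly what VDSO_DELTA_NOMASK now requests from the generic code: with a clocksource mask of U64_MAX, ((cycles - last) & mask) equals (cycles - last) for every operand pair, including timebase wraparound, so the AND was pure overhead the compiler could not prove away. The identity, as a self-contained check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cycles = 5, last = UINT64_MAX;	/* wrapped timebase */

		/* Masking with UINT64_MAX is the identity, even across wrap. */
		assert(((cycles - last) & UINT64_MAX) == cycles - last);
		assert(cycles - last == 6);	/* modulo-2^64 arithmetic */
		return 0;
	}
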
- */ -#define SYSTEMCFG_MAJOR 1 -#define SYSTEMCFG_MINOR 1 - #ifndef __ASSEMBLY__ -#include <linux/unistd.h> -#include <linux/time.h> #include <vdso/datapage.h> -#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32) - -/* - * So here is the ppc64 backward compatible version - */ - -#ifdef CONFIG_PPC64 - -struct vdso_arch_data { - __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */ - struct { /* Systemcfg version numbers */ - __u32 major; /* Major number 0x10 */ - __u32 minor; /* Minor number 0x14 */ - } version; - - /* Note about the platform flags: it now only contains the lpar - * bit. The actual platform number is dead and buried - */ - __u32 platform; /* Platform flags 0x18 */ - __u32 processor; /* Processor type 0x1C */ - __u64 processorCount; /* # of physical processors 0x20 */ - __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */ - __u64 tb_orig_stamp; /* (NU) Timebase at boot 0x30 */ - __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ - __u64 tb_to_xs; /* (NU) Inverse of TB to 2^20 0x40 */ - __u64 stamp_xsec; /* (NU) 0x48 */ - __u64 tb_update_count; /* (NU) Timebase atomicity ctr 0x50 */ - __u32 tz_minuteswest; /* (NU) Min. west of Greenwich 0x58 */ - __u32 tz_dsttime; /* (NU) Type of dst correction 0x5C */ - __u32 dcache_size; /* L1 d-cache size 0x60 */ - __u32 dcache_line_size; /* L1 d-cache line size 0x64 */ - __u32 icache_size; /* L1 i-cache size 0x68 */ - __u32 icache_line_size; /* L1 i-cache line size 0x6C */ - - /* those additional ones don't have to be located anywhere - * special as they were not part of the original systemcfg - */ - __u32 dcache_block_size; /* L1 d-cache block size */ - __u32 icache_block_size; /* L1 i-cache block size */ - __u32 dcache_log_block_size; /* L1 d-cache log block size */ - __u32 icache_log_block_size; /* L1 i-cache log block size */ - __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ - __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */ - - struct vdso_data data[CS_BASES]; -}; - -#else /* CONFIG_PPC64 */ - -/* - * And here is the simpler 32 bits version - */ -struct vdso_arch_data { - __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ - __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ - __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */ - struct vdso_data data[CS_BASES]; -}; - -#endif /* CONFIG_PPC64 */ - -extern struct vdso_arch_data *vdso_data; - #else /* __ASSEMBLY__ */ -.macro get_datapage ptr +.macro get_datapage ptr symbol bcl 20, 31, .+4 999: mflr \ptr - addis \ptr, \ptr, (_vdso_datapage - 999b)@ha - addi \ptr, \ptr, (_vdso_datapage - 999b)@l + addis \ptr, \ptr, (\symbol - 999b)@ha + addi \ptr, \ptr, (\symbol - 999b)@l .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h index fcf721682a71..f2dc40e1c52a 100644 --- a/arch/powerpc/include/asm/vga.h +++ b/arch/powerpc/include/asm/vga.h @@ -40,11 +40,6 @@ static inline void scr_memsetw(u16 *s, u16 v, unsigned int n) memset16(s, cpu_to_le16(v), n / 2); } -#define VT_BUF_HAVE_MEMCPYW -#define VT_BUF_HAVE_MEMMOVEW -#define scr_memcpyw memcpy -#define scr_memmovew memmove - #endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ #ifdef __powerpc64__ diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/video.h index c0c5d1df7ad1..e1770114ffc3 100644 --- a/arch/powerpc/include/asm/fb.h +++ b/arch/powerpc/include/asm/video.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ +#ifndef _ASM_VIDEO_H_ +#define 
_ASM_VIDEO_H_ #include <asm/page.h> @@ -12,6 +12,6 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot, } #define pgprot_framebuffer pgprot_framebuffer -#include <asm-generic/fb.h> +#include <asm-generic/video.h> -#endif /* _ASM_FB_H_ */ +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 6faf2a931755..7c444150c5ad 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -156,11 +156,7 @@ static inline int vio_enable_interrupts(struct vio_dev *dev) } #endif -static inline struct vio_driver *to_vio_driver(struct device_driver *drv) -{ - return container_of(drv, struct vio_driver, driver); -} - +#define to_vio_driver(__drv) container_of_const(__drv, struct vio_driver, driver) #define to_vio_dev(__dev) container_of_const(__dev, struct vio_dev, dev) #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index 89090485bec1..60ef312dab05 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -31,7 +31,6 @@ #ifdef CONFIG_PPC_ICP_NATIVE extern int icp_native_init(void); extern void icp_native_flush_interrupt(void); -extern void icp_native_cause_ipi_rm(int cpu); #else static inline int icp_native_init(void) { return -ENODEV; } #endif diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h index f2d44b44f46c..535cdb1e411a 100644 --- a/arch/powerpc/include/asm/xmon.h +++ b/arch/powerpc/include/asm/xmon.h @@ -12,13 +12,11 @@ #ifdef CONFIG_XMON extern void xmon_setup(void); -void __init xmon_register_spus(struct list_head *list); struct pt_regs; extern int xmon(struct pt_regs *excp); extern irqreturn_t xmon_irq(int, void *); #else static inline void xmon_setup(void) { } -static inline void xmon_register_spus(struct list_head *list) { } #endif #if defined(CONFIG_XMON) && defined(CONFIG_SMP) diff --git a/arch/powerpc/include/uapi/asm/bootx.h b/arch/powerpc/include/uapi/asm/bootx.h index 6728c7e24e58..1b8c121071d9 100644 --- a/arch/powerpc/include/uapi/asm/bootx.h +++ b/arch/powerpc/include/uapi/asm/bootx.h @@ -108,7 +108,7 @@ typedef struct boot_infos /* ALL BELOW NEW (vers. 4) */ /* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag - (non-PCI) only. On PCI, memory is contiguous and it's size is in the + (non-PCI) only. On PCI, memory is contiguous and its size is in the device-tree. 
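
The to_vio_driver() conversion, like ps3_drv_to_system_bus_drv() earlier, switches to container_of_const(), which propagates the qualifier of its argument: a const struct device_driver * comes back as a const struct vio_driver *, so bus callbacks can take const driver pointers without casts. A sketch of such a caller; the helper is hypothetical, and the check only assumes struct vio_driver's id_table member:

	#include <linux/device.h>
	#include <asm/vio.h>

	/* Hypothetical match-style helper taking a const driver pointer. */
	static bool vio_driver_has_ids(const struct device_driver *drv)
	{
		const struct vio_driver *viodrv = to_vio_driver(drv);

		return viodrv->id_table != NULL;
	}
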
*/ boot_info_map_entry_t physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */ diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h index 2c145da3b774..b5211e413829 100644 --- a/arch/powerpc/include/uapi/asm/ioctls.h +++ b/arch/powerpc/include/uapi/asm/ioctls.h @@ -23,10 +23,10 @@ #define TCSETSW _IOW('t', 21, struct termios) #define TCSETSF _IOW('t', 22, struct termios) -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x40147417 /* _IOR('t', 23, struct termio) */ +#define TCSETA 0x80147418 /* _IOW('t', 24, struct termio) */ +#define TCSETAW 0x80147419 /* _IOW('t', 25, struct termio) */ +#define TCSETAF 0x8014741c /* _IOW('t', 28, struct termio) */ #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 1691297a766a..eaeda001784e 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -645,6 +645,9 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) #define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) #define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) +#define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6) +#define KVM_REG_PPC_HASHKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7) +#define KVM_REG_PPC_HASHPKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc8) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/include/uapi/asm/papr-indices.h b/arch/powerpc/include/uapi/asm/papr-indices.h new file mode 100644 index 000000000000..c2999d89d52a --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr-indices.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PAPR_INDICES_H_ +#define _UAPI_PAPR_INDICES_H_ + +#include <linux/types.h> +#include <asm/ioctl.h> +#include <asm/papr-miscdev.h> + +#define LOC_CODE_SIZE 80 +#define RTAS_GET_INDICES_BUF_SIZE SZ_4K + +struct papr_indices_io_block { + union { + struct { + __u8 is_sensor; /* 0 for indicator and 1 for sensor */ + __u32 indice_type; + } indices; + struct { + __u32 token; /* Sensor or indicator token */ + __u32 state; /* get / set state */ + /* + * PAPR+ 12.3.2.4 Converged Location Code Rules - Length + * Restrictions. 79 characters plus null. + */ + char location_code_str[LOC_CODE_SIZE]; /* location code */ + } dynamic_param; + }; +}; + +/* + * ioctls for /dev/papr-indices. 
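
Hard-coding TCGETA and friends avoids dragging the legacy struct termio definition into the uapi header while keeping the ABI bit-identical. The values can be checked against powerpc's _IOC encoding, where the top three bits are the direction (read = 2, write = 4), the next thirteen the size, then eight bits of type and eight of number, with sizeof(struct termio) being 20:

	#include <assert.h>

	int main(void)
	{
		/* _IOR('t', 23, struct termio) on powerpc */
		assert((2u << 29 | 20u << 16 | 't' << 8 | 23) == 0x40147417u);
		/* _IOW('t', 24, struct termio) on powerpc */
		assert((4u << 29 | 20u << 16 | 't' << 8 | 24) == 0x80147418u);
		return 0;
	}
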
+ * PAPR_INDICES_IOC_GET: Returns a get-indices handle fd to read data + * PAPR_DYNAMIC_SENSOR_IOC_GET: Gets the state of the input sensor + * PAPR_DYNAMIC_INDICATOR_IOC_SET: Sets the new state for the input indicator + */ +#define PAPR_INDICES_IOC_GET _IOW(PAPR_MISCDEV_IOC_ID, 3, struct papr_indices_io_block) +#define PAPR_DYNAMIC_SENSOR_IOC_GET _IOWR(PAPR_MISCDEV_IOC_ID, 4, struct papr_indices_io_block) +#define PAPR_DYNAMIC_INDICATOR_IOC_SET _IOW(PAPR_MISCDEV_IOC_ID, 5, struct papr_indices_io_block) + + +#endif /* _UAPI_PAPR_INDICES_H_ */ diff --git a/arch/powerpc/include/uapi/asm/papr-physical-attestation.h b/arch/powerpc/include/uapi/asm/papr-physical-attestation.h new file mode 100644 index 000000000000..ea746837bb9a --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr-physical-attestation.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ +#define _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ + +#include <linux/types.h> +#include <asm/ioctl.h> +#include <asm/papr-miscdev.h> + +#define PAPR_PHYATTEST_MAX_INPUT 4084 /* Max 4K buffer: 4K-12 */ + +/* + * Defined in PAPR 2.13+ 21.6 Attestation Command Structures. + * User space pass this struct and the max size should be 4K. + */ +struct papr_phy_attest_io_block { + __u8 version; + __u8 command; + __u8 TCG_major_ver; + __u8 TCG_minor_ver; + __be32 length; + __be32 correlator; + __u8 payload[PAPR_PHYATTEST_MAX_INPUT]; +}; + +/* + * ioctl for /dev/papr-physical-attestation. Returns a attestation + * command fd handle + */ +#define PAPR_PHY_ATTEST_IOC_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 8, struct papr_phy_attest_io_block) + +#endif /* _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ */ diff --git a/arch/powerpc/include/uapi/asm/papr-platform-dump.h b/arch/powerpc/include/uapi/asm/papr-platform-dump.h new file mode 100644 index 000000000000..8a1c060e89a9 --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr-platform-dump.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PAPR_PLATFORM_DUMP_H_ +#define _UAPI_PAPR_PLATFORM_DUMP_H_ + +#include <linux/types.h> +#include <asm/ioctl.h> +#include <asm/papr-miscdev.h> + +/* + * ioctl for /dev/papr-platform-dump. Returns a platform-dump handle fd + * corresponding to dump tag. 
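
The papr-indices ioctls appear to follow the handle-fd pattern of the other /dev/papr-* devices: the ioctl validates the request and returns a fresh fd from which the RTAS results are read. A user-space sketch under that assumption, with a made-up indice type purely for illustration:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/papr-indices.h>

	int main(void)
	{
		struct papr_indices_io_block blk;
		char buf[4096];
		ssize_t n;
		int fd, handle;

		fd = open("/dev/papr-indices", O_RDONLY);
		if (fd < 0)
			return 1;

		memset(&blk, 0, sizeof(blk));
		blk.indices.is_sensor = 1;	/* sensors, not indicators */
		blk.indices.indice_type = 1;	/* hypothetical indice type */

		handle = ioctl(fd, PAPR_INDICES_IOC_GET, &blk);
		if (handle < 0)
			return 1;

		while ((n = read(handle, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);

		close(handle);
		close(fd);
		return 0;
	}
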
+ */ +#define PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 6, __u64) +#define PAPR_PLATFORM_DUMP_IOC_INVALIDATE _IOW(PAPR_MISCDEV_IOC_ID, 7, __u64) + +#endif /* _UAPI_PAPR_PLATFORM_DUMP_H_ */ diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h deleted file mode 100644 index 17439925045c..000000000000 --- a/arch/powerpc/include/uapi/asm/papr_pdsm.h +++ /dev/null @@ -1,165 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl - * - * (C) Copyright IBM 2020 - * - * Author: Vaibhav Jain <vaibhav at linux.ibm.com> - */ - -#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_ -#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_ - -#include <linux/types.h> -#include <linux/ndctl.h> - -/* - * PDSM Envelope: - * - * The ioctl ND_CMD_CALL exchange data between user-space and kernel via - * envelope which consists of 2 headers sections and payload sections as - * illustrated below: - * +-----------------+---------------+---------------------------+ - * | 64-Bytes | 8-Bytes | Max 184-Bytes | - * +-----------------+---------------+---------------------------+ - * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD | - * +-----------------+---------------+---------------------------+ - * | nd_family | | | - * | nd_size_out | cmd_status | | - * | nd_size_in | reserved | nd_pdsm_payload | - * | nd_command | payload --> | | - * | nd_fw_size | | | - * | nd_payload ---> | | | - * +---------------+-----------------+---------------------------+ - * - * ND Header: - * This is the generic libnvdimm header described as 'struct nd_cmd_pkg' - * which is interpreted by libnvdimm before passed on to papr_scm. Important - * member fields used are: - * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM - * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0) - * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD - * 'nd_command' : (In) One of PAPR_PDSM_XXX - * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned - * - * PDSM Header: - * This is papr-scm specific header that precedes the payload. This is defined - * as nd_cmd_pdsm_pkg. Following fields aare available in this header: - * - * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM. - * 'reserved' : Not used, reserved for future and should be set to 0. - * 'payload' : A union of all the possible payload structs - * - * PDSM Payload: - * - * The layout of the PDSM Payload is defined by various structs shared between - * papr_scm and libndctl so that contents of payload can be interpreted. As such - * its defined as a union of all possible payload structs as - * 'union nd_pdsm_payload'. Based on the value of 'nd_cmd_pkg.nd_command' - * appropriate member of the union is accessed. 
- */ - -/* Max payload size that we can handle */ -#define ND_PDSM_PAYLOAD_MAX_SIZE 184 - -/* Max payload size that we can handle */ -#define ND_PDSM_HDR_SIZE \ - (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE) - -/* Various nvdimm health indicators */ -#define PAPR_PDSM_DIMM_HEALTHY 0 -#define PAPR_PDSM_DIMM_UNHEALTHY 1 -#define PAPR_PDSM_DIMM_CRITICAL 2 -#define PAPR_PDSM_DIMM_FATAL 3 - -/* struct nd_papr_pdsm_health.extension_flags field flags */ - -/* Indicate that the 'dimm_fuel_gauge' field is valid */ -#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1 - -/* Indicate that the 'dimm_dsc' field is valid */ -#define PDSM_DIMM_DSC_VALID 2 - -/* - * Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH - * Various flags indicate the health status of the dimm. - * - * extension_flags : Any extension fields present in the struct. - * dimm_unarmed : Dimm not armed. So contents wont persist. - * dimm_bad_shutdown : Previous shutdown did not persist contents. - * dimm_bad_restore : Contents from previous shutdown werent restored. - * dimm_scrubbed : Contents of the dimm have been scrubbed. - * dimm_locked : Contents of the dimm cant be modified until CEC reboot - * dimm_encrypted : Contents of dimm are encrypted. - * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX - * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100 - */ -struct nd_papr_pdsm_health { - union { - struct { - __u32 extension_flags; - __u8 dimm_unarmed; - __u8 dimm_bad_shutdown; - __u8 dimm_bad_restore; - __u8 dimm_scrubbed; - __u8 dimm_locked; - __u8 dimm_encrypted; - __u16 dimm_health; - - /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */ - __u16 dimm_fuel_gauge; - - /* Extension flag PDSM_DIMM_DSC_VALID */ - __u64 dimm_dsc; - }; - __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; - }; -}; - -/* Flags for injecting specific smart errors */ -#define PDSM_SMART_INJECT_HEALTH_FATAL (1 << 0) -#define PDSM_SMART_INJECT_BAD_SHUTDOWN (1 << 1) - -struct nd_papr_pdsm_smart_inject { - union { - struct { - /* One or more of PDSM_SMART_INJECT_ */ - __u32 flags; - __u8 fatal_enable; - __u8 unsafe_shutdown_enable; - }; - __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; - }; -}; - -/* - * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel - * via 'nd_cmd_pkg.nd_command' member of the ioctl struct - */ -enum papr_pdsm { - PAPR_PDSM_MIN = 0x0, - PAPR_PDSM_HEALTH, - PAPR_PDSM_SMART_INJECT, - PAPR_PDSM_MAX, -}; - -/* Maximal union that can hold all possible payload types */ -union nd_pdsm_payload { - struct nd_papr_pdsm_health health; - struct nd_papr_pdsm_smart_inject smart_inject; - __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; -} __packed; - -/* - * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm - * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command' - * that should always precede this struct when sent to papr_scm via CMD_CALL - * interface. - */ -struct nd_pkg_pdsm { - __s32 cmd_status; /* Out: Sub-cmd status returned back */ - __u16 reserved[2]; /* Ignored and to be set as '0' */ - union nd_pdsm_payload payload; -} __packed; - -#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index d3282fbea4f2..fb2b95267022 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -3,9 +3,6 @@ # Makefile for the linux kernel. 
# -ifdef CONFIG_PPC64 -CFLAGS_prom_init.o += $(NO_MINIMAL_TOC) -endif ifdef CONFIG_PPC32 CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC @@ -73,7 +70,7 @@ obj-y := cputable.o syscalls.o switch.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ udbg.o misc.o io.o misc_$(BITS).o \ - of_platform.o prom_parse.o firmware.o \ + prom_parse.o firmware.o \ hw_breakpoint_constraints.o interrupt.o \ kdebugfs.o stacktrace.o syscall.o obj-y += ptrace/ @@ -87,6 +84,7 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_DAWR) += dawr.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o +obj-$(CONFIG_PPC_BOOK3S_64) += dexcr.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_64e.o obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o @@ -125,11 +123,10 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_PPC64) += head_64.o obj-$(CONFIG_PPC_BOOK3S_32) += head_book3s_32.o -obj-$(CONFIG_40x) += head_40x.o obj-$(CONFIG_44x) += head_44x.o obj-$(CONFIG_PPC_8xx) += head_8xx.o obj-$(CONFIG_PPC_85xx) += head_85xx.o -extra-y += vmlinux.lds +always-$(KBUILD_BUILTIN) += vmlinux.lds obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o @@ -142,6 +139,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o @@ -154,8 +152,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o -obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o - obj-y += trace/ ifneq ($(CONFIG_PPC_INDIRECT_PIO),y) @@ -164,9 +160,7 @@ endif obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o -ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)(CONFIG_PPC_BOOK3S),) obj-y += ppc_save_regs.o -endif obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o @@ -190,9 +184,6 @@ GCOV_PROFILE_kprobes-ftrace.o := n KCOV_INSTRUMENT_kprobes-ftrace.o := n KCSAN_SANITIZE_kprobes-ftrace.o := n UBSAN_SANITIZE_kprobes-ftrace.o := n -GCOV_PROFILE_syscall_64.o := n -KCOV_INSTRUMENT_syscall_64.o := n -UBSAN_SANITIZE_syscall_64.o := n UBSAN_SANITIZE_vdso.o := n # Necessary for booting with kcov enabled on book3e machines diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index f029755f9e69..b3048f6d3822 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -54,7 +54,7 @@ #endif #ifdef CONFIG_PPC32 -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE #include "head_booke.h" #endif #endif @@ -334,7 +334,6 @@ int main(void) #endif /* ! 
CONFIG_PPC64 */ /* datapage offsets for use by vdso */ - OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data); OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec); #ifdef CONFIG_PPC64 OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size); @@ -594,7 +593,6 @@ int main(void) HSTATE_FIELD(HSTATE_DABR, dabr); HSTATE_FIELD(HSTATE_DECEXP, dec_expires); HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode); - DEFINE(IPI_PRIORITY, IPI_PRIORITY); OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr); OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar); OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar); @@ -674,5 +672,16 @@ int main(void) DEFINE(BPT_SIZE, BPT_SIZE); #endif +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + DEFINE(FTRACE_OOL_STUB_SIZE, sizeof(struct ftrace_ool_stub)); +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + OFFSET(FTRACE_OPS_FUNC, ftrace_ops, func); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + OFFSET(FTRACE_OPS_DIRECT_CALL, ftrace_ops, direct_call); +#endif +#endif + return 0; } diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index f502337dd37d..0fcc463b02e2 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -735,7 +735,7 @@ static const struct sysfs_ops cache_index_ops = { .show = cache_index_show, }; -static struct kobj_type cache_index_type = { +static const struct kobj_type cache_index_type = { .release = cache_index_release, .sysfs_ops = &cache_index_ops, .default_groups = cache_index_default_groups, diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index bfd3f442e5eb..ab3ca74e6730 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -401,7 +401,7 @@ _GLOBAL(__save_cpu_setup) andi. r3,r3,0xff00 cmpwi cr0,r3,0x0200 bne 1f - mfspr r4,SPRN_HID2 + mfspr r4,SPRN_HID2_750FX stw r4,CS_HID2(r5) 1: mtcr r7 @@ -496,7 +496,7 @@ _GLOBAL(__restore_cpu_setup) bne 4f lwz r4,CS_HID2(r5) rlwinm r4,r4,0,19,17 - mtspr SPRN_HID2,r4 + mtspr SPRN_HID2_750FX,r4 sync 4: lwz r4,CS_HID1(r5) diff --git a/arch/powerpc/kernel/cpu_specs.h b/arch/powerpc/kernel/cpu_specs.h index 85ded3f77204..5ea14605bb41 100644 --- a/arch/powerpc/kernel/cpu_specs.h +++ b/arch/powerpc/kernel/cpu_specs.h @@ -1,9 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifdef CONFIG_40x -#include "cpu_specs_40x.h" -#endif - #ifdef CONFIG_PPC_47x #include "cpu_specs_47x.h" #elif defined(CONFIG_44x) diff --git a/arch/powerpc/kernel/cpu_specs_40x.h b/arch/powerpc/kernel/cpu_specs_40x.h deleted file mode 100644 index a1362a75b8c8..000000000000 --- a/arch/powerpc/kernel/cpu_specs_40x.h +++ /dev/null @@ -1,280 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2001 Ben. 
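
Constifying cache_index_type is possible because the kobject core takes const struct kobj_type * throughout, so the table can live in rodata. A minimal sketch of the registration pattern with a const ktype; all names here are hypothetical:

	#include <linux/kobject.h>

	static void demo_release(struct kobject *kobj)
	{
		/* Nothing dynamically allocated in this sketch. */
	}

	static const struct kobj_type demo_ktype = {
		.release	= demo_release,
		.sysfs_ops	= &kobj_sysfs_ops,
	};

	/* kobject_init_and_add() accepts a const ktype. */
	static int demo_register(struct kobject *kobj, struct kobject *parent)
	{
		return kobject_init_and_add(kobj, &demo_ktype, parent, "demo");
	}
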
Herrenschmidt (benh@kernel.crashing.org) - */ - -static struct cpu_spec cpu_specs[] __initdata = { - { /* STB 04xxx */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41810000, - .cpu_name = "STB04xxx", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* NP405L */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41610000, - .cpu_name = "NP405L", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* NP4GS3 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x40B10000, - .cpu_name = "NP4GS3", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* NP405H */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41410000, - .cpu_name = "NP405H", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405GPr */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x50910000, - .cpu_name = "405GPr", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* STBx25xx */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x51510000, - .cpu_name = "STBx25xx", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405LP */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41F10000, - .cpu_name = "405LP", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EP */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x51210000, - .cpu_name = "405EP", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. A/B with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910007, - .cpu_name = "405EX Rev. A/B", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. C without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x1291000d, - .cpu_name = "405EX Rev. 
C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. C with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x1291000f, - .cpu_name = "405EX Rev. C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. D without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910003, - .cpu_name = "405EX Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. D with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910005, - .cpu_name = "405EX Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. A/B without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910001, - .cpu_name = "405EXr Rev. A/B", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. C without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910009, - .cpu_name = "405EXr Rev. C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. C with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x1291000b, - .cpu_name = "405EXr Rev. C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. D without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910000, - .cpu_name = "405EXr Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. D with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910002, - .cpu_name = "405EXr Rev. 
D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { - /* 405EZ */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41510000, - .cpu_name = "405EZ", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* APM8018X */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x7ff11432, - .cpu_name = "APM8018X", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic 40x PPC)", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | - PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - } -}; diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 2086fa6cdc25..103b6605dd68 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -13,7 +13,7 @@ #include <linux/io.h> #include <linux/memblock.h> #include <linux/of.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/kdump.h> #include <asm/firmware.h> #include <linux/uio.h> diff --git a/arch/powerpc/kernel/dexcr.c b/arch/powerpc/kernel/dexcr.c new file mode 100644 index 000000000000..3a0358e91c60 --- /dev/null +++ b/arch/powerpc/kernel/dexcr.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/capability.h> +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/prctl.h> +#include <linux/sched.h> + +#include <asm/cpu_has_feature.h> +#include <asm/cputable.h> +#include <asm/processor.h> +#include <asm/reg.h> + +static int __init init_task_dexcr(void) +{ + if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) + return 0; + + current->thread.dexcr_onexec = mfspr(SPRN_DEXCR); + + return 0; +} +early_initcall(init_task_dexcr) + +/* Allow thread local configuration of these by default */ +#define DEXCR_PRCTL_EDITABLE ( \ + DEXCR_PR_IBRTPD | \ + DEXCR_PR_SRAPD | \ + DEXCR_PR_NPHIE) + +static int prctl_to_aspect(unsigned long which, unsigned int *aspect) +{ + switch (which) { + case PR_PPC_DEXCR_SBHE: + *aspect = DEXCR_PR_SBHE; + break; + case PR_PPC_DEXCR_IBRTPD: + *aspect = DEXCR_PR_IBRTPD; + break; + case PR_PPC_DEXCR_SRAPD: + *aspect = DEXCR_PR_SRAPD; + break; + case PR_PPC_DEXCR_NPHIE: + *aspect = DEXCR_PR_NPHIE; + break; + default: + return -ENODEV; + } + + return 0; +} + +int get_dexcr_prctl(struct task_struct *task, unsigned long which) +{ + unsigned int aspect; + int ret; + + ret = prctl_to_aspect(which, &aspect); + if (ret) + return ret; + + if (aspect & DEXCR_PRCTL_EDITABLE) + ret |= PR_PPC_DEXCR_CTRL_EDITABLE; + + if (aspect & mfspr(SPRN_DEXCR)) + ret |= PR_PPC_DEXCR_CTRL_SET; + else + ret |= PR_PPC_DEXCR_CTRL_CLEAR; + + if (aspect & task->thread.dexcr_onexec) + ret |= PR_PPC_DEXCR_CTRL_SET_ONEXEC; + else + ret |= PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC; + + 
return ret; +} + +int set_dexcr_prctl(struct task_struct *task, unsigned long which, unsigned long ctrl) +{ + unsigned long dexcr; + unsigned int aspect; + int err = 0; + + err = prctl_to_aspect(which, &aspect); + if (err) + return err; + + if (!(aspect & DEXCR_PRCTL_EDITABLE)) + return -EPERM; + + if (ctrl & ~PR_PPC_DEXCR_CTRL_MASK) + return -EINVAL; + + if (ctrl & PR_PPC_DEXCR_CTRL_SET && ctrl & PR_PPC_DEXCR_CTRL_CLEAR) + return -EINVAL; + + if (ctrl & PR_PPC_DEXCR_CTRL_SET_ONEXEC && ctrl & PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC) + return -EINVAL; + + /* + * We do not want an unprivileged process being able to disable + * a setuid process's hash check instructions + */ + if (aspect == DEXCR_PR_NPHIE && + ctrl & PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + + dexcr = mfspr(SPRN_DEXCR); + + if (ctrl & PR_PPC_DEXCR_CTRL_SET) + dexcr |= aspect; + else if (ctrl & PR_PPC_DEXCR_CTRL_CLEAR) + dexcr &= ~aspect; + + if (ctrl & PR_PPC_DEXCR_CTRL_SET_ONEXEC) + task->thread.dexcr_onexec |= aspect; + else if (ctrl & PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC) + task->thread.dexcr_onexec &= ~aspect; + + mtspr(SPRN_DEXCR, dexcr); + + return 0; +} diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 8920862ffd79..4d64a5db50f3 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -136,7 +136,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask) struct pci_dev *pdev = to_pci_dev(dev); struct pci_controller *phb = pci_bus_to_host(pdev->bus); - if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported) + if (!phb->controller_ops.iommu_bypass_supported) return false; return phb->controller_ops.iommu_bypass_supported(pdev, mask); } @@ -216,6 +216,6 @@ const struct dma_map_ops dma_iommu_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index af4263594eb2..3af6c06af02f 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -867,7 +867,7 @@ bool __init dt_cpu_ftrs_init(void *fdt) using_dt_cpu_ftrs = false; /* Setup and verify the FDT, if it fails we just bail */ - if (!early_init_dt_verify(fdt)) + if (!early_init_dt_verify(fdt, __pa(fdt))) return false; if (!of_scan_flat_dt(fdt_find_cpu_features, NULL)) @@ -1087,12 +1087,10 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char /* Count and allocate space for cpu features */ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes, &nr_dt_cpu_features); - dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE); - if (!dt_cpu_features) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, - sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, - PAGE_SIZE); + dt_cpu_features = + memblock_alloc_or_panic( + sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, + PAGE_SIZE); cpufeatures_setup_start(isa); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index ab316e155ea9..ca7f7bb2b478 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -506,9 +506,18 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * We will punt with the following conditions: Failure to get * PE's state, EEH not support and Permanently unavailable * state, PE is in 
good state. + * + * On the pSeries, after reaching the threshold, get_state might + * return EEH_STATE_NOT_SUPPORT. However, it's possible that the + * device state remains uncleared if the device is not marked + * pci_channel_io_perm_failure. Therefore, consider logging the + * event to let device removal happen. + * */ if ((ret < 0) || - (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) { + (ret == EEH_STATE_NOT_SUPPORT && + dev->error_state == pci_channel_io_perm_failure) || + eeh_state_active(ret)) { eeh_stats.false_positives++; pe->false_positives++; rc = 0; @@ -1264,22 +1273,6 @@ EXPORT_SYMBOL(eeh_dev_release); #ifdef CONFIG_IOMMU_API -static int dev_has_iommu_table(struct device *dev, void *data) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct pci_dev **ppdev = data; - - if (!dev) - return 0; - - if (device_iommu_mapped(dev)) { - *ppdev = pdev; - return 1; - } - - return 0; -} - /** * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE * @group: IOMMU group @@ -1516,6 +1509,8 @@ int eeh_pe_configure(struct eeh_pe *pe) /* Invalid PE ? */ if (!pe) return -ENODEV; + else + ret = eeh_ops->configure_bridge(pe); return ret; } @@ -1544,10 +1539,6 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, if (!eeh_ops || !eeh_ops->err_inject) return -ENOENT; - /* Check on PCI error type */ - if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) - return -EINVAL; - /* Check on PCI error function */ if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX) return -EINVAL; @@ -1585,6 +1576,104 @@ static int proc_eeh_show(struct seq_file *m, void *v) } #endif /* CONFIG_PROC_FS */ +static int eeh_break_device(struct pci_dev *pdev) +{ + struct resource *bar = NULL; + void __iomem *mapped; + u16 old, bit; + int i, pos; + + /* Do we have an MMIO BAR to disable? */ + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + struct resource *r = &pdev->resource[i]; + + if (!r->flags || !r->start) + continue; + if (r->flags & IORESOURCE_IO) + continue; + if (r->flags & IORESOURCE_UNSET) + continue; + + bar = r; + break; + } + + if (!bar) { + pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n"); + return -ENXIO; + } + + pci_err(pdev, "Going to break: %pR\n", bar); + + if (pdev->is_virtfn) { +#ifndef CONFIG_PCI_IOV + return -ENXIO; +#else + /* + * VFs don't have a per-function COMMAND register, so the best + * we can do is clear the Memory Space Enable bit in the PF's + * SRIOV control reg. + * + * Unfortunately, this requires that we have a PF (i.e doesn't + * work for a passed-through VF) and it has the potential side + * effect of also causing an EEH on every other VF under the + * PF. Oh well. + */ + pdev = pdev->physfn; + if (!pdev) + return -ENXIO; /* passed through VFs have no PF */ + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pos += PCI_SRIOV_CTRL; + bit = PCI_SRIOV_CTRL_MSE; +#endif /* !CONFIG_PCI_IOV */ + } else { + bit = PCI_COMMAND_MEMORY; + pos = PCI_COMMAND; + } + + /* + * Process here is: + * + * 1. Disable Memory space. + * + * 2. Perform an MMIO to the device. This should result in an error + * (CA / UR) being raised by the device which results in an EEH + * PE freeze. Using the in_8() accessor skips the eeh detection hook + * so the freeze hook so the EEH Detection machinery won't be + * triggered here. This is to match the usual behaviour of EEH + * where the HW will asynchronously freeze a PE and it's up to + * the kernel to notice and deal with it. + * + * 3. Turn Memory space back on. 
This is more important for VFs + * since recovery will probably fail if we don't. For normal + * the COMMAND register is reset as a part of re-initialising + * the device. + * + * Breaking stuff is the point so who cares if it's racy ;) + */ + pci_read_config_word(pdev, pos, &old); + + mapped = ioremap(bar->start, PAGE_SIZE); + if (!mapped) { + pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar); + return -ENXIO; + } + + pci_write_config_word(pdev, pos, old & ~bit); + in_8(mapped); + pci_write_config_word(pdev, pos, old); + + iounmap(mapped); + + return 0; +} + +int eeh_pe_inject_mmio_error(struct pci_dev *pdev) +{ + return eeh_break_device(pdev); +} + #ifdef CONFIG_DEBUG_FS @@ -1689,7 +1778,6 @@ static ssize_t eeh_force_recover_write(struct file *filp, static const struct file_operations eeh_force_recover_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_force_recover_write, }; @@ -1733,104 +1821,10 @@ static ssize_t eeh_dev_check_write(struct file *filp, static const struct file_operations eeh_dev_check_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_dev_check_write, .read = eeh_debugfs_dev_usage, }; -static int eeh_debugfs_break_device(struct pci_dev *pdev) -{ - struct resource *bar = NULL; - void __iomem *mapped; - u16 old, bit; - int i, pos; - - /* Do we have an MMIO BAR to disable? */ - for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { - struct resource *r = &pdev->resource[i]; - - if (!r->flags || !r->start) - continue; - if (r->flags & IORESOURCE_IO) - continue; - if (r->flags & IORESOURCE_UNSET) - continue; - - bar = r; - break; - } - - if (!bar) { - pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n"); - return -ENXIO; - } - - pci_err(pdev, "Going to break: %pR\n", bar); - - if (pdev->is_virtfn) { -#ifndef CONFIG_PCI_IOV - return -ENXIO; -#else - /* - * VFs don't have a per-function COMMAND register, so the best - * we can do is clear the Memory Space Enable bit in the PF's - * SRIOV control reg. - * - * Unfortunately, this requires that we have a PF (i.e doesn't - * work for a passed-through VF) and it has the potential side - * effect of also causing an EEH on every other VF under the - * PF. Oh well. - */ - pdev = pdev->physfn; - if (!pdev) - return -ENXIO; /* passed through VFs have no PF */ - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - pos += PCI_SRIOV_CTRL; - bit = PCI_SRIOV_CTRL_MSE; -#endif /* !CONFIG_PCI_IOV */ - } else { - bit = PCI_COMMAND_MEMORY; - pos = PCI_COMMAND; - } - - /* - * Process here is: - * - * 1. Disable Memory space. - * - * 2. Perform an MMIO to the device. This should result in an error - * (CA / UR) being raised by the device which results in an EEH - * PE freeze. Using the in_8() accessor skips the eeh detection hook - * so the freeze hook so the EEH Detection machinery won't be - * triggered here. This is to match the usual behaviour of EEH - * where the HW will asynchronously freeze a PE and it's up to - * the kernel to notice and deal with it. - * - * 3. Turn Memory space back on. This is more important for VFs - * since recovery will probably fail if we don't. For normal - * the COMMAND register is reset as a part of re-initialising - * the device. 
- * - * Breaking stuff is the point so who cares if it's racy ;) - */ - pci_read_config_word(pdev, pos, &old); - - mapped = ioremap(bar->start, PAGE_SIZE); - if (!mapped) { - pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar); - return -ENXIO; - } - - pci_write_config_word(pdev, pos, old & ~bit); - in_8(mapped); - pci_write_config_word(pdev, pos, old); - - iounmap(mapped); - - return 0; -} - static ssize_t eeh_dev_break_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) @@ -1842,7 +1836,7 @@ static ssize_t eeh_dev_break_write(struct file *filp, if (IS_ERR(pdev)) return PTR_ERR(pdev); - ret = eeh_debugfs_break_device(pdev); + ret = eeh_break_device(pdev); pci_dev_put(pdev); if (ret < 0) @@ -1853,7 +1847,6 @@ static ssize_t eeh_dev_break_write(struct file *filp, static const struct file_operations eeh_dev_break_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_dev_break_write, .read = eeh_debugfs_dev_usage, }; @@ -1900,7 +1893,6 @@ static ssize_t eeh_dev_can_recover(struct file *filp, static const struct file_operations eeh_dev_can_recover_fops = { .open = simple_open, - .llseek = no_llseek, .write = eeh_dev_can_recover, .read = eeh_debugfs_dev_usage, }; diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 48773d2d9be3..7efe04c68f0f 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -865,9 +865,18 @@ void eeh_handle_normal_event(struct eeh_pe *pe) devices++; if (!devices) { - pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n", + pr_warn("EEH: Frozen PHB#%x-PE#%x is empty!\n", pe->phb->global_number, pe->addr); - goto out; /* nothing to recover */ + /* + * The device is removed, tear down its state, on powernv + * hotplug driver would take care of it but not on pseries, + * permanently disable the card as it is hot removed. + * + * In the case of powernv, note that the removal of device + * is covered by pci rescan lock, so no problem even if hotplug + * driver attempts to remove the device. + */ + goto recover_failed; } /* Log the event */ diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index e0ce81279624..d283d281d28e 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -24,10 +24,10 @@ static int eeh_pe_aux_size = 0; static LIST_HEAD(eeh_phb_pe); /** - * eeh_set_pe_aux_size - Set PE auxillary data size - * @size: PE auxillary data size + * eeh_set_pe_aux_size - Set PE auxiliary data size + * @size: PE auxiliary data size in bytes * - * Set PE auxillary data size + * Set PE auxiliary data size. */ void eeh_set_pe_aux_size(int size) { @@ -527,7 +527,7 @@ EXPORT_SYMBOL_GPL(eeh_pe_state_mark); * eeh_pe_mark_isolated * @pe: EEH PE * - * Record that a PE has been isolated by marking the PE and it's children as + * Record that a PE has been isolated by marking the PE and its children as * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices * as pci_channel_io_frozen. 
*/ @@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) { struct eeh_dev *edev; struct pci_dev *pdev; + struct pci_bus *bus = NULL; if (pe->type & EEH_PE_PHB) return pe->phb->bus; @@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) /* Retrieve the parent PCI bus of first (top) PCI device */ edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry); + pci_lock_rescan_remove(); pdev = eeh_dev_to_pci_dev(edev); if (pdev) - return pdev->bus; + bus = pdev->bus; + pci_unlock_rescan_remove(); - return NULL; + return bus; } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 7eda33a24bb4..f4a8c9877249 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -108,7 +108,7 @@ transfer_to_syscall: stw r11, 0(r1) mflr r12 stw r12, _LINK(r1) -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ #endif lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */ @@ -158,9 +158,6 @@ syscall_exit_finish: 1: REST_GPR(2, r1) REST_GPR(1, r1) rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif 3: mtcr r5 lwz r4,_CTR(r1) @@ -214,7 +211,7 @@ start_kernel_thread: .globl fast_exception_return fast_exception_return: -#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) +#ifndef CONFIG_BOOKE andi. r10,r9,MSR_RI /* check for recoverable interrupt */ beq 3f /* if not, we've got problems */ #endif @@ -237,9 +234,6 @@ fast_exception_return: REST_GPR(12, r11) REST_GPR(11, r11) rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif _ASM_NOKPROBE_SYMBOL(fast_exception_return) /* aargh, a nonrecoverable interrupt, panic */ @@ -296,9 +290,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) REST_GPR(0, r1) REST_GPR(1, r1) rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif .Lrestore_nvgprs: REST_NVGPRS(r1) @@ -346,9 +337,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) REST_GPR(0, r1) REST_GPR(1, r1) rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif 1: /* * Emulate stack store with update. New r1 value was already calculated @@ -375,12 +363,9 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) mfspr r9, SPRN_SPRG_SCRATCH0 #endif rfi -#ifdef CONFIG_40x - b . /* Prevent prefetch past rfi */ -#endif _ASM_NOKPROBE_SYMBOL(interrupt_return) -#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) +#ifdef CONFIG_BOOKE /* * Returning from a critical interrupt in user mode doesn't need @@ -395,17 +380,6 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) * time of the critical interrupt. 
* */ -#ifdef CONFIG_40x -#define PPC_40x_TURN_OFF_MSR_DR \ - /* avoid any possible TLB misses here by turning off MSR.DR, we \ - * assume the instructions here are mapped by a pinned TLB entry */ \ - li r10,MSR_IR; \ - mtmsr r10; \ - isync; \ - tophys(r1, r1); -#else -#define PPC_40x_TURN_OFF_MSR_DR -#endif #define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \ REST_NVGPRS(r1); \ @@ -423,7 +397,6 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) mtlr r11; \ lwz r10,_CCR(r1); \ mtcrf 0xff,r10; \ - PPC_40x_TURN_OFF_MSR_DR; \ lwz r9,_DEAR(r1); \ lwz r10,_ESR(r1); \ mtspr SPRN_DEAR,r9; \ @@ -471,20 +444,6 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) #define RESTORE_MMU_REGS #endif -#ifdef CONFIG_40x - .globl ret_from_crit_exc -ret_from_crit_exc: - lis r9,crit_srr0@ha; - lwz r9,crit_srr0@l(r9); - lis r10,crit_srr1@ha; - lwz r10,crit_srr1@l(r10); - mtspr SPRN_SRR0,r9; - mtspr SPRN_SRR1,r10; - RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) -_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc) -#endif /* CONFIG_40x */ - -#ifdef CONFIG_BOOKE .globl ret_from_crit_exc ret_from_crit_exc: RESTORE_xSRR(SRR0,SRR1); @@ -509,4 +468,3 @@ ret_from_mcheck_exc: RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI) _ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc) #endif /* CONFIG_BOOKE */ -#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S index 1a9b5ae8ccb2..6a414ed5a411 100644 --- a/arch/powerpc/kernel/epapr_hcalls.S +++ b/arch/powerpc/kernel/epapr_hcalls.S @@ -21,7 +21,7 @@ _GLOBAL(epapr_ev_idle) ori r4, r4,_TLF_NAPPING /* so when we take an exception */ PPC_STL r4, TI_LOCAL_FLAGS(r2) /* it will return to our caller */ -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE wrteei 1 #else mfmsr r4 diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index d4b8aff20815..247ab2acaccc 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -9,7 +9,7 @@ #include <linux/of_fdt.h> #include <asm/epapr_hcalls.h> #include <asm/cacheflush.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/machdep.h> #include <asm/inst.h> diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index dcf0591ad3c2..63f6b9f513a4 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -485,8 +485,8 @@ interrupt_base_book3e: /* fake trap */ EXCEPTION_STUB(0x160, decrementer) /* 0x0900 */ EXCEPTION_STUB(0x180, fixed_interval) /* 0x0980 */ EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ - EXCEPTION_STUB(0x1c0, data_tlb_miss) - EXCEPTION_STUB(0x1e0, instruction_tlb_miss) + EXCEPTION_STUB(0x1c0, data_tlb_miss_bolted) + EXCEPTION_STUB(0x1e0, instruction_tlb_miss_bolted) EXCEPTION_STUB(0x200, altivec_unavailable) EXCEPTION_STUB(0x220, altivec_assist) EXCEPTION_STUB(0x260, perfmon) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index eaf2f167c342..b7229430ca94 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1989,13 +1989,6 @@ INT_DEFINE_END(system_call) INTERRUPT_TO_KERNEL #endif -#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH -BEGIN_FTR_SECTION - cmpdi r0,0x1ebe - beq- 1f -END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) -#endif - /* We reach here with PACA in r13, r13 in r9. 
*/ mfspr r11,SPRN_SRR0 mfspr r12,SPRN_SRR1 @@ -2015,16 +2008,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) b system_call_common #endif .endif - -#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH - /* Fast LE/BE switch system call */ -1: mfspr r12,SPRN_SRR1 - xori r12,r12,MSR_LE - mtspr SPRN_SRR1,r12 - mr r13,r9 - RFI_TO_USER /* return to userspace */ - b . /* prevent speculative execution */ -#endif .endm EXC_REAL_BEGIN(system_call, 0xc00, 0x100) @@ -2554,27 +2537,8 @@ EXC_REAL_NONE(0x1000, 0x100) EXC_VIRT_NONE(0x5000, 0x100) EXC_REAL_NONE(0x1100, 0x100) EXC_VIRT_NONE(0x5100, 0x100) - -#ifdef CONFIG_CBE_RAS -INT_DEFINE_BEGIN(cbe_system_error) - IVEC=0x1200 - IHSRR=1 -INT_DEFINE_END(cbe_system_error) - -EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100) - GEN_INT_ENTRY cbe_system_error, virt=0 -EXC_REAL_END(cbe_system_error, 0x1200, 0x100) -EXC_VIRT_NONE(0x5200, 0x100) -EXC_COMMON_BEGIN(cbe_system_error_common) - GEN_COMMON cbe_system_error - addi r3,r1,STACK_INT_FRAME_REGS - bl CFUNC(cbe_system_error_exception) - b interrupt_return_hsrr - -#else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1200, 0x100) EXC_VIRT_NONE(0x5200, 0x100) -#endif /** * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt. @@ -2725,26 +2689,8 @@ EXC_COMMON_BEGIN(denorm_exception_common) b interrupt_return_hsrr -#ifdef CONFIG_CBE_RAS -INT_DEFINE_BEGIN(cbe_maintenance) - IVEC=0x1600 - IHSRR=1 -INT_DEFINE_END(cbe_maintenance) - -EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100) - GEN_INT_ENTRY cbe_maintenance, virt=0 -EXC_REAL_END(cbe_maintenance, 0x1600, 0x100) -EXC_VIRT_NONE(0x5600, 0x100) -EXC_COMMON_BEGIN(cbe_maintenance_common) - GEN_COMMON cbe_maintenance - addi r3,r1,STACK_INT_FRAME_REGS - bl CFUNC(cbe_maintenance_exception) - b interrupt_return_hsrr - -#else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1600, 0x100) EXC_VIRT_NONE(0x5600, 0x100) -#endif INT_DEFINE_BEGIN(altivec_assist) @@ -2772,26 +2718,8 @@ EXC_COMMON_BEGIN(altivec_assist_common) b interrupt_return_srr -#ifdef CONFIG_CBE_RAS -INT_DEFINE_BEGIN(cbe_thermal) - IVEC=0x1800 - IHSRR=1 -INT_DEFINE_END(cbe_thermal) - -EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100) - GEN_INT_ENTRY cbe_thermal, virt=0 -EXC_REAL_END(cbe_thermal, 0x1800, 0x100) -EXC_VIRT_NONE(0x5800, 0x100) -EXC_COMMON_BEGIN(cbe_thermal_common) - GEN_COMMON cbe_thermal - addi r3,r1,STACK_INT_FRAME_REGS - bl CFUNC(cbe_thermal_exception) - b interrupt_return_hsrr - -#else /* CONFIG_CBE_RAS */ EXC_REAL_NONE(0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) -#endif #ifdef CONFIG_PPC_WATCHDOG diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index d14eda1e8589..8ca49e40c473 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -33,6 +33,7 @@ #include <asm/fadump-internal.h> #include <asm/setup.h> #include <asm/interrupt.h> +#include <asm/prom.h> /* * The CPU who acquired the lock to trigger the fadump crash should @@ -53,8 +54,6 @@ static struct kobject *fadump_kobj; static atomic_t cpus_in_fadump; static DEFINE_MUTEX(fadump_mutex); -static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; - #define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ #define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \ sizeof(struct fadump_memory_range)) @@ -80,26 +79,38 @@ static struct cma *fadump_cma; * But for some reason even if it fails we still have the memory reservation * with us and we can still continue doing fadump. 
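 *
 * As a worked example of the trimming done below, assuming a 1MiB
 * CMA_MIN_ALIGNMENT_BYTES (the real value is configuration dependent):
 * a reservation of [0x7f8c0000, 0x8f8c0000) would be handed to CMA as
 * the aligned sub-range [0x7f900000, 0x8f800000).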
*/ -static int __init fadump_cma_init(void) +void __init fadump_cma_init(void) { - unsigned long long base, size; + unsigned long long base, size, end; int rc; - if (!fw_dump.fadump_enabled) - return 0; - + if (!fw_dump.fadump_supported || !fw_dump.fadump_enabled || + fw_dump.dump_active) + return; /* * Do not use CMA if user has provided fadump=nocma kernel parameter. - * Return 1 to continue with fadump old behaviour. */ - if (fw_dump.nocma) - return 1; + if (fw_dump.nocma || !fw_dump.boot_memory_size) + return; + /* + * [base, end) should be reserved during early init in + * fadump_reserve_mem(). No need to check this here as + * cma_init_reserved_mem() already checks for overlap. + * Here we give the aligned chunk of this reserved memory to CMA. + */ base = fw_dump.reserve_dump_area_start; size = fw_dump.boot_memory_size; + end = base + size; - if (!size) - return 0; + base = ALIGN(base, CMA_MIN_ALIGNMENT_BYTES); + end = ALIGN_DOWN(end, CMA_MIN_ALIGNMENT_BYTES); + size = end - base; + + if (end <= base) { + pr_warn("%s: Too less memory to give to CMA\n", __func__); + return; + } rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma); if (rc) { @@ -110,7 +121,7 @@ static int __init fadump_cma_init(void) * blocked from production system usage. Hence return 1, * so that we can continue with fadump. */ - return 1; + return; } /* @@ -122,17 +133,50 @@ static int __init fadump_cma_init(void) /* * So we now have successfully initialized cma area for fadump. */ - pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx " + pr_info("Initialized [0x%llx, %luMB] cma area from [0x%lx, %luMB] " "bytes of memory reserved for firmware-assisted dump\n", - cma_get_size(fadump_cma), - (unsigned long)cma_get_base(fadump_cma) >> 20, - fw_dump.reserve_dump_area_size); - return 1; + cma_get_base(fadump_cma), cma_get_size(fadump_cma) >> 20, + fw_dump.reserve_dump_area_start, + fw_dump.boot_memory_size >> 20); + return; } -#else -static int __init fadump_cma_init(void) { return 1; } #endif /* CONFIG_CMA */ +/* + * Additional parameters meant for capture kernel are placed in a dedicated area. + * If this is capture kernel boot, append these parameters to bootargs. + */ +void __init fadump_append_bootargs(void) +{ + char *append_args; + size_t len; + + if (!fw_dump.dump_active || !fw_dump.param_area_supported || !fw_dump.param_area) + return; + + if (fw_dump.param_area < fw_dump.boot_mem_top) { + if (memblock_reserve(fw_dump.param_area, COMMAND_LINE_SIZE)) { + pr_warn("WARNING: Can't use additional parameters area!\n"); + fw_dump.param_area = 0; + return; + } + } + + append_args = (char *)fw_dump.param_area; + len = strlen(boot_command_line); + + /* + * Too late to fail even if cmdline size exceeds. Truncate additional parameters + * to cmdline size and proceed anyway. + */ + if (len + strlen(append_args) >= COMMAND_LINE_SIZE - 1) + pr_warn("WARNING: Appending parameters exceeds cmdline size. Truncating!\n"); + + pr_debug("Cmdline: %s\n", boot_command_line); + snprintf(boot_command_line + len, COMMAND_LINE_SIZE - len, " %s", append_args); + pr_info("Updated cmdline: %s\n", boot_command_line); +} + /* Scan the Firmware Assisted dump configuration details. */ int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) @@ -223,28 +267,6 @@ static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end) } /* - * Returns true, if there are no holes in boot memory area, - * false otherwise. 
- */ -bool is_fadump_boot_mem_contiguous(void) -{ - unsigned long d_start, d_end; - bool ret = false; - int i; - - for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { - d_start = fw_dump.boot_mem_addr[i]; - d_end = d_start + fw_dump.boot_mem_sz[i]; - - ret = is_fadump_mem_area_contiguous(d_start, d_end); - if (!ret) - break; - } - - return ret; -} - -/* * Returns true, if there are no holes in reserved memory area, * false otherwise. */ @@ -268,10 +290,8 @@ static void __init fadump_show_config(void) if (!fw_dump.fadump_supported) return; - pr_debug("Fadump enabled : %s\n", - (fw_dump.fadump_enabled ? "yes" : "no")); - pr_debug("Dump Active : %s\n", - (fw_dump.dump_active ? "yes" : "no")); + pr_debug("Fadump enabled : %s\n", str_yes_no(fw_dump.fadump_enabled)); + pr_debug("Dump Active : %s\n", str_yes_no(fw_dump.dump_active)); pr_debug("Dump section sizes:\n"); pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size); pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size); @@ -373,12 +393,6 @@ static unsigned long __init get_fadump_area_size(void) size = PAGE_ALIGN(size); size += fw_dump.boot_memory_size; size += sizeof(struct fadump_crash_info_header); - size += sizeof(struct elfhdr); /* ELF core header.*/ - size += sizeof(struct elf_phdr); /* place holder for cpu notes */ - /* Program headers for crash memory regions. */ - size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2); - - size = PAGE_ALIGN(size); /* This is to hold kernel metadata on platforms that support it */ size += (fw_dump.ops->fadump_get_metadata_size ? @@ -389,10 +403,11 @@ static unsigned long __init get_fadump_area_size(void) static int __init add_boot_mem_region(unsigned long rstart, unsigned long rsize) { + int max_boot_mem_rgns = fw_dump.ops->fadump_max_boot_mem_rgns(); int i = fw_dump.boot_mem_regs_cnt++; - if (fw_dump.boot_mem_regs_cnt > FADUMP_MAX_MEM_REGS) { - fw_dump.boot_mem_regs_cnt = FADUMP_MAX_MEM_REGS; + if (fw_dump.boot_mem_regs_cnt > max_boot_mem_rgns) { + fw_dump.boot_mem_regs_cnt = max_boot_mem_rgns; return 0; } @@ -552,13 +567,6 @@ int __init fadump_reserve_mem(void) if (!fw_dump.dump_active) { fw_dump.boot_memory_size = PAGE_ALIGN(fadump_calculate_reserve_size()); -#ifdef CONFIG_CMA - if (!fw_dump.nocma) { - fw_dump.boot_memory_size = - ALIGN(fw_dump.boot_memory_size, - CMA_MIN_ALIGNMENT_BYTES); - } -#endif bootmem_min = fw_dump.ops->fadump_get_bootmem_min(); if (fw_dump.boot_memory_size < bootmem_min) { @@ -573,22 +581,6 @@ int __init fadump_reserve_mem(void) } } - /* - * Calculate the memory boundary. - * If memory_limit is less than actual memory boundary then reserve - * the memory for fadump beyond the memory_limit and adjust the - * memory_limit accordingly, so that the running kernel can run with - * specified memory_limit. 
- */ - if (memory_limit && memory_limit < memblock_end_of_DRAM()) { - size = get_fadump_area_size(); - if ((memory_limit + size) < memblock_end_of_DRAM()) - memory_limit += size; - else - memory_limit = memblock_end_of_DRAM(); - printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" - " dump, now %#016llx\n", memory_limit); - } if (memory_limit) mem_boundary = memory_limit; else @@ -647,8 +639,6 @@ int __init fadump_reserve_mem(void) pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n", (size >> 20), base, (memblock_phys_mem_size() >> 20)); - - ret = fadump_cma_init(); } return ret; @@ -705,7 +695,7 @@ void crash_fadump(struct pt_regs *regs, const char *str) * old_cpu == -1 means this is the first CPU which has come here, * go ahead and trigger fadump. * - * old_cpu != -1 means some other CPU has already on it's way + * old_cpu != -1 means some other CPU has already on its way * to trigger fadump, just keep looping here. */ this_cpu = smp_processor_id(); @@ -760,7 +750,7 @@ u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) * prstatus.pr_pid = ???? */ elf_core_copy_regs(&prstatus.pr_reg, regs); - buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); return buf; } @@ -931,36 +921,6 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, return 0; } -static int fadump_exclude_reserved_area(u64 start, u64 end) -{ - u64 ra_start, ra_end; - int ret = 0; - - ra_start = fw_dump.reserve_dump_area_start; - ra_end = ra_start + fw_dump.reserve_dump_area_size; - - if ((ra_start < end) && (ra_end > start)) { - if ((start < ra_start) && (end > ra_end)) { - ret = fadump_add_mem_range(&crash_mrange_info, - start, ra_start); - if (ret) - return ret; - - ret = fadump_add_mem_range(&crash_mrange_info, - ra_end, end); - } else if (start < ra_start) { - ret = fadump_add_mem_range(&crash_mrange_info, - start, ra_start); - } else if (ra_end < end) { - ret = fadump_add_mem_range(&crash_mrange_info, - ra_end, end); - } - } else - ret = fadump_add_mem_range(&crash_mrange_info, start, end); - - return ret; -} - static int fadump_init_elfcore_header(char *bufp) { struct elfhdr *elf; @@ -998,52 +958,6 @@ static int fadump_init_elfcore_header(char *bufp) } /* - * Traverse through memblock structure and setup crash memory ranges. These - * ranges will be used create PT_LOAD program headers in elfcore header. - */ -static int fadump_setup_crash_memory_ranges(void) -{ - u64 i, start, end; - int ret; - - pr_debug("Setup crash memory ranges.\n"); - crash_mrange_info.mem_range_cnt = 0; - - /* - * Boot memory region(s) registered with firmware are moved to - * different location at the time of crash. Create separate program - * header(s) for this memory chunk(s) with the correct offset. - */ - for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { - start = fw_dump.boot_mem_addr[i]; - end = start + fw_dump.boot_mem_sz[i]; - ret = fadump_add_mem_range(&crash_mrange_info, start, end); - if (ret) - return ret; - } - - for_each_mem_range(i, &start, &end) { - /* - * skip the memory chunk that is already added - * (0 through boot_memory_top). - */ - if (start < fw_dump.boot_mem_top) { - if (end > fw_dump.boot_mem_top) - start = fw_dump.boot_mem_top; - else - continue; - } - - /* add this range excluding the reserved dump area. 
*/ - ret = fadump_exclude_reserved_area(start, end); - if (ret) - return ret; - } - - return 0; -} - -/* * If the given physical address falls within the boot memory region then * return the relocated address that points to the dump region reserved * for saving initial boot memory contents. @@ -1073,36 +987,50 @@ static inline unsigned long fadump_relocate(unsigned long paddr) return raddr; } -static int fadump_create_elfcore_headers(char *bufp) +static void __init populate_elf_pt_load(struct elf_phdr *phdr, u64 start, + u64 size, unsigned long long offset) { - unsigned long long raddr, offset; - struct elf_phdr *phdr; + phdr->p_align = 0; + phdr->p_memsz = size; + phdr->p_filesz = size; + phdr->p_paddr = start; + phdr->p_offset = offset; + phdr->p_type = PT_LOAD; + phdr->p_flags = PF_R|PF_W|PF_X; + phdr->p_vaddr = (unsigned long)__va(start); +} + +static void __init fadump_populate_elfcorehdr(struct fadump_crash_info_header *fdh) +{ + char *bufp; struct elfhdr *elf; - int i, j; + struct elf_phdr *phdr; + u64 boot_mem_dest_offset; + unsigned long long i, ra_start, ra_end, ra_size, mstart, mend; + bufp = (char *) fw_dump.elfcorehdr_addr; fadump_init_elfcore_header(bufp); elf = (struct elfhdr *)bufp; bufp += sizeof(struct elfhdr); /* - * setup ELF PT_NOTE, place holder for cpu notes info. The notes info - * will be populated during second kernel boot after crash. Hence - * this PT_NOTE will always be the first elf note. + * Set up ELF PT_NOTE, a placeholder for CPU notes information. + * The notes info will be populated later by platform-specific code. + * Hence, this PT_NOTE will always be the first ELF note. * * NOTE: Any new ELF note addition should be placed after this note. */ phdr = (struct elf_phdr *)bufp; bufp += sizeof(struct elf_phdr); phdr->p_type = PT_NOTE; - phdr->p_flags = 0; - phdr->p_vaddr = 0; - phdr->p_align = 0; - - phdr->p_offset = 0; - phdr->p_paddr = 0; - phdr->p_filesz = 0; - phdr->p_memsz = 0; - + phdr->p_flags = 0; + phdr->p_vaddr = 0; + phdr->p_align = 0; + phdr->p_offset = 0; + phdr->p_paddr = 0; + phdr->p_filesz = 0; + phdr->p_memsz = 0; + /* Increment number of program headers. */ (elf->e_phnum)++; /* setup ELF PT_NOTE for vmcoreinfo */ @@ -1112,55 +1040,66 @@ static int fadump_create_elfcore_headers(char *bufp) phdr->p_flags = 0; phdr->p_vaddr = 0; phdr->p_align = 0; - - phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note()); - phdr->p_offset = phdr->p_paddr; - phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE; - + phdr->p_paddr = phdr->p_offset = fdh->vmcoreinfo_raddr; + phdr->p_memsz = phdr->p_filesz = fdh->vmcoreinfo_size; /* Increment number of program headers. */ (elf->e_phnum)++; - /* setup PT_LOAD sections. */ - j = 0; - offset = 0; - raddr = fw_dump.boot_mem_addr[0]; - for (i = 0; i < crash_mrange_info.mem_range_cnt; i++) { - u64 mbase, msize; - - mbase = crash_mrange_info.mem_ranges[i].base; - msize = crash_mrange_info.mem_ranges[i].size; - if (!msize) - continue; - + /* + * Setup PT_LOAD sections. first include boot memory regions + * and then add rest of the memory regions. + */ + boot_mem_dest_offset = fw_dump.boot_mem_dest_addr; + for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) { phdr = (struct elf_phdr *)bufp; bufp += sizeof(struct elf_phdr); - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R|PF_W|PF_X; - phdr->p_offset = mbase; - - if (mbase == raddr) { - /* - * The entire real memory region will be moved by - * firmware to the specified destination_address. - * Hence set the correct offset. 
- */ - phdr->p_offset = fw_dump.boot_mem_dest_addr + offset; - if (j < (fw_dump.boot_mem_regs_cnt - 1)) { - offset += fw_dump.boot_mem_sz[j]; - raddr = fw_dump.boot_mem_addr[++j]; - } + populate_elf_pt_load(phdr, fw_dump.boot_mem_addr[i], + fw_dump.boot_mem_sz[i], + boot_mem_dest_offset); + /* Increment number of program headers. */ + (elf->e_phnum)++; + boot_mem_dest_offset += fw_dump.boot_mem_sz[i]; + } + + /* Memory reserved for fadump in first kernel */ + ra_start = fw_dump.reserve_dump_area_start; + ra_size = get_fadump_area_size(); + ra_end = ra_start + ra_size; + + phdr = (struct elf_phdr *)bufp; + for_each_mem_range(i, &mstart, &mend) { + /* Boot memory regions already added, skip them now */ + if (mstart < fw_dump.boot_mem_top) { + if (mend > fw_dump.boot_mem_top) + mstart = fw_dump.boot_mem_top; + else + continue; } - phdr->p_paddr = mbase; - phdr->p_vaddr = (unsigned long)__va(mbase); - phdr->p_filesz = msize; - phdr->p_memsz = msize; - phdr->p_align = 0; + /* Handle memblock regions overlaps with fadump reserved area */ + if ((ra_start < mend) && (ra_end > mstart)) { + if ((mstart < ra_start) && (mend > ra_end)) { + populate_elf_pt_load(phdr, mstart, ra_start - mstart, mstart); + /* Increment number of program headers. */ + (elf->e_phnum)++; + bufp += sizeof(struct elf_phdr); + phdr = (struct elf_phdr *)bufp; + populate_elf_pt_load(phdr, ra_end, mend - ra_end, ra_end); + } else if (mstart < ra_start) { + populate_elf_pt_load(phdr, mstart, ra_start - mstart, mstart); + } else if (ra_end < mend) { + populate_elf_pt_load(phdr, ra_end, mend - ra_end, ra_end); + } + } else { + /* No overlap with fadump reserved memory region */ + populate_elf_pt_load(phdr, mstart, mend - mstart, mstart); + } /* Increment number of program headers. */ (elf->e_phnum)++; + bufp += sizeof(struct elf_phdr); + phdr = (struct elf_phdr *) bufp; } - return 0; } static unsigned long init_fadump_header(unsigned long addr) @@ -1175,14 +1114,25 @@ static unsigned long init_fadump_header(unsigned long addr) memset(fdh, 0, sizeof(struct fadump_crash_info_header)); fdh->magic_number = FADUMP_CRASH_INFO_MAGIC; - fdh->elfcorehdr_addr = addr; + fdh->version = FADUMP_HEADER_VERSION; /* We will set the crashing cpu id in crash_fadump() during crash. */ fdh->crashing_cpu = FADUMP_CPU_UNKNOWN; + + /* + * The physical address and size of vmcoreinfo are required in the + * second kernel to prepare elfcorehdr. + */ + fdh->vmcoreinfo_raddr = fadump_relocate(paddr_vmcoreinfo_note()); + fdh->vmcoreinfo_size = VMCOREINFO_NOTE_SIZE; + + + fdh->pt_regs_sz = sizeof(struct pt_regs); /* * When LPAR is terminated by PYHP, ensure all possible CPUs' * register data is processed while exporting the vmcore. */ fdh->cpu_mask = *cpu_possible_mask; + fdh->cpu_mask_sz = sizeof(struct cpumask); return addr; } @@ -1190,8 +1140,6 @@ static unsigned long init_fadump_header(unsigned long addr) static int register_fadump(void) { unsigned long addr; - void *vaddr; - int ret; /* * If no memory is reserved then we can not register for firmware- @@ -1200,18 +1148,10 @@ static int register_fadump(void) if (!fw_dump.reserve_dump_area_size) return -ENODEV; - ret = fadump_setup_crash_memory_ranges(); - if (ret) - return ret; - addr = fw_dump.fadumphdr_addr; /* Initialize fadump crash info header. */ addr = init_fadump_header(addr); - vaddr = __va(addr); - - pr_debug("Creating ELF core headers at %#016lx\n", addr); - fadump_create_elfcore_headers(vaddr); /* register the future kernel dump with firmware. 
*/ pr_debug("Registering for firmware-assisted kernel dump...\n"); @@ -1230,7 +1170,6 @@ void fadump_cleanup(void) } else if (fw_dump.dump_registered) { /* Un-register Firmware-assisted dump if it was registered. */ fw_dump.ops->fadump_unregister(&fw_dump); - fadump_free_mem_ranges(&crash_mrange_info); } if (fw_dump.ops->fadump_cleanup) @@ -1416,6 +1355,22 @@ static void fadump_release_memory(u64 begin, u64 end) fadump_release_reserved_area(tstart, end); } +static void fadump_free_elfcorehdr_buf(void) +{ + if (fw_dump.elfcorehdr_addr == 0 || fw_dump.elfcorehdr_size == 0) + return; + + /* + * Before freeing the memory of `elfcorehdr`, reset the global + * `elfcorehdr_addr` to prevent modules like `vmcore` from accessing + * invalid memory. + */ + elfcorehdr_addr = ELFCORE_ADDR_ERR; + fadump_free_buffer(fw_dump.elfcorehdr_addr, fw_dump.elfcorehdr_size); + fw_dump.elfcorehdr_addr = 0; + fw_dump.elfcorehdr_size = 0; +} + static void fadump_invalidate_release_mem(void) { mutex_lock(&fadump_mutex); @@ -1427,6 +1382,7 @@ static void fadump_invalidate_release_mem(void) fadump_cleanup(); mutex_unlock(&fadump_mutex); + fadump_free_elfcorehdr_buf(); fadump_release_memory(fw_dump.boot_mem_top, memblock_end_of_DRAM()); fadump_free_cpu_notes_buf(); @@ -1484,6 +1440,18 @@ static ssize_t enabled_show(struct kobject *kobj, return sprintf(buf, "%d\n", fw_dump.fadump_enabled); } +/* + * /sys/kernel/fadump/hotplug_ready sysfs node returns 1, which inidcates + * to usersapce that fadump re-registration is not required on memory + * hotplug events. + */ +static ssize_t hotplug_ready_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", 1); +} + static ssize_t mem_reserved_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -1498,6 +1466,43 @@ static ssize_t registered_show(struct kobject *kobj, return sprintf(buf, "%d\n", fw_dump.dump_registered); } +static ssize_t bootargs_append_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", (char *)__va(fw_dump.param_area)); +} + +static ssize_t bootargs_append_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + char *params; + + if (!fw_dump.fadump_enabled || fw_dump.dump_active) + return -EPERM; + + if (count >= COMMAND_LINE_SIZE) + return -EINVAL; + + /* + * Fail here instead of handling this scenario with + * some silly workaround in capture kernel. + */ + if (saved_command_line_len + count >= COMMAND_LINE_SIZE) { + pr_err("Appending parameters exceeds cmdline size!\n"); + return -ENOSPC; + } + + params = __va(fw_dump.param_area); + strscpy_pad(params, buf, COMMAND_LINE_SIZE); + /* Remove newline character at the end. 
*/ if (params[count-1] == '\n') + params[count-1] = '\0'; + + return count; +} + static ssize_t registered_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) @@ -1556,11 +1561,14 @@ static struct kobj_attribute release_attr = __ATTR_WO(release_mem); static struct kobj_attribute enable_attr = __ATTR_RO(enabled); static struct kobj_attribute register_attr = __ATTR_RW(registered); static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved); +static struct kobj_attribute hotplug_ready_attr = __ATTR_RO(hotplug_ready); +static struct kobj_attribute bootargs_append_attr = __ATTR_RW(bootargs_append); static struct attribute *fadump_attrs[] = { &enable_attr.attr, &register_attr.attr, &mem_reserved_attr.attr, + &hotplug_ready_attr.attr, NULL, }; @@ -1578,6 +1586,12 @@ static void __init fadump_init_files(void) return; } + if (fw_dump.param_area) { + rc = sysfs_create_file(fadump_kobj, &bootargs_append_attr.attr); + if (rc) + pr_err("unable to create bootargs_append sysfs file (%d)\n", rc); + } + debugfs_create_file("fadump_region", 0444, arch_debugfs_dir, NULL, &fadump_region_fops); @@ -1632,6 +1646,150 @@ static void __init fadump_init_files(void) return; } +static int __init fadump_setup_elfcorehdr_buf(void) +{ + int elf_phdr_cnt; + unsigned long elfcorehdr_size; + + /* + * Program header for CPU notes comes first, followed by one for + * vmcoreinfo, and the remaining program headers correspond to + * memory regions. + */ + elf_phdr_cnt = 2 + fw_dump.boot_mem_regs_cnt + memblock_num_regions(memory); + elfcorehdr_size = sizeof(struct elfhdr) + (elf_phdr_cnt * sizeof(struct elf_phdr)); + elfcorehdr_size = PAGE_ALIGN(elfcorehdr_size); + + fw_dump.elfcorehdr_addr = (u64)fadump_alloc_buffer(elfcorehdr_size); + if (!fw_dump.elfcorehdr_addr) { + pr_err("Failed to allocate %lu bytes for elfcorehdr\n", + elfcorehdr_size); + return -ENOMEM; + } + fw_dump.elfcorehdr_size = elfcorehdr_size; + return 0; +} + +/* + * Check if the fadump header of crashed kernel is compatible with fadump kernel. + * + * It checks the magic number, endianness, and size of non-primitive type + * members of fadump header to ensure safe dump collection. + */ +static bool __init is_fadump_header_compatible(struct fadump_crash_info_header *fdh) +{ + if (fdh->magic_number == FADUMP_CRASH_INFO_MAGIC_OLD) { + pr_err("Old magic number, can't process the dump.\n"); + return false; + } + + if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { + if (fdh->magic_number == swab64(FADUMP_CRASH_INFO_MAGIC)) + pr_err("Endianness mismatch between the crashed and fadump kernels.\n"); + else + pr_err("Fadump header is corrupted.\n"); + + return false; + } + + /* + * Dump collection is not safe if the size of non-primitive type members + * of the fadump header do not match between crashed and fadump kernel.
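+	 * For example, a crashed kernel built with a different NR_CPUS can
+	 * have a different sizeof(struct cpumask), and interpreting its
+	 * saved CPU mask with the wrong size would corrupt the dump
+	 * rather than collect it.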
+ */ + if (fdh->pt_regs_sz != sizeof(struct pt_regs) || + fdh->cpu_mask_sz != sizeof(struct cpumask)) { + pr_err("Fadump header size mismatch.\n"); + return false; + } + + return true; +} + +static void __init fadump_process(void) +{ + struct fadump_crash_info_header *fdh; + + fdh = (struct fadump_crash_info_header *) __va(fw_dump.fadumphdr_addr); + if (!fdh) { + pr_err("Crash info header is empty.\n"); + goto err_out; + } + + /* Avoid processing the dump if fadump header isn't compatible */ + if (!is_fadump_header_compatible(fdh)) + goto err_out; + + /* Allocate buffer for elfcorehdr */ + if (fadump_setup_elfcorehdr_buf()) + goto err_out; + + fadump_populate_elfcorehdr(fdh); + + /* Let platform update the CPU notes in elfcorehdr */ + if (fw_dump.ops->fadump_process(&fw_dump) < 0) + goto err_out; + + /* + * elfcorehdr is now ready to be exported. + * + * set elfcorehdr_addr so that vmcore module will export the + * elfcorehdr through '/proc/vmcore'. + */ + elfcorehdr_addr = virt_to_phys((void *)fw_dump.elfcorehdr_addr); + return; + +err_out: + fadump_invalidate_release_mem(); +} + +/* + * Reserve memory to store additional parameters to be passed + * for fadump/capture kernel. + */ +void __init fadump_setup_param_area(void) +{ + phys_addr_t range_start, range_end; + + if (!fw_dump.param_area_supported || fw_dump.dump_active) + return; + + /* This memory can't be used by PFW or bootloader as it is shared across kernels */ + if (early_radix_enabled()) { + /* + * Anywhere in the upper half should be good enough as all memory + * is accessible in real mode. + */ + range_start = memblock_end_of_DRAM() / 2; + range_end = memblock_end_of_DRAM(); + } else { + /* + * Memory range for passing additional parameters for HASH MMU + * must meet the following conditions: + * 1. The first memory block size must be higher than the + * minimum RMA (MIN_RMA) size. Bootloader can use memory + * upto RMA size. So it should be avoided. + * 2. The range should be between MIN_RMA and RMA size (ppc64_rma_size) + * 3. It must not overlap with the fadump reserved area. + */ + if (ppc64_rma_size < MIN_RMA*1024*1024) + return; + + range_start = MIN_RMA * 1024 * 1024; + range_end = min(ppc64_rma_size, fw_dump.boot_mem_top); + } + + fw_dump.param_area = memblock_phys_alloc_range(COMMAND_LINE_SIZE, + COMMAND_LINE_SIZE, + range_start, + range_end); + if (!fw_dump.param_area) { + pr_warn("WARNING: Could not setup area to pass additional parameters!\n"); + return; + } + + memset((void *)fw_dump.param_area, 0, COMMAND_LINE_SIZE); +} + /* * Prepare for firmware-assisted dump. */ @@ -1651,12 +1809,7 @@ int __init setup_fadump(void) * saving it to the disk. */ if (fw_dump.dump_active) { - /* - * if dump process fails then invalidate the registration - * and release memory before proceeding for re-registration. 
- */ - if (fw_dump.ops->fadump_process(&fw_dump) < 0) - fadump_invalidate_release_mem(); + fadump_process(); } /* Initialize the kernel dump memory structure and register with f/w */ else if (fw_dump.reserve_dump_area_size) { @@ -1735,8 +1888,3 @@ static void __init fadump_reserve_crash_area(u64 base) memblock_reserve(mstart, msize); } } - -unsigned long __init arch_reserved_kernel_pages(void) -{ - return memblock_reserved_size() / PAGE_SIZE; -} diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index f8e2911478a7..9cba7dbf58dd 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -21,17 +21,9 @@ mtspr SPRN_SPRG_SCRATCH1,r11 mfspr r10, SPRN_SPRG_THREAD .if \handle_dar_dsisr -#ifdef CONFIG_40x - mfspr r11, SPRN_DEAR -#else mfspr r11, SPRN_DAR -#endif stw r11, DAR(r10) -#ifdef CONFIG_40x - mfspr r11, SPRN_ESR -#else mfspr r11, SPRN_DSISR -#endif stw r11, DSISR(r10) .endif mfspr r11, SPRN_SRR0 @@ -96,9 +88,7 @@ .endif lwz r9, SRR1(r12) lwz r12, SRR0(r12) -#ifdef CONFIG_40x - rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ -#elif defined(CONFIG_PPC_8xx) +#ifdef CONFIG_PPC_8xx mtspr SPRN_EID, r2 /* Set MSR_RI */ #else li r10, MSR_KERNEL /* can take exceptions */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S deleted file mode 100644 index 9fc90410b385..000000000000 --- a/arch/powerpc/kernel/head_40x.S +++ /dev/null @@ -1,721 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> - * Initial PowerPC version. - * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> - * Rewritten for PReP - * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> - * Low-level exception handers, MMU support, and rewrite. - * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> - * PowerPC 8xx modifications. - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> - * PowerPC 403GCX/405GP modifications. - * Copyright 2000 MontaVista Software Inc. - * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com - * - * Module name: head_4xx.S - * - * Description: - * Kernel execution entry point code. - */ - -#include <linux/init.h> -#include <linux/pgtable.h> -#include <linux/sizes.h> -#include <linux/linkage.h> - -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/cputable.h> -#include <asm/thread_info.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include <asm/ptrace.h> - -#include "head_32.h" - -/* As with the other PowerPC ports, it is expected that when code - * execution begins here, the following registers contain valid, yet - * optional, information: - * - * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) - * r4 - Starting address of the init RAM disk - * r5 - Ending address of the init RAM disk - * r6 - Start of kernel command line string (e.g. "mem=96m") - * r7 - End of kernel command line string - * - * This is all going to change RSN when we add bi_recs....... -- Dan - */ - __HEAD -_GLOBAL(_stext); -_GLOBAL(_start); - - mr r31,r3 /* save device tree ptr */ - - /* We have to turn on the MMU right away so we get cache modes - * set correctly. - */ - bl initial_mmu - -/* We now have the lower 16 Meg mapped into TLB entries, and the caches - * ready to work. 
- */ -turn_on_mmu: - lis r0,MSR_KERNEL@h - ori r0,r0,MSR_KERNEL@l - mtspr SPRN_SRR1,r0 - lis r0,start_here@h - ori r0,r0,start_here@l - mtspr SPRN_SRR0,r0 - rfi /* enables MMU */ - b . /* prevent prefetch past rfi */ - -/* - * This area is used for temporarily saving registers during the - * critical exception prolog. - */ - . = 0xc0 -crit_save: -_GLOBAL(crit_r10) - .space 4 -_GLOBAL(crit_r11) - .space 4 -_GLOBAL(crit_srr0) - .space 4 -_GLOBAL(crit_srr1) - .space 4 -_GLOBAL(crit_r1) - .space 4 -_GLOBAL(crit_dear) - .space 4 -_GLOBAL(crit_esr) - .space 4 - -/* - * Exception prolog for critical exceptions. This is a little different - * from the normal exception prolog above since a critical exception - * can potentially occur at any point during normal exception processing. - * Thus we cannot use the same SPRG registers as the normal prolog above. - * Instead we use a couple of words of memory at low physical addresses. - * This is OK since we don't support SMP on these processors. - */ -.macro CRITICAL_EXCEPTION_PROLOG trapno name - stw r10,crit_r10@l(0) /* save two registers to work with */ - stw r11,crit_r11@l(0) - mfspr r10,SPRN_SRR0 - mfspr r11,SPRN_SRR1 - stw r10,crit_srr0@l(0) - stw r11,crit_srr1@l(0) - mfspr r10,SPRN_DEAR - mfspr r11,SPRN_ESR - stw r10,crit_dear@l(0) - stw r11,crit_esr@l(0) - mfcr r10 /* save CR in r10 for now */ - mfspr r11,SPRN_SRR3 /* check whether user or kernel */ - andi. r11,r11,MSR_PR - lis r11,(critirq_ctx-PAGE_OFFSET)@ha - lwz r11,(critirq_ctx-PAGE_OFFSET)@l(r11) - beq 1f - /* COMING FROM USER MODE */ - mfspr r11,SPRN_SPRG_THREAD /* if from user, start at top of */ - lwz r11,TASK_STACK-THREAD(r11) /* this thread's kernel stack */ -1: stw r1,crit_r1@l(0) - addi r1,r11,THREAD_SIZE-INT_FRAME_SIZE /* Alloc an excpt frm */ - LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)) /* re-enable MMU */ - mtspr SPRN_SRR1, r11 - lis r11, 1f@h - ori r11, r11, 1f@l - mtspr SPRN_SRR0, r11 - rfi - - .text -1: -\name\()_virt: - lwz r11,crit_r1@l(0) - stw r11,GPR1(r1) - stw r11,0(r1) - mr r11,r1 - stw r10,_CCR(r11) /* save various registers */ - stw r12,GPR12(r11) - stw r9,GPR9(r11) - mflr r10 - stw r10,_LINK(r11) - lis r9,PAGE_OFFSET@ha - lwz r10,crit_r10@l(r9) - lwz r12,crit_r11@l(r9) - stw r10,GPR10(r11) - stw r12,GPR11(r11) - lwz r12,crit_dear@l(r9) - lwz r9,crit_esr@l(r9) - stw r12,_DEAR(r11) /* since they may have had stuff */ - stw r9,_ESR(r11) /* exception was taken */ - mfspr r12,SPRN_SRR2 - mfspr r9,SPRN_SRR3 - rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */ - COMMON_EXCEPTION_PROLOG_END \trapno + 2 -_ASM_NOKPROBE_SYMBOL(\name\()_virt) -.endm - - /* - * State at this point: - * r9 saved in stack frame, now saved SRR3 & ~MSR_WE - * r10 saved in crit_r10 and in stack frame, trashed - * r11 saved in crit_r11 and in stack frame, - * now phys stack/exception frame pointer - * r12 saved in stack frame, now saved SRR2 - * CR saved in stack frame, CR0.EQ = !SRR3.PR - * LR, DEAR, ESR in stack frame - * r1 saved in stack frame, now virt stack/excframe pointer - * r0, r3-r8 saved in stack frame - */ - -/* - * Exception vectors. 
- */ -#define CRITICAL_EXCEPTION(n, label, hdlr) \ - START_EXCEPTION(n, label); \ - CRITICAL_EXCEPTION_PROLOG n label; \ - prepare_transfer_to_handler; \ - bl hdlr; \ - b ret_from_crit_exc - -/* - * 0x0100 - Critical Interrupt Exception - */ - CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception) - -/* - * 0x0200 - Machine Check Exception - */ - CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) - -/* - * 0x0300 - Data Storage Exception - * This happens for just a few reasons. U0 set (but we don't do that), - * or zone protection fault (user violation, write to protected page). - * The other Data TLB exceptions bail out to this point - * if they can't resolve the lightweight TLB fault. - */ - START_EXCEPTION(0x0300, DataStorage) - EXCEPTION_PROLOG 0x300 DataStorage handle_dar_dsisr=1 - prepare_transfer_to_handler - bl do_page_fault - b interrupt_return - -/* - * 0x0400 - Instruction Storage Exception - * This is caused by a fetch from non-execute or guarded pages. - */ - START_EXCEPTION(0x0400, InstructionAccess) - EXCEPTION_PROLOG 0x400 InstructionAccess - li r5,0 - stw r5, _ESR(r11) /* Zero ESR */ - stw r12, _DEAR(r11) /* SRR0 as DEAR */ - prepare_transfer_to_handler - bl do_page_fault - b interrupt_return - -/* 0x0500 - External Interrupt Exception */ - EXCEPTION(0x0500, HardwareInterrupt, do_IRQ) - -/* 0x0600 - Alignment Exception */ - START_EXCEPTION(0x0600, Alignment) - EXCEPTION_PROLOG 0x600 Alignment handle_dar_dsisr=1 - prepare_transfer_to_handler - bl alignment_exception - REST_NVGPRS(r1) - b interrupt_return - -/* 0x0700 - Program Exception */ - START_EXCEPTION(0x0700, ProgramCheck) - EXCEPTION_PROLOG 0x700 ProgramCheck handle_dar_dsisr=1 - prepare_transfer_to_handler - bl program_check_exception - REST_NVGPRS(r1) - b interrupt_return - - EXCEPTION(0x0800, Trap_08, unknown_exception) - EXCEPTION(0x0900, Trap_09, unknown_exception) - EXCEPTION(0x0A00, Trap_0A, unknown_exception) - EXCEPTION(0x0B00, Trap_0B, unknown_exception) - -/* 0x0C00 - System Call Exception */ - START_EXCEPTION(0x0C00, SystemCall) - SYSCALL_ENTRY 0xc00 -/* Trap_0D is commented out to get more space for system call exception */ - -/* EXCEPTION(0x0D00, Trap_0D, unknown_exception) */ - EXCEPTION(0x0E00, Trap_0E, unknown_exception) - EXCEPTION(0x0F00, Trap_0F, unknown_exception) - -/* 0x1000 - Programmable Interval Timer (PIT) Exception */ - START_EXCEPTION(0x1000, DecrementerTrap) - b Decrementer - -/* 0x1010 - Fixed Interval Timer (FIT) Exception */ - START_EXCEPTION(0x1010, FITExceptionTrap) - b FITException - -/* 0x1020 - Watchdog Timer (WDT) Exception */ - START_EXCEPTION(0x1020, WDTExceptionTrap) - b WDTException - -/* 0x1100 - Data TLB Miss Exception - * As the name implies, translation is not in the MMU, so search the - * page tables and fix it. The only purpose of this function is to - * load TLB entries from the page table if they exist. - */ - START_EXCEPTION(0x1100, DTLBMiss) - mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */ - mtspr SPRN_SPRG_SCRATCH6, r11 - mtspr SPRN_SPRG_SCRATCH3, r12 - mtspr SPRN_SPRG_SCRATCH4, r9 - mfcr r12 - mfspr r9, SPRN_PID - rlwimi r12, r9, 0, 0xff - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, PAGE_OFFSET@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. 
- */ -3: - mfspr r11,SPRN_SPRG_THREAD - lwz r11,PGDIR(r11) -#ifdef CONFIG_PPC_KUAP - rlwinm. r9, r9, 0, 0xff - beq 5f /* Kuap fault */ -#endif -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r11, 0(r11) /* Get L1 entry */ - andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */ - beq 2f /* Bail if no table */ - - rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r11) /* Get Linux PTE */ - li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ - andc. r9, r9, r11 /* Check permission */ - bne 5f - - rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */ - and r9, r9, r11 /* hwwrite = dirty & w */ - rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */ - - /* Create TLB tag. This is the faulting address plus a static - * set of bits. These are size, valid, E, U0. - */ - li r9, 0x00c0 - rlwimi r10, r9, 0, 20, 31 - - b finish_tlb_load - -2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r11, 2, 22, 24 - beq 5f - - /* Create TLB tag. This is the faulting address, plus a static - * set of bits (valid, E, U0) plus the size from the PMD. - */ - ori r9, r9, 0x40 - rlwimi r10, r9, 0, 20, 31 - - b finish_tlb_load - -5: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mtspr SPRN_PID, r12 - mtcrf 0x80, r12 - mfspr r9, SPRN_SPRG_SCRATCH4 - mfspr r12, SPRN_SPRG_SCRATCH3 - mfspr r11, SPRN_SPRG_SCRATCH6 - mfspr r10, SPRN_SPRG_SCRATCH5 - b DataStorage - -/* 0x1200 - Instruction TLB Miss Exception - * Nearly the same as above, except we get our information from different - * registers and bailout to a different point. - */ - START_EXCEPTION(0x1200, ITLBMiss) - mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */ - mtspr SPRN_SPRG_SCRATCH6, r11 - mtspr SPRN_SPRG_SCRATCH3, r12 - mtspr SPRN_SPRG_SCRATCH4, r9 - mfcr r12 - mfspr r9, SPRN_PID - rlwimi r12, r9, 0, 0xff - mfspr r10, SPRN_SRR0 /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, PAGE_OFFSET@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG_THREAD - lwz r11,PGDIR(r11) -#ifdef CONFIG_PPC_KUAP - rlwinm. r9, r9, 0, 0xff - beq 5f /* Kuap fault */ -#endif -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r11, 0(r11) /* Get L1 entry */ - andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */ - beq 2f /* Bail if no table */ - - rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r11) /* Get Linux PTE */ - li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC - andc. r9, r9, r11 /* Check permission */ - bne 5f - - rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */ - and r9, r9, r11 /* hwwrite = dirty & w */ - rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */ - - /* Create TLB tag. This is the faulting address plus a static - * set of bits. These are size, valid, E, U0. - */ - li r9, 0x00c0 - rlwimi r10, r9, 0, 20, 31 - - b finish_tlb_load - -2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r11, 2, 22, 24 - beq 5f - - /* Create TLB tag. This is the faulting address, plus a static - * set of bits (valid, E, U0) plus the size from the PMD. - */ - ori r9, r9, 0x40 - rlwimi r10, r9, 0, 20, 31 - - b finish_tlb_load - -5: - /* The bailout. 
Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ - mtspr SPRN_PID, r12 - mtcrf 0x80, r12 - mfspr r9, SPRN_SPRG_SCRATCH4 - mfspr r12, SPRN_SPRG_SCRATCH3 - mfspr r11, SPRN_SPRG_SCRATCH6 - mfspr r10, SPRN_SPRG_SCRATCH5 - b InstructionAccess - - EXCEPTION(0x1300, Trap_13, unknown_exception) - EXCEPTION(0x1400, Trap_14, unknown_exception) - EXCEPTION(0x1500, Trap_15, unknown_exception) - EXCEPTION(0x1600, Trap_16, unknown_exception) - EXCEPTION(0x1700, Trap_17, unknown_exception) - EXCEPTION(0x1800, Trap_18, unknown_exception) - EXCEPTION(0x1900, Trap_19, unknown_exception) - EXCEPTION(0x1A00, Trap_1A, unknown_exception) - EXCEPTION(0x1B00, Trap_1B, unknown_exception) - EXCEPTION(0x1C00, Trap_1C, unknown_exception) - EXCEPTION(0x1D00, Trap_1D, unknown_exception) - EXCEPTION(0x1E00, Trap_1E, unknown_exception) - EXCEPTION(0x1F00, Trap_1F, unknown_exception) - -/* Check for a single step debug exception while in an exception - * handler before state has been saved. This is to catch the case - * where an instruction that we are trying to single step causes - * an exception (eg ITLB/DTLB miss) and thus the first instruction of - * the exception handler generates a single step debug exception. - * - * If we get a debug trap on the first instruction of an exception handler, - * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is - * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). - * The exception handler was handling a non-critical interrupt, so it will - * save (and later restore) the MSR via SPRN_SRR1, which will still have - * the MSR_DE bit set. - */ - /* 0x2000 - Debug Exception */ - START_EXCEPTION(0x2000, DebugTrap) - CRITICAL_EXCEPTION_PROLOG 0x2000 DebugTrap - - /* - * If this is a single step or branch-taken exception in an - * exception entry sequence, it was probably meant to apply to - * the code where the exception occurred (since exception entry - * doesn't turn off DE automatically). We simulate the effect - * of turning off DE on entry to an exception handler by turning - * off DE in the SRR3 value and clearing the debug status. - */ - mfspr r10,SPRN_DBSR /* check single-step/branch taken */ - andis. r10,r10,DBSR_IC@h - beq+ 2f - - andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ - beq 1f /* branch and fix it up */ - - mfspr r10,SPRN_SRR2 /* Faulting instruction address */ - cmplwi r10,0x2100 - bgt+ 2f /* address above exception vectors */ - - /* here it looks like we got an inappropriate debug exception. */ -1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ - lis r10,DBSR_IC@h /* clear the IC event */ - mtspr SPRN_DBSR,r10 - /* restore state and get out */ - lwz r10,_CCR(r11) - lwz r0,GPR0(r11) - lwz r1,GPR1(r11) - mtcrf 0x80,r10 - mtspr SPRN_SRR2,r12 - mtspr SPRN_SRR3,r9 - lwz r9,GPR9(r11) - lwz r12,GPR12(r11) - lwz r10,crit_r10@l(0) - lwz r11,crit_r11@l(0) - rfci - b . - - /* continue normal handling for a critical exception... */ -2: mfspr r4,SPRN_DBSR - stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */ - prepare_transfer_to_handler - bl DebugException - b ret_from_crit_exc - - /* Programmable Interval Timer (PIT) Exception. (from 0x1000) */ - __HEAD -Decrementer: - EXCEPTION_PROLOG 0x1000 Decrementer - lis r0,TSR_PIS@h - mtspr SPRN_TSR,r0 /* Clear the PIT exception */ - prepare_transfer_to_handler - bl timer_interrupt - b interrupt_return - - /* Fixed Interval Timer (FIT) Exception. 
(from 0x1010) */ - __HEAD -FITException: - EXCEPTION_PROLOG 0x1010 FITException - prepare_transfer_to_handler - bl unknown_exception - b interrupt_return - - /* Watchdog Timer (WDT) Exception. (from 0x1020) */ - __HEAD -WDTException: - CRITICAL_EXCEPTION_PROLOG 0x1020 WDTException - prepare_transfer_to_handler - bl WatchdogException - b ret_from_crit_exc - -/* Other PowerPC processors, namely those derived from the 6xx-series - * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. - * However, for the 4xx-series processors these are neither defined nor - * reserved. - */ - - __HEAD - /* Damn, I came up one instruction too many to fit into the - * exception space :-). Both the instruction and data TLB - * miss get to this point to load the TLB. - * r10 - TLB_TAG value - * r11 - Linux PTE - * r9 - available to use - * PID - loaded with proper value when we get here - * Upon exit, we reload everything and RFI. - * Actually, it will fit now, but oh well.....a common place - * to load the TLB. - */ -tlb_4xx_index: - .long 0 -finish_tlb_load: - /* - * Clear out the software-only bits in the PTE to generate the - * TLB_DATA value. These are the bottom 2 bits of the RPM, the - * 4 bits of the zone field, and M. - */ - li r9, 0x0cf2 - andc r11, r11, r9 - rlwimi r11, r10, 8, 24, 27 /* Copy 4 upper address bit into zone */ - - /* load the next available TLB index. */ - lwz r9, tlb_4xx_index@l(0) - addi r9, r9, 1 - andi. r9, r9, PPC40X_TLB_SIZE - 1 - stw r9, tlb_4xx_index@l(0) - - tlbwe r11, r9, TLB_DATA /* Load TLB LO */ - tlbwe r10, r9, TLB_TAG /* Load TLB HI */ - - /* Done...restore registers and get out of here. - */ - mtspr SPRN_PID, r12 - mtcrf 0x80, r12 - mfspr r9, SPRN_SPRG_SCRATCH4 - mfspr r12, SPRN_SPRG_SCRATCH3 - mfspr r11, SPRN_SPRG_SCRATCH6 - mfspr r10, SPRN_SPRG_SCRATCH5 - rfi /* Should sync shadow TLBs */ - b . /* prevent prefetch past rfi */ - -/* This is where the main kernel code starts. - */ -start_here: - - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to phys current thread */ - tophys(r4,r2) - addi r4,r4,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG_THREAD,r4 - - /* stack */ - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1) - - bl early_init /* We have to do this with MMU on */ - -/* - * Decide what sort of machine this is and initialize the MMU. - */ -#ifdef CONFIG_KASAN - bl kasan_early_init -#endif - li r3,0 - mr r4,r31 - bl machine_init - bl MMU_init - -/* Go back to running unmapped so we can load up new values - * and change to using our exception vectors. - * On the 4xx, all we have to do is invalidate the TLB to clear - * the old 16M byte TLB mappings. - */ - lis r4,2f@h - ori r4,r4,2f@l - tophys(r4,r4) - lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h - ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi - b . /* prevent prefetch past rfi */ - -/* Load up the kernel context */ -2: - sync /* Flush to memory before changing TLB */ - tlbia - isync /* Flush shadow TLBs */ - - /* set up the PTE pointers for the Abatron bdiGDB. - */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(0) /* Must match your Abatron config file */ - tophys(r5,r5) - stw r6, 0(r5) - -/* Now turn on the MMU for real! 
*/ - lis r4,MSR_KERNEL@h - ori r4,r4,MSR_KERNEL@l - lis r3,start_kernel@h - ori r3,r3,start_kernel@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - rfi /* enable MMU and jump to start_kernel */ - b . /* prevent prefetch past rfi */ - -/* Set up the initial MMU state so we can do the first level of - * kernel initialization. This maps the first 32 MBytes of memory 1:1 - * virtual to physical and more importantly sets the cache mode. - */ -SYM_FUNC_START_LOCAL(initial_mmu) - tlbia /* Invalidate all TLB entries */ - isync - - /* We should still be executing code at physical address 0x0000xxxx - * at this point. However, start_here is at virtual address - * 0xC000xxxx. So, set up a TLB mapping to cover this once - * translation is enabled. - */ - - lis r3,KERNELBASE@h /* Load the kernel virtual address */ - ori r3,r3,KERNELBASE@l - tophys(r4,r3) /* Load the kernel physical address */ - - iccci r0,r3 /* Invalidate the i-cache before use */ - - /* Load the kernel PID. - */ - li r0,0 - mtspr SPRN_PID,r0 - sync - - /* Configure and load one entry into TLB slots 63 */ - clrrwi r4,r4,10 /* Mask off the real page number */ - ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ - - clrrwi r3,r3,10 /* Mask off the effective page number */ - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) - - li r0,63 /* TLB slot 63 */ - - tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ - tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ - - li r0,62 /* TLB slot 62 */ - addis r4,r4,SZ_16M@h - addis r3,r3,SZ_16M@h - tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ - tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ - - isync - - /* Establish the exception vector base - */ - lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ - tophys(r0,r4) /* Use the physical address */ - mtspr SPRN_EVPR,r0 - - blr -SYM_FUNC_END(initial_mmu) - -_GLOBAL(abort) - mfspr r13,SPRN_DBCR0 - oris r13,r13,DBCR0_RST_SYSTEM@h - mtspr SPRN_DBCR0,r13 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 4690c219bfa4..63432a33ec49 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -647,8 +647,9 @@ __after_prom_start: * Note: This process overwrites the OF exception vectors. */ LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET) - mr. r4,r26 /* In some cases the loader may */ - beq 9f /* have already put us at zero */ + mr r4,r26 /* Load the virtual source address into r4 */ + cmpld r3,r4 /* Check if source == dest */ + beq 9f /* If so skip the copy */ li r6,0x100 /* Start offset, the first 0x100 */ /* bytes were copied earlier. */ diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S index 39724ff5ae1f..f9a73fae6464 100644 --- a/arch/powerpc/kernel/head_85xx.S +++ b/arch/powerpc/kernel/head_85xx.S @@ -294,9 +294,10 @@ set_ivor: /* Macros to hide the PTE size differences * * FIND_PTE -- walks the page tables given EA & pgdir pointer - * r10 -- EA of fault + * r10 -- free * r11 -- PGDIR pointer * r12 -- free + * r13 -- EA of fault * label 2: is the bailout case * * if we find the pte (fall through): @@ -307,34 +308,34 @@ set_ivor: #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_HUGETLB_PAGE #define FIND_PTE \ - rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ - lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ + rlwinm r12, r13, 14, 18, 28; /* Compute pgdir/pmd offset */ \ + add r12, r11, r12; \ + lwz r11, 4(r12); /* Get pgd/pmd entry */ \ + rlwinm. 
r10, r11, 32 - _PAGE_PSIZE_SHIFT, 0x1e; /* get tsize*/ \ + bne 1000f; /* Huge page (leaf entry) */ \ rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ - blt 1000f; /* Normal non-huge page */ \ beq 2f; /* Bail if no table */ \ - oris r11, r11, PD_HUGE@h; /* Put back address bit */ \ - andi. r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \ - xor r12, r10, r11; /* drop size bits from pointer */ \ - b 1001f; \ -1000: rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ + rlwimi r12, r13, 23, 20, 28; /* Compute pte address */ \ li r10, 0; /* clear r10 */ \ -1001: lwz r11, 4(r12); /* Get pte entry */ + lwz r11, 4(r12); /* Get pte entry */ \ +1000: #else #define FIND_PTE \ - rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ - lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ + rlwinm r12, r13, 14, 18, 28; /* Compute pgdir/pmd offset */ \ + add r12, r11, r12; \ + lwz r11, 4(r12); /* Get pgd/pmd entry */ \ rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ beq 2f; /* Bail if no table */ \ - rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ + rlwimi r12, r13, 23, 20, 28; /* Compute pte address */ \ lwz r11, 4(r12); /* Get pte entry */ #endif /* HUGEPAGE */ #else /* !PTE_64BIT */ #define FIND_PTE \ - rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ + rlwimi r11, r13, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ lwz r11, 0(r11); /* Get L1 entry */ \ rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ beq 2f; /* Bail if no table */ \ - rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ + rlwimi r12, r13, 22, 20, 29; /* Compute PTE address */ \ lwz r11, 0(r12); /* Get Linux PTE */ #endif @@ -441,13 +442,13 @@ START_BTB_FLUSH_SECTION BTB_FLUSH(r10) 1: END_BTB_FLUSH_SECTION - mfspr r10, SPRN_DEAR /* Get faulting address */ + mfspr r13, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. */ lis r11, PAGE_OFFSET@h - cmplw 5, r10, r11 + cmplw 5, r13, r11 blt 5, 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l @@ -470,29 +471,14 @@ END_BTB_FLUSH_SECTION #endif 4: - /* Mask of required permission bits. Note that while we - * do copy ESR:ST to _PAGE_WRITE position as trying to write - * to an RO page is pretty common, we don't do it with - * _PAGE_DIRTY. We could do it, but it's a fairly rare - * event so I'd rather take the overhead when it happens - * rather than adding an instruction here. We should measure - * whether the whole thing is worth it in the first place - * as we could avoid loading SPRN_ESR completely in the first - * place... - * - * TODO: Is it worth doing that mfspr & rlwimi in the first - * place or can we save a couple of instructions here ? - */ - mfspr r12,SPRN_ESR + FIND_PTE + #ifdef CONFIG_PTE_64BIT li r13,_PAGE_PRESENT|_PAGE_BAP_SR oris r13,r13,_PAGE_ACCESSED@h #else li r13,_PAGE_PRESENT|_PAGE_READ|_PAGE_ACCESSED #endif - rlwimi r13,r12,11,29,29 - - FIND_PTE andc. r13,r13,r11 /* Check permission */ #ifdef CONFIG_PTE_64BIT @@ -549,13 +535,13 @@ START_BTB_FLUSH_SECTION 1: END_BTB_FLUSH_SECTION - mfspr r10, SPRN_SRR0 /* Get faulting address */ + mfspr r13, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the * kernel page tables. 
*/ lis r11, PAGE_OFFSET@h - cmplw 5, r10, r11 + cmplw 5, r13, r11 blt 5, 3f lis r11, swapper_pg_dir@h ori r11, r11, swapper_pg_dir@l @@ -564,6 +550,7 @@ END_BTB_FLUSH_SECTION rlwinm r12,r12,0,16,1 mtspr SPRN_MAS1,r12 + FIND_PTE /* Make up the required permissions for kernel code */ #ifdef CONFIG_PTE_64BIT li r13,_PAGE_PRESENT | _PAGE_BAP_SX @@ -584,6 +571,7 @@ END_BTB_FLUSH_SECTION beq 2f /* KUAP fault */ #endif + FIND_PTE /* Make up the required permissions for user code */ #ifdef CONFIG_PTE_64BIT li r13,_PAGE_PRESENT | _PAGE_BAP_UX @@ -593,7 +581,6 @@ END_BTB_FLUSH_SECTION #endif 4: - FIND_PTE andc. r13,r13,r11 /* Check permission */ #ifdef CONFIG_PTE_64BIT @@ -746,17 +733,12 @@ finish_tlb_load: lwz r15, 0(r14) 100: stw r15, 0(r17) - /* - * Calc MAS1_TSIZE from r10 (which has pshift encoded) - * tlb_enc = (pshift - 10). - */ - subi r15, r10, 10 mfspr r16, SPRN_MAS1 - rlwimi r16, r15, 7, 20, 24 + rlwimi r16, r10, MAS1_TSIZE_SHIFT, MAS1_TSIZE_MASK mtspr SPRN_MAS1, r16 /* copy the pshift for use later */ - mr r14, r10 + addi r14, r10, _PAGE_PSIZE_SHIFT_OFFSET /* fall through */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 647b0b445e89..56c5ebe21b99 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -40,16 +40,6 @@ #include "head_32.h" -.macro compare_to_kernel_boundary scratch, addr -#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 -/* By simply checking Address >= 0x80000000, we know if its a kernel address */ - not. \scratch, \addr -#else - rlwinm \scratch, \addr, 16, 0xfff8 - cmpli cr0, \scratch, PAGE_OFFSET@h -#endif -.endm - #define PAGE_SHIFT_512K 19 #define PAGE_SHIFT_8M 23 @@ -199,18 +189,7 @@ instruction_counter: mfspr r10, SPRN_SRR0 /* Get effective address of fault */ INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11) mtspr SPRN_MD_EPN, r10 -#ifdef CONFIG_MODULES - mfcr r11 - compare_to_kernel_boundary r10, r10 -#endif mfspr r10, SPRN_M_TWB /* Get level 1 table */ -#ifdef CONFIG_MODULES - blt+ 3f - rlwinm r10, r10, 0, 20, 31 - oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha -3: - mtcr r11 -#endif lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 mfspr r10, SPRN_MD_TWC @@ -248,19 +227,12 @@ instruction_counter: START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss) mtspr SPRN_SPRG_SCRATCH2, r10 mtspr SPRN_M_TW, r11 - mfcr r11 /* If we are faulting a kernel address, we have to use the * kernel page tables. */ mfspr r10, SPRN_MD_EPN - compare_to_kernel_boundary r10, r10 mfspr r10, SPRN_M_TWB /* Get level 1 table */ - blt+ 3f - rlwinm r10, r10, 0, 20, 31 - oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha -3: - mtcr r11 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 @@ -332,15 +304,19 @@ instruction_counter: cmpwi cr1, r11, RPN_PATTERN beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */ DARFixed:/* Return from dcbx instruction bug workaround */ + mfspr r11, SPRN_DSISR + rlwinm r11, r11, 0, DSISR_NOHPTE + cmpwi cr1, r11, 0 + beq+ cr1, .Ldtlbie + mfspr r11, SPRN_DAR + tlbie r11 + rlwinm r11, r11, 16, 0xffff + cmplwi cr1, r11, TASK_SIZE@h + bge- cr1, FixupPGD +.Ldtlbie: EXCEPTION_PROLOG_1 /* 0x300 is DataAccess exception, needed by bad_page_fault() */ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1 - lwz r4, _DAR(r11) - lwz r5, _DSISR(r11) - andis. 
r10,r5,DSISR_NOHPTE@h - beq+ .Ldtlbie - tlbie r4 -.Ldtlbie: prepare_transfer_to_handler bl do_page_fault b interrupt_return @@ -394,6 +370,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */ __HEAD . = 0x2000 +FixupPGD: + mtspr SPRN_M_TW, r10 + mfspr r10, SPRN_DAR + mtspr SPRN_MD_EPN, r10 + mfspr r11, SPRN_M_TWB /* Get level 1 table */ + lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ + cmpwi cr1, r10, 0 + bne cr1, 1f + + rlwinm r10, r11, 0, 20, 31 + oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha + lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r10) /* Get the level 1 entry */ + cmpwi cr1, r10, 0 + beq cr1, 1f + stw r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Set the level 1 entry */ + mfspr r10, SPRN_M_TW + mtcr r10 + mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH1 + rfi +1: + mfspr r10, SPRN_M_TW + b .Ldtlbie + /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions * by decoding the registers used by the dcbx instruction and adding them. * DAR is set to the calculated address. @@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */ mfspr r10, SPRN_SRR0 mtspr SPRN_MD_EPN, r10 rlwinm r11, r10, 16, 0xfff8 - cmpli cr1, r11, PAGE_OFFSET@h + cmpli cr1, r11, TASK_SIZE@h mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ cr1, 3f @@ -415,14 +415,13 @@ FixupDAR:/* Entry point for dcbx workaround. */ oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha 3: lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ + rlwinm r11, r11, 0, ~_PMD_PAGE_8M mtspr SPRN_MD_TWC, r11 - mtcrf 0x01, r11 mfspr r11, SPRN_MD_TWC lwz r11, 0(r11) /* Get the pte */ - bt 28,200f /* bit 28 = Large page (8M) */ /* concat physical page address(r11) and page offset(r10) */ rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31 -201: lwz r11,0(r11) + lwz r11,0(r11) /* Check if it really is a dcbx instruction. */ /* dcbt and dcbtst does not generate DTLB Misses/Errors, * no need to include them here */ @@ -441,11 +440,6 @@ FixupDAR:/* Entry point for dcbx workaround. */ 141: mfspr r10,SPRN_M_TW b DARFixed /* Nope, go back to normal TLB processing */ -200: - /* concat physical page address(r11) and page offset(r10) */ - rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31 - b 201b - 144: mfspr r10, SPRN_DSISR rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ mtspr SPRN_DSISR, r10 @@ -500,6 +494,7 @@ FixupDAR:/* Entry point for dcbx workaround. 
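The FixupPGD path added above demand-populates the current task's page directory with kernel mappings: when a kernel address above TASK_SIZE faults and its level-1 entry is empty, the matching swapper_pg_dir entry is copied in and the access is retried; otherwise the fault is handed to the normal path. A C model of that logic, a sketch only (the helper name and the u32 view of the level-1 table are illustrative, not from this patch):

/* Returns 1 if the entry was copied and the access should be retried. */
static int fixup_pgd(unsigned int *curr_l1, const unsigned int *swapper_l1,
		     unsigned int ea)
{
	unsigned int idx = ea >> 22;	/* each level-1 entry maps 4 MB */

	if (curr_l1[idx])
		return 0;		/* already populated: genuine fault */
	if (!swapper_l1[idx])
		return 0;		/* kernel has no mapping either */
	curr_l1[idx] = swapper_l1[idx];	/* copy the kernel level-1 entry */
	return 1;
}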
*/ bctr /* jump into table */ 152: mfdar r11 + mtdar r10 mtctr r11 /* restore ctr reg from DAR */ mfspr r11, SPRN_SPRG_THREAD stw r10, DAR(r11) @@ -593,6 +588,10 @@ start_here: lis r0, (MD_TWAM | MD_RSV4I)@h mtspr SPRN_MD_CTR, r0 #endif +#ifndef CONFIG_PIN_TLB_TEXT + li r0, 0 + mtspr SPRN_MI_CTR, r0 +#endif #if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR) lis r0, MD_TWAM@h mtspr SPRN_MD_CTR, r0 @@ -689,6 +688,7 @@ SYM_FUNC_START_LOCAL(initial_mmu) blr SYM_FUNC_END(initial_mmu) +#ifdef CONFIG_PIN_TLB _GLOBAL(mmu_pin_tlb) lis r9, (1f - PAGE_OFFSET)@h ori r9, r9, (1f - PAGE_OFFSET)@l @@ -710,6 +710,7 @@ _GLOBAL(mmu_pin_tlb) mtspr SPRN_MD_CTR, r6 tlbia +#ifdef CONFIG_PIN_TLB_TEXT LOAD_REG_IMMEDIATE(r5, 28 << 8) LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) @@ -730,6 +731,7 @@ _GLOBAL(mmu_pin_tlb) bdnzt lt, 2b lis r0, MI_RSV4I@h mtspr SPRN_MI_CTR, r0 +#endif LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM) #ifdef CONFIG_PIN_TLB_DATA @@ -789,3 +791,4 @@ _GLOBAL(mmu_pin_tlb) mtspr SPRN_SRR1, r10 mtspr SPRN_SRR0, r11 rfi +#endif diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index c1d89764dd22..cb2bca76be53 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -411,39 +411,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) */ . = INTERRUPT_INST_TLB_MISS_603 InstructionTLBMiss: -/* - * r0: userspace flag (later scratch) - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: fault address - */ /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_IMISS -#ifdef CONFIG_MODULES - lis r1, TASK_SIZE@h /* check if kernel address */ - cmplw 0,r1,r3 -#endif + mfspr r0,SPRN_IMISS mfspr r2, SPRN_SDR1 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC rlwinm r2, r2, 28, 0xfffff000 -#ifdef CONFIG_MODULES - li r0, 3 - bgt- 112f - lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ - li r0, 0 - addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ -#endif -112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ +#ifdef CONFIG_EXECMEM + rlwinm r3, r0, 4, 0xf + subi r3, r3, (TASK_SIZE >> 28) & 0xf +#endif rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- InstructionAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ andc. r1,r1,r2 /* check access & ~permission */ bne- InstructionAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ -#ifdef CONFIG_MODULES - rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */ +#ifdef CONFIG_EXECMEM + rlwimi r2, r3, 1, 31, 31 /* userspace ? -> PP lsb */ #endif ori r1, r1, 0xe06 /* clear out reserved bits */ andc r1, r2, r1 /* PP = user? 1 : 0 */ @@ -451,7 +438,7 @@ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 - tlbli r3 + tlbli r0 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r3 rfi @@ -480,35 +467,24 @@ InstructionAddressInvalid: */ . 
= INTERRUPT_DATA_LOAD_TLB_MISS_603 DataLoadTLBMiss: -/* - * r0: userspace flag (later scratch) - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: fault address - */ /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_DMISS - lis r1, TASK_SIZE@h /* check if kernel address */ - cmplw 0,r1,r3 + mfspr r0,SPRN_DMISS mfspr r2, SPRN_SDR1 - li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ - rlwinm r2, r2, 28, 0xfffff000 - li r0, 3 - bgt- 112f - lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ - li r0, 0 - addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ -112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ + rlwinm r1, r2, 28, 0xfffff000 + rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r1) /* get pmd entry */ + rlwinm r3, r0, 4, 0xf rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - beq- DataAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + subi r3, r3, (TASK_SIZE >> 28) & 0xf + beq- 2f /* bail if no mapping */ +1: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ + li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ andc. r1,r1,r2 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */ - rlwimi r2,r0,0,30,31 /* userspace ? -> PP */ + rlwimi r2,r3,2,30,31 /* userspace ? -> PP */ rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */ xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */ ori r1,r1,0xe04 /* clear out reserved bits */ @@ -518,25 +494,35 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mtspr SPRN_RPA,r1 BEGIN_MMU_FTR_SECTION - li r0,1 + li r3,1 mfspr r1,SPRN_SPRG_603_LRU - rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */ - slw r0,r0,r2 - xor r1,r0,r1 - srw r0,r1,r2 + rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */ + slw r3,r3,r2 + xor r1,r3,r1 + srw r3,r1,r2 mtspr SPRN_SPRG_603_LRU,r1 mfspr r2,SPRN_SRR1 - rlwimi r2,r0,31-14,14,14 + rlwimi r2,r3,31-14,14,14 mtspr SPRN_SRR1,r2 mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi MMU_FTR_SECTION_ELSE mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) + +2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + cmpwi cr0,r2,0 + beq- DataAddressInvalid /* return if no mapping */ + stw r2,0(r1) + rlwinm. r2,r2,0,0,19 /* extract address of pte page */ + b 1b DataAddressInvalid: mfspr r3,SPRN_SRR1 rlwinm r1,r3,9,6,6 /* Get load/store bit */ @@ -560,34 +546,24 @@ DataAddressInvalid: */ . 
= INTERRUPT_DATA_STORE_TLB_MISS_603 DataStoreTLBMiss: -/* - * r0: userspace flag (later scratch) - * r1: linux style pte ( later becomes ppc hardware pte ) - * r2: ptr to linux-style pte - * r3: fault address - */ /* Get PTE (linux-style) and check access */ - mfspr r3,SPRN_DMISS - lis r1, TASK_SIZE@h /* check if kernel address */ - cmplw 0,r1,r3 + mfspr r0,SPRN_DMISS mfspr r2, SPRN_SDR1 - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED - rlwinm r2, r2, 28, 0xfffff000 - li r0, 3 - bgt- 112f - lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ - li r0, 0 - addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ -112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ - lwz r2,0(r2) /* get pmd entry */ + rlwinm r1, r2, 28, 0xfffff000 + rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r1) /* get pmd entry */ + rlwinm r3, r0, 4, 0xf rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - beq- DataAddressInvalid /* return if no mapping */ - rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ + subi r3, r3, (TASK_SIZE >> 28) & 0xf + beq- 2f /* bail if no mapping */ +1: + rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ + li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED andc. r1,r1,r2 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ /* Convert linux-style PTE to low word of PPC-style PTE */ - rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */ + rlwimi r2,r3,1,31,31 /* userspace ? -> PP lsb */ li r1,0xe06 /* clear out reserved bits & PP msb */ andc r1,r2,r1 /* PP = user? 1: 0 */ BEGIN_FTR_SECTION @@ -597,26 +573,36 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 BEGIN_MMU_FTR_SECTION - li r0,1 + li r3,1 mfspr r1,SPRN_SPRG_603_LRU - rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */ - slw r0,r0,r2 - xor r1,r0,r1 - srw r0,r1,r2 + rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */ + slw r3,r3,r2 + xor r1,r3,r1 + srw r3,r1,r2 mtspr SPRN_SPRG_603_LRU,r1 mfspr r2,SPRN_SRR1 - rlwimi r2,r0,31-14,14,14 + rlwimi r2,r3,31-14,14,14 mtspr SPRN_SRR1,r2 mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi MMU_FTR_SECTION_ELSE mfspr r2,SPRN_SRR1 /* Need to restore CR0 */ mtcrf 0x80,r2 - tlbld r3 + tlbld r0 rfi ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) +2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ + rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */ + lwz r2,0(r2) /* get pmd entry */ + cmpwi cr0,r2,0 + beq- DataAddressInvalid /* return if no mapping */ + stw r2,0(r1) + rlwinm r2,r2,0,0,19 /* extract address of pte page */ + b 1b + #ifndef CONFIG_ALTIVEC #define altivec_assist_exception unknown_exception #endif diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index b6b5b01a173c..0b5c1993809e 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -145,10 +145,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) b transfer_to_syscall /* jump to handler */ .endm -/* To handle the additional exception priority levels on 40x and Book-E +/* To handle the additional exception priority levels on Book-E * processors we allocate a stack per additional priority level. 
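The three 603 software TLB miss handlers above all perform the same two-level walk: the top 10 bits of the effective address index the page directory, the next 10 bits index the PTE page, and an andc. of the required permission bits against the PTE decides whether to load the TLB or branch to the invalid-address path. A C model of that walk, assuming invented names and a bare u32 view of the tables:

/* Returns the Linux PTE on success, 0 to fall back to the fault path. */
static unsigned int walk_pte(const unsigned int *pgdir, unsigned int ea,
			     unsigned int required)
{
	unsigned int pmd = pgdir[ea >> 22];		/* L1: top 10 bits */
	const unsigned int *pte_page;
	unsigned int pte;

	pte_page = (const unsigned int *)(pmd & ~0xfffu);	/* drop flag bits */
	if (!pte_page)
		return 0;				/* no PTE page mapped */
	pte = pte_page[(ea >> 12) & 0x3ff];		/* L2: next 10 bits */
	if (required & ~pte)
		return 0;				/* permission missing */
	return pte;
}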
* - * On 40x critical is the only additional level * On 44x/e500 we have critical and machine check * * Additionally we reserve a SPRG for each priority level so we can free up a diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 30b56c67fa61..e527cd3ef128 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -97,7 +97,7 @@ void power4_idle(void) /* * Register the sysctl to set/clear powersave_nap. */ -static struct ctl_table powersave_nap_ctl_table[] = { +static const struct ctl_table powersave_nap_ctl_table[] = { { .procname = "powersave-nap", .data = &powersave_nap, diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index eca293794a1e..e0c681d0b076 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -25,6 +25,10 @@ unsigned long global_dbcr0[NR_CPUS]; #endif +#if defined(CONFIG_PREEMPT_DYNAMIC) +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +#endif + #ifdef CONFIG_PPC_BOOK3S_64 DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant); static inline bool exit_must_hard_disable(void) @@ -185,7 +189,7 @@ again: ti_flags = read_thread_flags(); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); - if (ti_flags & _TIF_NEED_RESCHED) { + if (ti_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) { schedule(); } else { /* @@ -266,7 +270,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3, unsigned long ret = 0; bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv; - CT_WARN_ON(ct_state() == CONTEXT_USER); + CT_WARN_ON(ct_state() == CT_STATE_USER); kuap_assert_locked(); @@ -344,7 +348,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs) BUG_ON(regs_is_unrecoverable(regs)); BUG_ON(arch_irq_disabled_regs(regs)); - CT_WARN_ON(ct_state() == CONTEXT_USER); + CT_WARN_ON(ct_state() == CT_STATE_USER); /* * We don't need to restore AMR on the way back to userspace for KUAP. @@ -386,7 +390,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) && TRAP(regs) != INTERRUPT_PROGRAM && TRAP(regs) != INTERRUPT_PERFMON) - CT_WARN_ON(ct_state() == CONTEXT_USER); + CT_WARN_ON(ct_state() == CT_STATE_USER); kuap = kuap_get_and_assert_locked(); @@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) /* Returning to a kernel context with local irqs enabled. */ WARN_ON_ONCE(!(regs->msr & MSR_EE)); again: - if (IS_ENABLED(CONFIG_PREEMPT)) { + if (need_irq_preemption()) { /* Return to preemptible kernel context */ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) { if (preempt_count() == 0) diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c deleted file mode 100644 index c877f074d174..000000000000 --- a/arch/powerpc/kernel/io-workarounds.c +++ /dev/null @@ -1,197 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Support PCI IO workaround - * - * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org> - * IBM, Corp. 
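The interrupt.c hunks above gate kernel preemption on need_irq_preemption() rather than a compile-time IS_ENABLED(CONFIG_PREEMPT) check, and define the sk_dynamic_irqentry_exit_cond_resched static key for the CONFIG_PREEMPT_DYNAMIC case. The helper itself is defined elsewhere in the patch; presumably it follows the usual preempt_dynamic pattern, roughly:

/* Sketch only; not quoted from this patch. */
static inline bool need_irq_preemption(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC))
		return static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched);

	return IS_ENABLED(CONFIG_PREEMPTION);
}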
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION - */ -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/sched/mm.h> /* for init_mm */ -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/ppc-pci.h> -#include <asm/io-workarounds.h> -#include <asm/pte-walk.h> - - -#define IOWA_MAX_BUS 8 - -static struct iowa_bus iowa_busses[IOWA_MAX_BUS]; -static unsigned int iowa_bus_count; - -static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr) -{ - int i, j; - struct resource *res; - unsigned long vstart, vend; - - for (i = 0; i < iowa_bus_count; i++) { - struct iowa_bus *bus = &iowa_busses[i]; - struct pci_controller *phb = bus->phb; - - if (vaddr) { - vstart = (unsigned long)phb->io_base_virt; - vend = vstart + phb->pci_io_size - 1; - if ((vaddr >= vstart) && (vaddr <= vend)) - return bus; - } - - if (paddr) - for (j = 0; j < 3; j++) { - res = &phb->mem_resources[j]; - if (paddr >= res->start && paddr <= res->end) - return bus; - } - } - - return NULL; -} - -#ifdef CONFIG_PPC_INDIRECT_MMIO -struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) -{ - struct iowa_bus *bus; - int token; - - token = PCI_GET_ADDR_TOKEN(addr); - - if (token && token <= iowa_bus_count) - bus = &iowa_busses[token - 1]; - else { - unsigned long vaddr, paddr; - - vaddr = (unsigned long)PCI_FIX_ADDR(addr); - if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END) - return NULL; - - paddr = ppc_find_vmap_phys(vaddr); - - bus = iowa_pci_find(vaddr, paddr); - - if (bus == NULL) - return NULL; - } - - return bus; -} -#else /* CONFIG_PPC_INDIRECT_MMIO */ -struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) -{ - return NULL; -} -#endif /* !CONFIG_PPC_INDIRECT_MMIO */ - -#ifdef CONFIG_PPC_INDIRECT_PIO -struct iowa_bus *iowa_pio_find_bus(unsigned long port) -{ - unsigned long vaddr = (unsigned long)pci_io_base + port; - return iowa_pci_find(vaddr, 0); -} -#else -struct iowa_bus *iowa_pio_find_bus(unsigned long port) -{ - return NULL; -} -#endif - -#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ -static ret iowa_##name at \ -{ \ - struct iowa_bus *bus; \ - bus = iowa_##space##_find_bus(aa); \ - if (bus && bus->ops && bus->ops->name) \ - return bus->ops->name al; \ - return __do_##name al; \ -} - -#define DEF_PCI_AC_NORET(name, at, al, space, aa) \ -static void iowa_##name at \ -{ \ - struct iowa_bus *bus; \ - bus = iowa_##space##_find_bus(aa); \ - if (bus && bus->ops && bus->ops->name) { \ - bus->ops->name al; \ - return; \ - } \ - __do_##name al; \ -} - -#include <asm/io-defs.h> - -#undef DEF_PCI_AC_RET -#undef DEF_PCI_AC_NORET - -static const struct ppc_pci_io iowa_pci_io = { - -#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name, -#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name, - -#include <asm/io-defs.h> - -#undef DEF_PCI_AC_RET -#undef DEF_PCI_AC_NORET - -}; - -#ifdef CONFIG_PPC_INDIRECT_MMIO -void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, - pgprot_t prot, void *caller) -{ - struct iowa_bus *bus; - void __iomem *res = __ioremap_caller(addr, size, prot, caller); - int busno; - - bus = iowa_pci_find(0, (unsigned long)addr); - if (bus != NULL) { - busno = bus - iowa_busses; - PCI_SET_ADDR_TOKEN(res, busno + 1); - } - return res; -} -#endif /* !CONFIG_PPC_INDIRECT_MMIO */ - -bool io_workaround_inited; - -/* Enable IO workaround */ -static void io_workaround_init(void) -{ - if (io_workaround_inited) - return; - ppc_pci_io = iowa_pci_io; - io_workaround_inited = true; -} - -/* Register new bus to 
support workaround */ -void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, - int (*initfunc)(struct iowa_bus *, void *), void *data) -{ - struct iowa_bus *bus; - struct device_node *np = phb->dn; - - io_workaround_init(); - - if (iowa_bus_count >= IOWA_MAX_BUS) { - pr_err("IOWA:Too many pci bridges, " - "workarounds disabled for %pOF\n", np); - return; - } - - bus = &iowa_busses[iowa_bus_count]; - bus->phb = phb; - bus->ops = ops; - bus->private = data; - - if (initfunc) - if ((*initfunc)(bus, data)) - return; - - iowa_bus_count++; - - pr_debug("IOWA:[%d]Add bus, %pOF.\n", iowa_bus_count-1, np); -} - diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c index 6af535905984..bcc201c01514 100644 --- a/arch/powerpc/kernel/io.c +++ b/arch/powerpc/kernel/io.c @@ -31,13 +31,14 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count) if (unlikely(count <= 0)) return; - asm volatile("sync"); + + mb(); do { tmp = *(const volatile u8 __force *)port; eieio(); *tbuf++ = tmp; } while (--count != 0); - asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); + data_barrier(tmp); } EXPORT_SYMBOL(_insb); @@ -47,75 +48,80 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count) if (unlikely(count <= 0)) return; - asm volatile("sync"); + + mb(); do { *(volatile u8 __force *)port = *tbuf++; } while (--count != 0); - asm volatile("sync"); + mb(); } EXPORT_SYMBOL(_outsb); -void _insw_ns(const volatile u16 __iomem *port, void *buf, long count) +void _insw(const volatile u16 __iomem *port, void *buf, long count) { u16 *tbuf = buf; u16 tmp; if (unlikely(count <= 0)) return; - asm volatile("sync"); + + mb(); do { tmp = *(const volatile u16 __force *)port; eieio(); *tbuf++ = tmp; } while (--count != 0); - asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); + data_barrier(tmp); } -EXPORT_SYMBOL(_insw_ns); +EXPORT_SYMBOL(_insw); -void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count) +void _outsw(volatile u16 __iomem *port, const void *buf, long count) { const u16 *tbuf = buf; if (unlikely(count <= 0)) return; - asm volatile("sync"); + + mb(); do { *(volatile u16 __force *)port = *tbuf++; } while (--count != 0); - asm volatile("sync"); + mb(); } -EXPORT_SYMBOL(_outsw_ns); +EXPORT_SYMBOL(_outsw); -void _insl_ns(const volatile u32 __iomem *port, void *buf, long count) +void _insl(const volatile u32 __iomem *port, void *buf, long count) { u32 *tbuf = buf; u32 tmp; if (unlikely(count <= 0)) return; - asm volatile("sync"); + + mb(); do { tmp = *(const volatile u32 __force *)port; eieio(); *tbuf++ = tmp; } while (--count != 0); - asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); + data_barrier(tmp); } -EXPORT_SYMBOL(_insl_ns); +EXPORT_SYMBOL(_insl); -void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count) +void _outsl(volatile u32 __iomem *port, const void *buf, long count) { const u32 *tbuf = buf; if (unlikely(count <= 0)) return; - asm volatile("sync"); + + mb(); do { *(volatile u32 __force *)port = *tbuf++; } while (--count != 0); - asm volatile("sync"); + mb(); } -EXPORT_SYMBOL(_outsl_ns); +EXPORT_SYMBOL(_outsl); #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0) @@ -127,7 +133,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n) lc |= lc << 8; lc |= lc << 16; - __asm__ __volatile__ ("sync" : : : "memory"); + mb(); while(n && !IO_CHECK_ALIGN(p, 4)) { *((volatile u8 *)p) = c; p++; @@ -143,7 +149,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n) p++; n--; } - __asm__ __volatile__ ("sync" 
: : : "memory"); + mb(); } EXPORT_SYMBOL(_memset_io); @@ -152,7 +158,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src, { void *vsrc = (void __force *) src; - __asm__ __volatile__ ("sync" : : : "memory"); + mb(); while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) { *((u8 *)dest) = *((volatile u8 *)vsrc); eieio(); @@ -174,7 +180,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src, dest++; n--; } - __asm__ __volatile__ ("sync" : : : "memory"); + mb(); } EXPORT_SYMBOL(_memcpy_fromio); @@ -182,7 +188,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) { void *vdest = (void __force *) dest; - __asm__ __volatile__ ("sync" : : : "memory"); + mb(); while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) { *((volatile u8 *)vdest) = *((u8 *)src); src++; @@ -201,6 +207,6 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) vdest++; n--; } - __asm__ __volatile__ ("sync" : : : "memory"); + mb(); } EXPORT_SYMBOL(_memcpy_toio); diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 1185efebf032..244eb4857e7f 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -16,6 +16,7 @@ #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/dma-mapping.h> #include <linux/bitmap.h> #include <linux/iommu-helper.h> @@ -26,6 +27,7 @@ #include <linux/iommu.h> #include <linux/sched.h> #include <linux/debugfs.h> +#include <linux/vmalloc.h> #include <asm/io.h> #include <asm/iommu.h> #include <asm/pci-bridge.h> @@ -642,7 +644,7 @@ void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, tbl->it_ops->flush(tbl); } -static void iommu_table_clear(struct iommu_table *tbl) +void iommu_table_clear(struct iommu_table *tbl) { /* * In case of firmware assisted dump system goes through clean @@ -683,10 +685,10 @@ static void iommu_table_clear(struct iommu_table *tbl) #endif } -static void iommu_table_reserve_pages(struct iommu_table *tbl, +void iommu_table_reserve_pages(struct iommu_table *tbl, unsigned long res_start, unsigned long res_end) { - int i; + unsigned long i; WARN_ON_ONCE(res_end < res_start); /* @@ -768,8 +770,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, iommu_table_clear(tbl); if (!welcomed) { - printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", - novmerge ? "disabled" : "enabled"); + pr_info("IOMMU table initialized, virtual merging %s\n", + str_disabled_enabled(novmerge)); welcomed = 1; } @@ -987,6 +989,23 @@ unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir) EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm); #ifdef CONFIG_IOMMU_API + +int dev_has_iommu_table(struct device *dev, void *data) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_dev **ppdev = data; + + if (!dev) + return 0; + + if (device_iommu_mapped(dev)) { + *ppdev = pdev; + return 1; + } + + return 0; +} + /* * SPAPR TCE API */ @@ -1101,59 +1120,6 @@ void iommu_tce_kill(struct iommu_table *tbl, } EXPORT_SYMBOL_GPL(iommu_tce_kill); -#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) -static int iommu_take_ownership(struct iommu_table *tbl) -{ - unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; - int ret = 0; - - /* - * VFIO does not control TCE entries allocation and the guest - * can write new TCEs on top of existing ones so iommu_tce_build() - * must be able to release old pages. 
This functionality - * requires exchange() callback defined so if it is not - * implemented, we disallow taking ownership over the table. - */ - if (!tbl->it_ops->xchg_no_kill) - return -EINVAL; - - spin_lock_irqsave(&tbl->large_pool.lock, flags); - for (i = 0; i < tbl->nr_pools; i++) - spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); - - if (iommu_table_in_use(tbl)) { - pr_err("iommu_tce: it_map is not empty"); - ret = -EBUSY; - } else { - memset(tbl->it_map, 0xff, sz); - } - - for (i = 0; i < tbl->nr_pools; i++) - spin_unlock(&tbl->pools[i].lock); - spin_unlock_irqrestore(&tbl->large_pool.lock, flags); - - return ret; -} - -static void iommu_release_ownership(struct iommu_table *tbl) -{ - unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; - - spin_lock_irqsave(&tbl->large_pool.lock, flags); - for (i = 0; i < tbl->nr_pools; i++) - spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); - - memset(tbl->it_map, 0, sz); - - iommu_table_reserve_pages(tbl, tbl->it_reserved_start, - tbl->it_reserved_end); - - for (i = 0; i < tbl->nr_pools; i++) - spin_unlock(&tbl->pools[i].lock); - spin_unlock_irqrestore(&tbl->large_pool.lock, flags); -} -#endif - int iommu_add_device(struct iommu_table_group *table_group, struct device *dev) { /* @@ -1186,98 +1152,6 @@ EXPORT_SYMBOL_GPL(iommu_add_device); #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* - * A simple iommu_table_group_ops which only allows reusing the existing - * iommu_table. This handles VFIO for POWER7 or the nested KVM. - * The ops does not allow creating windows and only allows reusing the existing - * one if it matches table_group->tce32_start/tce32_size/page_shift. - */ -static unsigned long spapr_tce_get_table_size(__u32 page_shift, - __u64 window_size, __u32 levels) -{ - unsigned long size; - - if (levels > 1) - return ~0U; - size = window_size >> (page_shift - 3); - return size; -} - -static long spapr_tce_create_table(struct iommu_table_group *table_group, int num, - __u32 page_shift, __u64 window_size, __u32 levels, - struct iommu_table **ptbl) -{ - struct iommu_table *tbl = table_group->tables[0]; - - if (num > 0) - return -EPERM; - - if (tbl->it_page_shift != page_shift || - tbl->it_size != (window_size >> page_shift) || - tbl->it_indirect_levels != levels - 1) - return -EINVAL; - - *ptbl = iommu_tce_table_get(tbl); - return 0; -} - -static long spapr_tce_set_window(struct iommu_table_group *table_group, - int num, struct iommu_table *tbl) -{ - return tbl == table_group->tables[num] ? 
0 : -EPERM; -} - -static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num) -{ - return 0; -} - -static long spapr_tce_take_ownership(struct iommu_table_group *table_group) -{ - int i, j, rc = 0; - - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { - struct iommu_table *tbl = table_group->tables[i]; - - if (!tbl || !tbl->it_map) - continue; - - rc = iommu_take_ownership(tbl); - if (!rc) - continue; - - for (j = 0; j < i; ++j) - iommu_release_ownership(table_group->tables[j]); - return rc; - } - return 0; -} - -static void spapr_tce_release_ownership(struct iommu_table_group *table_group) -{ - int i; - - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { - struct iommu_table *tbl = table_group->tables[i]; - - if (!tbl) - continue; - - iommu_table_clear(tbl); - if (tbl->it_map) - iommu_release_ownership(tbl); - } -} - -struct iommu_table_group_ops spapr_tce_table_group_ops = { - .get_table_size = spapr_tce_get_table_size, - .create_table = spapr_tce_create_table, - .set_window = spapr_tce_set_window, - .unset_window = spapr_tce_unset_window, - .take_ownership = spapr_tce_take_ownership, - .release_ownership = spapr_tce_release_ownership, -}; - -/* * A simple iommu_ops to allow less cruft in generic VFIO code. */ static int @@ -1285,21 +1159,20 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain, struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_group *grp = iommu_group_get(dev); struct iommu_table_group *table_group; + struct iommu_group *grp; /* At first attach the ownership is already set */ - if (!domain) { - iommu_group_put(grp); + if (!domain) return 0; - } + grp = iommu_group_get(dev); table_group = iommu_group_get_iommudata(grp); /* * The domain being set to PLATFORM from earlier * BLOCKED. The table_group ownership has to be released. 
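Two things change in the spapr_tce_platform_iommu_attach_dev() hunk around this point: the ownership callbacks now take the device as an extra argument, and iommu_group_get() moves after the early return, so the first-attach path no longer takes and drops a group reference it never uses. The refcount pattern, sketched with the names from the hunk:

    struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
    struct iommu_table_group *table_group;
    struct iommu_group *grp;

    if (!domain)
            return 0;                  /* first attach: nothing to look up */

    grp = iommu_group_get(dev);        /* take the reference only when needed */
    table_group = iommu_group_get_iommudata(grp);
    table_group->ops->release_ownership(table_group, dev);
    iommu_group_put(grp);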
*/ - table_group->ops->release_ownership(table_group); + table_group->ops->release_ownership(table_group, dev); iommu_group_put(grp); return 0; @@ -1327,7 +1200,7 @@ spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain, * also sets the dma_api ops */ table_group = iommu_group_get_iommudata(grp); - ret = table_group->ops->take_ownership(table_group); + ret = table_group->ops->take_ownership(table_group, dev); iommu_group_put(grp); return ret; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 7504ceec5c58..a0e8b998c9b5 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -89,69 +89,69 @@ int arch_show_interrupts(struct seq_file *p, int prec) #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) if (tau_initialized) { - seq_printf(p, "%*s: ", prec, "TAU"); + seq_printf(p, "%*s:", prec, "TAU"); for_each_online_cpu(j) - seq_printf(p, "%10u ", tau_interrupts(j)); + seq_put_decimal_ull_width(p, " ", tau_interrupts(j), 10); seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); } #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ - seq_printf(p, "%*s: ", prec, "LOC"); + seq_printf(p, "%*s:", prec, "LOC"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_event, 10); seq_printf(p, " Local timer interrupts for timer event device\n"); - seq_printf(p, "%*s: ", prec, "BCT"); + seq_printf(p, "%*s:", prec, "BCT"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).broadcast_irqs_event, 10); seq_printf(p, " Broadcast timer interrupts for timer event device\n"); - seq_printf(p, "%*s: ", prec, "LOC"); + seq_printf(p, "%*s:", prec, "LOC"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_others, 10); seq_printf(p, " Local timer interrupts for others\n"); - seq_printf(p, "%*s: ", prec, "SPU"); + seq_printf(p, "%*s:", prec, "SPU"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).spurious_irqs, 10); seq_printf(p, " Spurious interrupts\n"); - seq_printf(p, "%*s: ", prec, "PMI"); + seq_printf(p, "%*s:", prec, "PMI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).pmu_irqs, 10); seq_printf(p, " Performance monitoring interrupts\n"); - seq_printf(p, "%*s: ", prec, "MCE"); + seq_printf(p, "%*s:", prec, "MCE"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).mce_exceptions, 10); seq_printf(p, " Machine check exceptions\n"); #ifdef CONFIG_PPC_BOOK3S_64 if (cpu_has_feature(CPU_FTR_HVMODE)) { - seq_printf(p, "%*s: ", prec, "HMI"); + seq_printf(p, "%*s:", prec, "HMI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs); + seq_put_decimal_ull_width(p, " ", paca_ptrs[j]->hmi_irqs, 10); seq_printf(p, " Hypervisor Maintenance Interrupts\n"); } #endif - seq_printf(p, "%*s: ", prec, "NMI"); + seq_printf(p, "%*s:", prec, "NMI"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).sreset_irqs, 10); seq_printf(p, " System Reset interrupts\n"); #ifdef CONFIG_PPC_WATCHDOG - seq_printf(p, "%*s: 
", prec, "WDG"); + seq_printf(p, "%*s:", prec, "WDG"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).soft_nmi_irqs, 10); seq_printf(p, " Watchdog soft-NMI interrupts\n"); #endif #ifdef CONFIG_PPC_DOORBELL if (cpu_has_feature(CPU_FTR_DBELL)) { - seq_printf(p, "%*s: ", prec, "DBL"); + seq_printf(p, "%*s:", prec, "DBL"); for_each_online_cpu(j) - seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); + seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).doorbell_irqs, 10); seq_printf(p, " Doorbell interrupts\n"); } #endif @@ -333,7 +333,7 @@ void __init init_IRQ(void) static_call_update(ppc_get_irq, ppc_md.get_irq); } -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE void *critirq_ctx[NR_CPUS] __read_mostly; void *dbgirq_ctx[NR_CPUS] __read_mostly; void *mcheckirq_ctx[NR_CPUS] __read_mostly; diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index 5277cf582c16..2659e1ac8604 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -5,7 +5,7 @@ #include <linux/kernel.h> #include <linux/jump_label.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/inst.h> void arch_jump_label_transform(struct jump_entry *entry, diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index ebe4d1645ca1..5081334b7bd2 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -21,7 +21,7 @@ #include <asm/processor.h> #include <asm/machdep.h> #include <asm/debug.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <linux/slab.h> #include <asm/inst.h> @@ -45,7 +45,7 @@ static struct hard_trap_info { 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */ { 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */ { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */ -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */ #if defined(CONFIG_PPC_85xx) { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */ @@ -64,7 +64,7 @@ static struct hard_trap_info { 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */ { 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */ #endif -#else /* !CONFIG_BOOKE_OR_40x */ +#else /* !CONFIG_BOOKE */ { 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */ #if defined(CONFIG_PPC_8xx) { 0x1000, 0x04 /* SIGILL */ }, /* software emulation */ diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 072ebe7f290b..f8208c027148 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, struct pt_regs *regs; int bit; + if (unlikely(kprobe_ftrace_disabled)) + return; + bit = ftrace_test_recursion_trylock(nip, parent_nip); if (bit < 0) return; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index bbca90a5e2ec..c0d9f12cb441 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -19,9 +19,9 @@ #include <linux/extable.h> #include <linux/kdebug.h> #include <linux/slab.h> -#include <linux/moduleloader.h> #include <linux/set_memory.h> -#include <asm/code-patching.h> +#include <linux/execmem.h> +#include <asm/text-patching.h> #include <asm/cacheflush.h> #include <asm/sstep.h> #include <asm/sections.h> @@ -105,47 +105,25 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) return addr; } -static bool 
arch_kprobe_on_func_entry(unsigned long offset) +static bool arch_kprobe_on_func_entry(unsigned long addr, unsigned long offset) { -#ifdef CONFIG_PPC64_ELF_ABI_V2 -#ifdef CONFIG_KPROBES_ON_FTRACE - return offset <= 16; -#else - return offset <= 8; -#endif -#else + unsigned long ip = ftrace_location(addr); + + if (ip) + return offset <= (ip - addr); + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + return offset <= 8; return !offset; -#endif } /* XXX try and fold the magic of kprobe_lookup_name() in this */ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry) { - *on_func_entry = arch_kprobe_on_func_entry(offset); + *on_func_entry = arch_kprobe_on_func_entry(addr, offset); return (kprobe_opcode_t *)(addr + offset); } -void *alloc_insn_page(void) -{ - void *page; - - page = module_alloc(PAGE_SIZE); - if (!page) - return NULL; - - if (strict_module_rwx_enabled()) { - int err = set_memory_rox((unsigned long)page, 1); - - if (err) - goto error; - } - return page; -error: - module_memfree(page); - return NULL; -} - int arch_prepare_kprobe(struct kprobe *p) { int ret = 0; @@ -248,16 +226,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs kcb->kprobe_saved_msr = regs->msr; } -void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) -{ - ri->ret_addr = (kprobe_opcode_t *)regs->link; - ri->fp = NULL; - - /* Replace the return addr with trampoline addr */ - regs->link = (unsigned long)__kretprobe_trampoline; -} -NOKPROBE_SYMBOL(arch_prepare_kretprobe); - static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) { int ret; @@ -415,49 +383,6 @@ no_kprobe: NOKPROBE_SYMBOL(kprobe_handler); /* - * Function return probe trampoline: - * - init_kprobes() establishes a probepoint here - * - When the probed function returns, this probe - * causes the handlers to fire - */ -asm(".global __kretprobe_trampoline\n" - ".type __kretprobe_trampoline, @function\n" - "__kretprobe_trampoline:\n" - "nop\n" - "blr\n" - ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"); - -/* - * Called when the probe at kretprobe trampoline is hit - */ -static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) -{ - unsigned long orig_ret_address; - - orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); - /* - * We get here through one of two paths: - * 1. by taking a trap -> kprobe_handler() -> here - * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here - * - * When going back through (1), we need regs->nip to be setup properly - * as it is used to determine the return address from the trap. - * For (2), since nip is not honoured with optprobes, we instead setup - * the link register properly so that the subsequent 'blr' in - * __kretprobe_trampoline jumps back to the right instruction. - * - * For nip, we should set the address to the previous instruction since - * we end up emulating it in kprobe_handler(), which increments the nip - * again. - */ - regs_set_return_ip(regs, orig_ret_address - 4); - regs->link = orig_ret_address; - - return 0; -} -NOKPROBE_SYMBOL(trampoline_probe_handler); - -/* * Called after single-stepping. p->addr is the address of the * instruction whose first byte has been replaced by the "breakpoint" * instruction. 
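The arch_kprobe_on_func_entry() rewrite above replaces hard-coded ABI offsets with a query of the ftrace site: if the function has a recorded ftrace location, any offset up to that patch site still counts as function entry; only without one does the code fall back to the static ELFv2 rule, where the local entry point sits 8 bytes past the global entry point (after the two TOC-setup instructions). Annotated sketch of the new logic:

    unsigned long ip = ftrace_location(addr);   /* 0 if addr has no ftrace site */

    if (ip)
            return offset <= (ip - addr);       /* entry extends to the patch site */
    if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
            return offset <= 8;                 /* global entry + TOC setup insns */
    return !offset;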
To avoid the SMP problems that can occur when we @@ -559,19 +484,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) } NOKPROBE_SYMBOL(kprobe_fault_handler); -static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, - .pre_handler = trampoline_probe_handler -}; - -int __init arch_init_kprobes(void) -{ - return register_kprobe(&trampoline_p); -} - int arch_trampoline_kprobe(struct kprobe *p) { - if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline) return 1; return 0; diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 2eabb15687a6..acb727f54e9d 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -28,32 +28,6 @@ .text /* - * This returns the high 64 bits of the product of two 64-bit numbers. - */ -_GLOBAL(mulhdu) - cmpwi r6,0 - cmpwi cr1,r3,0 - mr r10,r4 - mulhwu r4,r4,r5 - beq 1f - mulhwu r0,r10,r6 - mullw r7,r10,r5 - addc r7,r0,r7 - addze r4,r4 -1: beqlr cr1 /* all done if high part of A is 0 */ - mullw r9,r3,r5 - mulhwu r10,r3,r5 - beq 2f - mullw r0,r3,r6 - mulhwu r8,r3,r6 - addc r7,r0,r7 - adde r4,r4,r8 - addze r10,r10 -2: addc r4,r4,r9 - addze r3,r10 - blr - -/* * reloc_got2 runs through the .got2 section adding an offset * to each entry. */ @@ -176,46 +150,6 @@ _GLOBAL(low_choose_7447a_dfs) #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */ -#ifdef CONFIG_40x - -/* - * Do an IO access in real mode - */ -_GLOBAL(real_readb) - mfmsr r7 - rlwinm r0,r7,0,~MSR_DR - sync - mtmsr r0 - sync - isync - lbz r3,0(r3) - sync - mtmsr r7 - sync - isync - blr -_ASM_NOKPROBE_SYMBOL(real_readb) - - /* - * Do an IO access in real mode - */ -_GLOBAL(real_writeb) - mfmsr r7 - rlwinm r0,r7,0,~MSR_DR - sync - mtmsr r0 - sync - isync - stb r3,0(r4) - sync - mtmsr r7 - sync - isync - blr -_ASM_NOKPROBE_SYMBOL(real_writeb) - -#endif /* CONFIG_40x */ - /* * Copy a whole page. We use the dcbz instruction on the destination * to reduce memory traffic (it eliminates the unnecessary reads of diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 1a8cdafd68e8..a997c7f43dc0 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -74,7 +74,7 @@ _GLOBAL(rmci_off) blr #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */ -#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) +#ifdef CONFIG_PPC_PMAC /* * Do an IO access in real mode @@ -137,7 +137,7 @@ _GLOBAL(real_writeb) sync isync blr -#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ +#endif // CONFIG_PPC_PMAC #ifdef CONFIG_PPC_PASEMI @@ -174,7 +174,7 @@ _GLOBAL(real_205_writeb) #endif /* CONFIG_PPC_PASEMI */ -#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE) +#ifdef CONFIG_CPU_FREQ_PMAC64 /* * SCOM access functions for 970 (FX only for now) * @@ -192,7 +192,7 @@ _GLOBAL(scom970_read) xori r0,r0,MSR_EE mtmsrd r0,1 - /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits + /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits * (including parity). On current CPUs they must be 0'd, * and finally or in RW bit */ @@ -226,7 +226,7 @@ _GLOBAL(scom970_write) xori r0,r0,MSR_EE mtmsrd r0,1 - /* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits + /* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits * (including parity). On current CPUs they must be 0'd. 
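For reference, the mulhdu routine deleted from misc_32.S above computed the high 64 bits of a 64x64-bit product out of 32-bit multiply steps. A C equivalent of what the assembly did, as a sketch for documentation rather than the kernel's replacement:

    static u64 mulhdu(u64 a, u64 b)
    {
            u32 ah = a >> 32, al = a, bh = b >> 32, bl = b;
            u64 lo  = (u64)al * bl;
            u64 m1  = (u64)al * bh;
            u64 m2  = (u64)ah * bl;
            u64 hi  = (u64)ah * bh;
            /* fold the carries out of the two middle partial products */
            u64 mid = (lo >> 32) + (u32)m1 + (u32)m2;

            return hi + (m1 >> 32) + (m2 >> 32) + (mid >> 32);
    }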
*/ @@ -243,7 +243,7 @@ _GLOBAL(scom970_write) /* restore interrupts */ mtmsrd r5,1 blr -#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */ +#endif // CONFIG_CPU_FREQ_PMAC64 /* kexec_wait(phys_cpu) * diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index f6d6ae0a1692..baeb24c102c8 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -7,7 +7,6 @@ #include <linux/elf.h> #include <linux/moduleloader.h> #include <linux/err.h> -#include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/bug.h> #include <asm/module.h> @@ -17,8 +16,6 @@ #include <asm/setup.h> #include <asm/sections.h> -static LIST_HEAD(module_bug_list); - static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, const char *name) @@ -88,40 +85,3 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } - -static __always_inline void * -__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn) -{ - pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC; - gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0); - - /* - * Don't do huge page allocations for modules yet until more testing - * is done. STRICT_MODULE_RWX may require extra work to support this - * too. - */ - return __vmalloc_node_range(size, 1, start, end, gfp, prot, - VM_FLUSH_RESET_PERMS, - NUMA_NO_NODE, __builtin_return_address(0)); -} - -void *module_alloc(unsigned long size) -{ -#ifdef MODULES_VADDR - unsigned long limit = (unsigned long)_etext - SZ_32M; - void *ptr = NULL; - - BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); - - /* First try within 32M limit from _etext to avoid branch trampolines */ - if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) - ptr = __module_alloc(size, limit, MODULES_END, true); - - if (!ptr) - ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false); - - return ptr; -#else - return __module_alloc(size, VMALLOC_START, VMALLOC_END, false); -#endif -} diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 816a63fd71fb..f930e3395a7f 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -18,7 +18,7 @@ #include <linux/bug.h> #include <linux/sort.h> #include <asm/setup.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> /* Count how many different relocations (different symbol, different addend) */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7112adc597a8..126bf3b06ab7 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -17,7 +17,7 @@ #include <linux/kernel.h> #include <asm/module.h> #include <asm/firmware.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <linux/sort.h> #include <asm/setup.h> #include <asm/sections.h> @@ -205,7 +205,9 @@ static int relacmp(const void *_x, const void *_y) /* Get size of potential trampolines required. 
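Context for the module_alloc() removal above: module text was first placed within 32 MB below _etext because the PowerPC b/bl instructions carry a 24-bit signed word offset, giving a reach of +/- 32 MB, and staying in range avoids branch trampolines. That placement policy now lives in the generic execmem infrastructure. A quick illustration of the limit (needs_trampoline() below is a hypothetical helper, not kernel API):

    /* b/bl: 24-bit signed LI field, shifted left by 2 => +/- 2^25 bytes */
    #define PPC_BRANCH_REACH   (1UL << 25)               /* 32 MB */

    static bool needs_trampoline(unsigned long from, unsigned long to)
    {
            long delta = (long)(to - from);

            return delta < -(long)PPC_BRANCH_REACH ||
                   delta >= (long)PPC_BRANCH_REACH;
    }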
*/ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, - const Elf64_Shdr *sechdrs) + const Elf64_Shdr *sechdrs, + char *secstrings, + struct module *me) { /* One extra reloc so it's always 0-addr terminated */ unsigned long relocs = 1; @@ -241,13 +243,21 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, } } -#ifdef CONFIG_DYNAMIC_FTRACE - /* make the trampoline to the ftrace_caller */ - relocs++; -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - /* an additional one for ftrace_regs_caller */ - relocs++; -#endif + /* stubs for ftrace_caller and ftrace_regs_caller */ + relocs += IS_ENABLED(CONFIG_DYNAMIC_FTRACE) + IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS); + +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + /* stubs for the function tracer */ + for (i = 1; i < hdr->e_shnum; i++) { + if (!strcmp(secstrings + sechdrs[i].sh_name, "__patchable_function_entries")) { + me->arch.ool_stub_count = sechdrs[i].sh_size / sizeof(unsigned long); + me->arch.ool_stub_index = 0; + relocs += roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub), + sizeof(struct ppc64_stub_entry)) / + sizeof(struct ppc64_stub_entry); + break; + } + } #endif pr_debug("Looks like a total of %lu stubs, max\n", relocs); @@ -355,6 +365,24 @@ static void dedotify_versions(struct modversion_info *vers, } } +/* Same as normal versions, remove a leading dot if present. */ +static void dedotify_ext_version_names(char *str_seq, unsigned long size) +{ + unsigned long out = 0; + unsigned long in; + char last = '\0'; + + for (in = 0; in < size; in++) { + /* Skip one leading dot */ + if (last == '\0' && str_seq[in] == '.') + in++; + last = str_seq[in]; + str_seq[out++] = last; + } + /* Zero the trailing portion of the names table for robustness */ + memset(&str_seq[out], 0, size - out); +} + /* * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC. * seem to be defined (value set later). 
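The new dedotify_ext_version_names() above walks a NUL-separated name table, drops one leading dot from each entry, compacts in place, and zeroes the vacated tail so no stale bytes survive. Traced on a small input:

    char buf[] = ".foo\0.bar\0";           /* 11 bytes incl. trailing NUL */

    dedotify_ext_version_names(buf, sizeof(buf));
    /* buf now holds "foo\0bar\0" followed by zero padding: each name lost
       its leading '.', and the freed bytes at the end were memset() to 0 */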
@@ -424,10 +452,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, me->arch.toc_section = i; if (sechdrs[i].sh_addralign < 8) sechdrs[i].sh_addralign = 8; - } - else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0) + } else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0) dedotify_versions((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size); + else if (strcmp(secstrings + sechdrs[i].sh_name, "__version_ext_names") == 0) + dedotify_ext_version_names((void *)hdr + sechdrs[i].sh_offset, + sechdrs[i].sh_size); if (sechdrs[i].sh_type == SHT_SYMTAB) dedotify((void *)hdr + sechdrs[i].sh_offset, @@ -460,7 +490,7 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, #endif /* Override the stubs size */ - sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs); + sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs, secstrings, me); return 0; } @@ -651,12 +681,11 @@ static inline int create_stub(const Elf64_Shdr *sechdrs, // func_desc_t is 8 bytes if ABIv2, else 16 bytes desc = func_desc(addr); for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) { - if (patch_instruction(((u32 *)&entry->funcdata) + i, - ppc_inst(((u32 *)(&desc))[i]))) + if (patch_u32(((u32 *)&entry->funcdata) + i, ((u32 *)&desc)[i])) return 0; } - if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC))) + if (patch_u32(&entry->magic, STUB_MAGIC)) return 0; return 1; @@ -1086,6 +1115,37 @@ int module_trampoline_target(struct module *mod, unsigned long addr, return 0; } +static int setup_ftrace_ool_stubs(const Elf64_Shdr *sechdrs, unsigned long addr, struct module *me) +{ +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + unsigned int i, total_stubs, num_stubs; + struct ppc64_stub_entry *stub; + + total_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stub); + num_stubs = roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub), + sizeof(struct ppc64_stub_entry)) / sizeof(struct ppc64_stub_entry); + + /* Find the next available entry */ + stub = (void *)sechdrs[me->arch.stubs_section].sh_addr; + for (i = 0; stub_func_addr(stub[i].funcdata); i++) + if (WARN_ON(i >= total_stubs)) + return -1; + + if (WARN_ON(i + num_stubs > total_stubs)) + return -1; + + stub += i; + me->arch.ool_stubs = (struct ftrace_ool_stub *)stub; + + /* reserve stubs */ + for (i = 0; i < num_stubs; i++) + if (patch_u32((void *)&stub->funcdata, PPC_RAW_NOP())) + return -1; +#endif + + return 0; +} + int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) { mod->arch.tramp = stub_for_addr(sechdrs, @@ -1104,6 +1164,9 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) if (!mod->arch.tramp) return -ENOENT; + if (setup_ftrace_ool_stubs(sechdrs, mod->arch.tramp, mod)) + return -ENOENT; + return 0; } #endif diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index e385d3164648..f9c6568a9137 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -73,7 +73,7 @@ static const char *nvram_os_partitions[] = { }; static void oops_to_nvram(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason); + struct kmsg_dump_detail *detail); static struct kmsg_dumper nvram_kmsg_dumper = { .dump = oops_to_nvram @@ -643,7 +643,7 @@ void __init nvram_init_oops_partition(int rtas_partition_exists) * partition. If that's too much, go back and capture uncompressed text. 
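One detail in the create_stub() hunk above: entry->funcdata holds a function descriptor, i.e. data rather than executable instructions, so each word is now written with patch_u32() instead of being laundered through ppc_inst()/patch_instruction(); the magic marker gets the same treatment. Sketch of the difference, where word stands in for ((u32 *)&desc)[i]:

    /* before: pretend descriptor words are instructions */
    patch_instruction(((u32 *)&entry->funcdata) + i, ppc_inst(word));
    /* after: store the 32-bit datum directly */
    patch_u32(((u32 *)&entry->funcdata) + i, word);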
*/ static void oops_to_nvram(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; static unsigned int oops_count = 0; @@ -655,7 +655,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ; int rc = -1; - switch (reason) { + switch (detail->reason) { case KMSG_DUMP_SHUTDOWN: /* These are almost always orderly shutdowns. */ return; @@ -671,7 +671,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, break; default: pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", - __func__, (int) reason); + __func__, (int) detail->reason); return; } diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c deleted file mode 100644 index adc76fa58d1e..000000000000 --- a/arch/powerpc/kernel/of_platform.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. - * <benh@kernel.crashing.org> - * and Arnd Bergmann, IBM Corp. - */ - -#undef DEBUG - -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/export.h> -#include <linux/mod_devicetable.h> -#include <linux/pci.h> -#include <linux/platform_device.h> -#include <linux/atomic.h> - -#include <asm/errno.h> -#include <asm/topology.h> -#include <asm/pci-bridge.h> -#include <asm/ppc-pci.h> -#include <asm/eeh.h> - -#ifdef CONFIG_PPC_OF_PLATFORM_PCI - -/* The probing of PCI controllers from of_platform is currently - * 64 bits only, mostly due to gratuitous differences between - * the 32 and 64 bits PCI code on PowerPC and the 32 bits one - * lacking some bits needed here. - */ - -static int of_pci_phb_probe(struct platform_device *dev) -{ - struct pci_controller *phb; - - /* Check if we can do that ... */ - if (ppc_md.pci_setup_phb == NULL) - return -ENODEV; - - pr_info("Setting up PCI bus %pOF\n", dev->dev.of_node); - - /* Alloc and setup PHB data structure */ - phb = pcibios_alloc_controller(dev->dev.of_node); - if (!phb) - return -ENODEV; - - /* Setup parent in sysfs */ - phb->parent = &dev->dev; - - /* Setup the PHB using arch provided callback */ - if (ppc_md.pci_setup_phb(phb)) { - pcibios_free_controller(phb); - return -ENODEV; - } - - /* Process "ranges" property */ - pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0); - - /* Init pci_dn data structures */ - pci_devs_phb_init_dynamic(phb); - - /* Create EEH PE for the PHB */ - eeh_phb_pe_create(phb); - - /* Scan the bus */ - pcibios_scan_phb(phb); - if (phb->bus == NULL) - return -ENXIO; - - /* Claim resources. This might need some rework as well depending - * whether we are doing probe-only or not, like assigning unassigned - * resources etc... 
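The oops_to_nvram() changes above follow a core printk API change: kmsg dumper callbacks now receive a struct kmsg_dump_detail instead of a bare reason code. A minimal dumper against the new signature might look like the following sketch, assuming only the reason field of the detail structure, which is all the hunk relies on:

    static void my_dump(struct kmsg_dumper *dumper,
                        struct kmsg_dump_detail *detail)
    {
            if (detail->reason != KMSG_DUMP_PANIC)
                    return;               /* this sketch only captures panics */
            /* ... pull log text with kmsg_dump_get_buffer() ... */
    }

    static struct kmsg_dumper my_dumper = { .dump = my_dump };
    /* registered elsewhere with kmsg_dump_register(&my_dumper) */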
- */ - pcibios_claim_one_bus(phb->bus); - - /* Add probed PCI devices to the device model */ - pci_bus_add_devices(phb->bus); - - return 0; -} - -static const struct of_device_id of_pci_phb_ids[] = { - { .type = "pci", }, - { .type = "pcix", }, - { .type = "pcie", }, - { .type = "pciex", }, - { .type = "ht", }, - {} -}; - -static struct platform_driver of_pci_phb_driver = { - .probe = of_pci_phb_probe, - .driver = { - .name = "of-pci", - .of_match_table = of_pci_phb_ids, - }, -}; - -builtin_platform_driver(of_pci_phb_driver); - -#endif /* CONFIG_PPC_OF_PLATFORM_PCI */ diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 004fae2044a3..2e83702bf9ba 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -13,7 +13,7 @@ #include <asm/kprobes.h> #include <asm/ptrace.h> #include <asm/cacheflush.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/sstep.h> #include <asm/ppc-opcode.h> #include <asm/inst.h> @@ -56,7 +56,7 @@ static unsigned long can_optimize(struct kprobe *p) * has a 'nop' instruction, which can be emulated. * So further checks can be skipped. */ - if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) + if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline) return addr + sizeof(kprobe_opcode_t); /* diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index d95a48eff412..eac84d687b53 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -517,7 +517,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma) } /* - * This one is used by /dev/mem and fbdev who have no clue about the + * This one is used by /dev/mem and video who have no clue about the * PCI device, it tries to find the PCI device first and calls the * above routine */ diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index 0fe251c6ac2c..9ea74973d78d 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -93,6 +93,36 @@ void pci_hp_remove_devices(struct pci_bus *bus) } EXPORT_SYMBOL_GPL(pci_hp_remove_devices); +static void traverse_siblings_and_scan_slot(struct device_node *start, struct pci_bus *bus) +{ + struct device_node *dn; + int slotno; + + u32 class = 0; + + if (!of_property_read_u32(start->child, "class-code", &class)) { + /* Call of pci_scan_slot for non-bridge/EP case */ + if (!((class >> 8) == PCI_CLASS_BRIDGE_PCI)) { + slotno = PCI_SLOT(PCI_DN(start->child)->devfn); + pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); + return; + } + } + + /* Iterate all siblings */ + for_each_child_of_node(start, dn) { + class = 0; + + if (!of_property_read_u32(start->child, "class-code", &class)) { + /* Call of pci_scan_slot on each sibling-nodes/bridge-ports */ + if ((class >> 8) == PCI_CLASS_BRIDGE_PCI) { + slotno = PCI_SLOT(PCI_DN(dn)->devfn); + pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); + } + } + } +} + /** * pci_hp_add_devices - adds new pci devices to bus * @bus: the indicated PCI bus @@ -106,7 +136,7 @@ EXPORT_SYMBOL_GPL(pci_hp_remove_devices); */ void pci_hp_add_devices(struct pci_bus *bus) { - int slotno, mode, max; + int mode, max; struct pci_dev *dev; struct pci_controller *phb; struct device_node *dn = pci_bus_to_OF_node(bus); @@ -129,8 +159,7 @@ void pci_hp_add_devices(struct pci_bus *bus) * order for fully rescan all the way down to pick them up. * They can have been removed during partial hotplug. 
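In traverse_siblings_and_scan_slot() above, the device-tree "class-code" property is the 24-bit PCI class word, base class << 16 | subclass << 8 | prog-if, so shifting right by 8 leaves the base/subclass pair that PCI_CLASS_BRIDGE_PCI (0x0604) matches. Worked example:

    u32 class = 0x060400;    /* PCI-to-PCI bridge, prog-if 0x00 */
    bool bridge = (class >> 8) == PCI_CLASS_BRIDGE_PCI;   /* 0x0604: true */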
*/ - slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); - pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); + traverse_siblings_and_scan_slot(dn, bus); max = bus->busn_res.start; /* * Scan bridges that are already configured. We don't touch diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index ce0c8623e563..f8a3bd8cfae4 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -213,11 +213,8 @@ pci_create_OF_bus_map(void) struct property* of_prop; struct device_node *dn; - of_prop = memblock_alloc(sizeof(struct property) + 256, + of_prop = memblock_alloc_or_panic(sizeof(struct property) + 256, SMP_CACHE_BYTES); - if (!of_prop) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct property) + 256); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c index b109cd7b5d01..d083b4517065 100644 --- a/arch/powerpc/kernel/proc_powerpc.c +++ b/arch/powerpc/kernel/proc_powerpc.c @@ -4,17 +4,20 @@ */ #include <linux/init.h> +#include <linux/memblock.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/string.h> #include <asm/machdep.h> #include <asm/vdso_datapage.h> #include <asm/rtas.h> +#include <asm/systemcfg.h> #include <linux/uaccess.h> -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG static loff_t page_map_seek(struct file *file, loff_t off, int whence) { @@ -33,10 +36,9 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) return -EINVAL; - remap_pfn_range(vma, vma->vm_start, - __pa(pde_data(file_inode(file))) >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot); - return 0; + return remap_pfn_range(vma, vma->vm_start, + __pa(pde_data(file_inode(file))) >> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot); } static const struct proc_ops page_map_proc_ops = { @@ -45,13 +47,35 @@ static const struct proc_ops page_map_proc_ops = { .proc_mmap = page_map_mmap, }; +static union { + struct systemcfg data; + u8 page[PAGE_SIZE]; +} systemcfg_data_store __page_aligned_data; +struct systemcfg *systemcfg = &systemcfg_data_store.data; static int __init proc_ppc64_init(void) { struct proc_dir_entry *pde; + strscpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); + systemcfg->version.major = SYSTEMCFG_MAJOR; + systemcfg->version.minor = SYSTEMCFG_MINOR; + systemcfg->processor = mfspr(SPRN_PVR); + /* + * Fake the old platform number for pSeries and add + * in LPAR bit if necessary + */ + systemcfg->platform = 0x100; + if (firmware_has_feature(FW_FEATURE_LPAR)) + systemcfg->platform |= 1; + systemcfg->physicalMemorySize = memblock_phys_mem_size(); + systemcfg->dcache_size = ppc64_caches.l1d.size; + systemcfg->dcache_line_size = ppc64_caches.l1d.line_size; + systemcfg->icache_size = ppc64_caches.l1i.size; + systemcfg->icache_line_size = ppc64_caches.l1i.line_size; + pde = proc_create_data("powerpc/systemcfg", S_IFREG | 0444, NULL, - &page_map_proc_ops, vdso_data); + &page_map_proc_ops, systemcfg); if (!pde) return 1; proc_set_size(pde, PAGE_SIZE); @@ -60,7 +84,7 @@ static int __init proc_ppc64_init(void) } __initcall(proc_ppc64_init); -#endif /* CONFIG_PPC64 */ +#endif /* CONFIG_PPC64_PROC_SYSTEMCFG */ /* * Create the ppc64 and ppc64/rtas directories early. 
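The proc_powerpc.c rework above gives /proc/powerpc/systemcfg its own backing page instead of exporting the vDSO data page. The union is the interesting part: it pads the object to exactly PAGE_SIZE, and __page_aligned_data aligns it, so the single-page remap_pfn_range() in page_map_mmap() can never expose unrelated data that would otherwise share the page:

    static union {
            struct systemcfg data;        /* the real structure             */
            u8 page[PAGE_SIZE];           /* pads the object to a full page */
    } systemcfg_data_store __page_aligned_data;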
This allows us to diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 9452a54d356c..855e09886503 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -54,7 +54,7 @@ #include <asm/firmware.h> #include <asm/hw_irq.h> #endif -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/exec.h> #include <asm/livepatch.h> #include <asm/cpu_has_feature.h> @@ -72,8 +72,6 @@ #define TM_DEBUG(x...) do { } while(0) #endif -extern unsigned long _get_SP(void); - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Are we running in "Suspend disabled" mode? If so we have to block any @@ -1002,7 +1000,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk) WARN_ON(tm_suspend_disabled); - TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " + TM_DEBUG("---- tm_reclaim on pid %d (NIP=%lx, " "ccr=%lx, msr=%lx, trap=%lx)\n", tsk->pid, thr->regs->nip, thr->regs->ccr, thr->regs->msr, @@ -1010,7 +1008,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk) tm_reclaim_thread(thr, TM_CAUSE_RESCHED); - TM_DEBUG("--- tm_reclaim on pid %d complete\n", + TM_DEBUG("---- tm_reclaim on pid %d complete\n", tsk->pid); out_and_saveregs: @@ -1185,6 +1183,9 @@ static inline void save_sprs(struct thread_struct *t) if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) t->hashkeyr = mfspr(SPRN_HASHKEYR); + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + t->dexcr = mfspr(SPRN_DEXCR); #endif } @@ -1267,6 +1268,10 @@ static inline void restore_sprs(struct thread_struct *old_thread, if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && old_thread->hashkeyr != new_thread->hashkeyr) mtspr(SPRN_HASHKEYR, new_thread->hashkeyr); + + if (cpu_has_feature(CPU_FTR_ARCH_31) && + old_thread->dexcr != new_thread->dexcr) + mtspr(SPRN_DEXCR, new_thread->dexcr); #endif } @@ -1566,7 +1571,7 @@ static void __show_regs(struct pt_regs *regs) if (trap == INTERRUPT_MACHINE_CHECK || trap == INTERRUPT_DATA_STORAGE || trap == INTERRUPT_ALIGNMENT) { - if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE)) + if (IS_ENABLED(CONFIG_BOOKE)) pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr); else pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); @@ -1634,6 +1639,13 @@ void arch_setup_new_exec(void) current->thread.regs->amr = default_amr; current->thread.regs->iamr = default_iamr; #endif + +#ifdef CONFIG_PPC_BOOK3S_64 + if (cpu_has_feature(CPU_FTR_ARCH_31)) { + current->thread.dexcr = current->thread.dexcr_onexec; + mtspr(SPRN_DEXCR, current->thread.dexcr); + } +#endif /* CONFIG_PPC_BOOK3S_64 */ } #ifdef CONFIG_PPC64 @@ -1647,7 +1659,7 @@ void arch_setup_new_exec(void) * cases will happen: * * 1. The correct thread is running, the wrong thread is not - * In this situation, the correct thread is woken and proceeds to pass it's + * In this situation, the correct thread is woken and proceeds to pass its * condition check. * * 2. Neither threads are running @@ -1657,15 +1669,15 @@ void arch_setup_new_exec(void) * for the wrong thread, or they will execute the condition check immediately. * * 3. The wrong thread is running, the correct thread is not - * The wrong thread will be woken, but will fail it's condition check and + * The wrong thread will be woken, but will fail its condition check and * re-execute wait. 
The correct thread, when scheduled, will execute either - * it's condition check (which will pass), or wait, which returns immediately - * when called the first time after the thread is scheduled, followed by it's + * its condition check (which will pass), or wait, which returns immediately + * when called the first time after the thread is scheduled, followed by its * condition check (which will pass). * * 4. Both threads are running - * Both threads will be woken. The wrong thread will fail it's condition check - * and execute another wait, while the correct thread will pass it's condition + * Both threads will be woken. The wrong thread will fail its condition check + * and execute another wait, while the correct thread will pass its condition * check. * * @t: the task to set the thread ID for @@ -1861,7 +1873,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP) p->thread.kuap = KUAP_NONE; #endif -#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) +#if defined(CONFIG_BOOKE) && defined(CONFIG_PPC_KUAP) p->thread.pid = MMU_NO_CONTEXT; #endif @@ -1878,6 +1890,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) #ifdef CONFIG_PPC_BOOK3S_64 if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) p->thread.hashkeyr = current->thread.hashkeyr; + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + p->thread.dexcr = mfspr(SPRN_DEXCR); #endif return 0; } @@ -1945,8 +1960,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) * address of _start and the second entry is the TOC * value we need to use. */ - __get_user(entry, (unsigned long __user *)start); - __get_user(toc, (unsigned long __user *)start+1); + get_user(entry, (unsigned long __user *)start); + get_user(toc, (unsigned long __user *)start+1); /* Check whether the e_entry function descriptor entries * need to be relocated before we can use them. @@ -2160,10 +2175,10 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, return 0; } +#ifdef CONFIG_PPC64 static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, unsigned long nbytes) { -#ifdef CONFIG_PPC64 unsigned long stack_page; unsigned long cpu = task_cpu(p); @@ -2191,10 +2206,26 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) return 1; # endif -#endif return 0; } +#else +static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p, + unsigned long nbytes) +{ + unsigned long stack_page; + unsigned long cpu = task_cpu(p); + + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return 0; + + stack_page = (unsigned long)emergency_ctx[cpu] - THREAD_SIZE; + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) + return 1; + + return 0; +} +#endif /* * validate the stack frame of a particular minimum size, used for when we are @@ -2336,14 +2367,14 @@ void __no_sanitize_address show_stack(struct task_struct *tsk, (sp + STACK_INT_FRAME_REGS); lr = regs->link; - printk("%s--- interrupt: %lx at %pS\n", + printk("%s---- interrupt: %lx at %pS\n", loglvl, regs->trap, (void *)regs->nip); // Detect the case of an empty pt_regs at the very base // of the stack and suppress showing it in full. 
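A small hardening change in the start_thread() hunk above: __get_user() skips the access_ok() pointer check, while get_user() performs it, so reads through the user-supplied entry address now go through the checked variant:

    /* get_user() = access_ok() + __get_user(); it fails cleanly on a bad
       pointer instead of dereferencing an unchecked user address */
    get_user(entry, (unsigned long __user *)start);
    get_user(toc,   (unsigned long __user *)start + 1);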
if (!empty_user_regs(regs, tsk)) { __show_regs(regs); - printk("%s--- interrupt: %lx\n", loglvl, regs->trap); + printk("%s---- interrupt: %lx\n", loglvl, regs->trap); } firstframe = 1; diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 1dc32a058156..9ed9dde7d231 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -331,6 +331,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *cpu_version = NULL; const __be32 *prop; const __be32 *intserv; int i, nthreads; @@ -420,7 +421,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, prop = of_get_flat_dt_prop(node, "cpu-version", NULL); if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) { identify_cpu(0, be32_to_cpup(prop)); - seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop)); + cpu_version = prop; } check_cpu_feature_properties(node); @@ -431,6 +432,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node, } identical_pvr_fixup(node); + + // We can now add the CPU name & PVR to the hardware description + seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR)); + if (cpu_version) + seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(cpu_version)); + init_mmu_slb_size(node); #ifdef CONFIG_PPC64 @@ -475,7 +482,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node, tce_alloc_end = *lprop; #endif -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_RESERVE lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); if (lprop) crashk_res.start = *lprop; @@ -779,12 +786,12 @@ static inline void save_fscr_to_task(void) {} void __init early_init_devtree(void *params) { - phys_addr_t limit; + phys_addr_t int_vector_size; DBG(" -> early_init_devtree(%px)\n", params); /* Too early to BUG_ON(), do it by hand */ - if (!early_init_dt_verify(params)) + if (!early_init_dt_verify(params, __pa(params))) panic("BUG: Failed verifying flat device tree, bad version?"); of_scan_flat_dt(early_init_dt_scan_model, NULL); @@ -813,6 +820,9 @@ void __init early_init_devtree(void *params) */ of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line); + /* Append additional parameters passed for fadump capture kernel */ + fadump_append_bootargs(); + /* Scan memory nodes and rebuild MEMBLOCKs */ early_init_dt_scan_root(); early_init_dt_scan_memory_ppc(); @@ -832,9 +842,16 @@ void __init early_init_devtree(void *params) setup_initial_memory_limit(memstart_addr, first_memblock_size); /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START); +#ifdef CONFIG_PPC64 + /* If relocatable, reserve at least 32k for interrupt vectors etc. */ + int_vector_size = __end_interrupts - _stext; + int_vector_size = max_t(phys_addr_t, SZ_32K, int_vector_size); +#else /* If relocatable, reserve first 32k for interrupt vectors etc. */ + int_vector_size = SZ_32K; +#endif if (PHYSICAL_START > MEMORY_START) - memblock_reserve(MEMORY_START, 0x8000); + memblock_reserve(MEMORY_START, int_vector_size); reserve_kdump_trampoline(); #if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) /* @@ -843,12 +860,15 @@ void __init early_init_devtree(void *params) */ if (fadump_reserve_mem() == 0) #endif - reserve_crashkernel(); + arch_reserve_crashkernel(); early_reserve_mem(); - /* Ensure that total memory size is page-aligned. 
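In the early_init_dt_scan_cpus() hunk above, the mask test on the cpu-version property picks out "logical" PVR values, which firmware reports when the core runs in an architected compatibility mode rather than native mode; only those are appended to the hardware description, after the CPU name and real PVR. Sketch of the test:

    u32 pvr = be32_to_cpup(prop);
    /* 0x0fxxxxxx PVRs are logical (compat-mode) identifiers, not chip PVRs */
    bool logical_pvr = (pvr & 0xff000000) == 0x0f000000;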
*/ - limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); - memblock_enforce_memory_limit(limit); + if (memory_limit > memblock_phys_mem_size()) + memory_limit = 0; + + /* Align down to 16 MB which is large page size with hash page translation */ + memory_limit = ALIGN_DOWN(memory_limit ?: memblock_phys_mem_size(), SZ_16M); + memblock_enforce_memory_limit(memory_limit); #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES) if (!early_radix_enabled()) @@ -868,9 +888,6 @@ void __init early_init_devtree(void *params) dt_cpu_ftrs_scan(); - // We can now add the CPU name & PVR to the hardware description - seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR)); - /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ @@ -891,6 +908,9 @@ void __init early_init_devtree(void *params) mmu_early_init_devtree(); + /* Setup param area for passing additional parameters to fadump capture kernel. */ + fadump_setup_param_area(); + #ifdef CONFIG_PPC_POWERNV /* Scan and build the list of machine check recoverable ranges */ of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL); diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 0ef358285337..827c958677f8 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -817,8 +817,8 @@ static void __init early_cmdline_parse(void) opt += 4; prom_memory_limit = prom_memparse(opt, (const char **)&opt); #ifdef CONFIG_PPC64 - /* Align to 16 MB == size of ppc64 large page */ - prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); + /* Align down to 16 MB which is large page size with hash page translation */ + prom_memory_limit = ALIGN_DOWN(prom_memory_limit, SZ_16M); #endif } @@ -1061,7 +1061,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { .virt_base = cpu_to_be32(0xffffffff), .virt_size = cpu_to_be32(0xffffffff), .load_base = cpu_to_be32(0xffffffff), - .min_rma = cpu_to_be32(512), /* 512MB min RMA */ + .min_rma = cpu_to_be32(MIN_RMA), .min_load = cpu_to_be32(0xffffffff), /* full client load */ .min_rma_percent = 0, /* min RMA percentage of total RAM */ .max_pft_size = 48, /* max log_2(hash table size) */ @@ -2792,91 +2792,6 @@ static void __init flatten_device_tree(void) dt_struct_start, dt_struct_end); } -#ifdef CONFIG_PPC_MAPLE -/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. - * The values are bad, and it doesn't even have the right number of cells. 
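Both memory-limit hunks above switch from ALIGN() to ALIGN_DOWN() on a 16 MB boundary, the large page size under hash translation. Rounding up could push the limit past what the user asked for; rounding down keeps it conservative. For instance:

    phys_addr_t up   = ALIGN(SZ_1M * 100, SZ_16M);       /* 112 MB: overshoots   */
    phys_addr_t down = ALIGN_DOWN(SZ_1M * 100, SZ_16M);  /*  96 MB: stays within */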
*/ -static void __init fixup_device_tree_maple(void) -{ - phandle isa; - u32 rloc = 0x01002000; /* IO space; PCI device = 4 */ - u32 isa_ranges[6]; - char *name; - - name = "/ht@0/isa@4"; - isa = call_prom("finddevice", 1, 1, ADDR(name)); - if (!PHANDLE_VALID(isa)) { - name = "/ht@0/isa@6"; - isa = call_prom("finddevice", 1, 1, ADDR(name)); - rloc = 0x01003000; /* IO space; PCI device = 6 */ - } - if (!PHANDLE_VALID(isa)) - return; - - if (prom_getproplen(isa, "ranges") != 12) - return; - if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) - == PROM_ERROR) - return; - - if (isa_ranges[0] != 0x1 || - isa_ranges[1] != 0xf4000000 || - isa_ranges[2] != 0x00010000) - return; - - prom_printf("Fixing up bogus ISA range on Maple/Apache...\n"); - - isa_ranges[0] = 0x1; - isa_ranges[1] = 0x0; - isa_ranges[2] = rloc; - isa_ranges[3] = 0x0; - isa_ranges[4] = 0x0; - isa_ranges[5] = 0x00010000; - prom_setprop(isa, name, "ranges", - isa_ranges, sizeof(isa_ranges)); -} - -#define CPC925_MC_START 0xf8000000 -#define CPC925_MC_LENGTH 0x1000000 -/* The values for memory-controller don't have right number of cells */ -static void __init fixup_device_tree_maple_memory_controller(void) -{ - phandle mc; - u32 mc_reg[4]; - char *name = "/hostbridge@f8000000"; - u32 ac, sc; - - mc = call_prom("finddevice", 1, 1, ADDR(name)); - if (!PHANDLE_VALID(mc)) - return; - - if (prom_getproplen(mc, "reg") != 8) - return; - - prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac)); - prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc)); - if ((ac != 2) || (sc != 2)) - return; - - if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) - return; - - if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) - return; - - prom_printf("Fixing up bogus hostbridge on Maple...\n"); - - mc_reg[0] = 0x0; - mc_reg[1] = CPC925_MC_START; - mc_reg[2] = 0x0; - mc_reg[3] = CPC925_MC_LENGTH; - prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); -} -#else -#define fixup_device_tree_maple() -#define fixup_device_tree_maple_memory_controller() -#endif - -#ifdef CONFIG_PPC_CHRP /* * Pegasos and BriQ lacks the "ranges" property in the isa node * Pegasos needs decimal IRQ 14/15, not hexadecimal @@ -2927,12 +2842,8 @@ static void __init fixup_device_tree_chrp(void) } } } -#else -#define fixup_device_tree_chrp() -#endif -#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) -static void __init fixup_device_tree_pmac(void) +static void __init fixup_device_tree_pmac64(void) { phandle u3, i2c, mpic; u32 u3_rev; @@ -2971,11 +2882,27 @@ static void __init fixup_device_tree_pmac(void) prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", &parent, sizeof(parent)); } -#else -#define fixup_device_tree_pmac() -#endif -#ifdef CONFIG_PPC_EFIKA +static void __init fixup_device_tree_pmac(void) +{ + __be32 val = 1; + char type[8]; + phandle node; + + // Some pmacs are missing #size-cells on escc or i2s nodes + for (node = 0; prom_next_node(&node); ) { + type[0] = '\0'; + prom_getprop(node, "device_type", type, sizeof(type)); + if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s")) + continue; + + if (prom_getproplen(node, "#size-cells") != PROM_ERROR) + continue; + + prom_setprop(node, NULL, "#size-cells", &val, sizeof(val)); + } +} + /* * The MPC5200 FEC driver requires an phy-handle property to tell it how * to talk to the phy. 
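The new fixup_device_tree_pmac() above patches Apple device trees whose escc and i2s nodes lack #size-cells. A child's reg property is decoded using the parent's #address-cells/#size-cells, so the missing property makes the kernel's FDT code mis-parse (and warn about) those nodes. Absence is detected by prom_getproplen() returning PROM_ERROR, and the fix is simply to inject a value of 1:

    if (prom_getproplen(node, "#size-cells") == PROM_ERROR)   /* absent */
            prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));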
If the phy-handle property is missing, then this @@ -3107,11 +3034,7 @@ static void __init fixup_device_tree_efika(void) /* Make sure ethernet phy-handle property exists */ fixup_device_tree_efika_add_phy(); } -#else -#define fixup_device_tree_efika() -#endif -#ifdef CONFIG_PPC_PASEMI_NEMO /* * CFE supplied on Nemo is broken in several ways, biggest * problem is that it reassigns ISA interrupts to unused mpic ints. @@ -3187,18 +3110,23 @@ static void __init fixup_device_tree_pasemi(void) prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); } -#else /* !CONFIG_PPC_PASEMI_NEMO */ -static inline void fixup_device_tree_pasemi(void) { } -#endif static void __init fixup_device_tree(void) { - fixup_device_tree_maple(); - fixup_device_tree_maple_memory_controller(); - fixup_device_tree_chrp(); - fixup_device_tree_pmac(); - fixup_device_tree_efika(); - fixup_device_tree_pasemi(); + if (IS_ENABLED(CONFIG_PPC_CHRP)) + fixup_device_tree_chrp(); + + if (IS_ENABLED(CONFIG_PPC_PMAC)) + fixup_device_tree_pmac(); + + if (IS_ENABLED(CONFIG_PPC_PMAC) && IS_ENABLED(CONFIG_PPC64)) + fixup_device_tree_pmac64(); + + if (IS_ENABLED(CONFIG_PPC_EFIKA)) + fixup_device_tree_efika(); + + if (IS_ENABLED(CONFIG_PPC_PASEMI_NEMO)) + fixup_device_tree_pasemi(); } static void __init prom_find_boot_cpu(void) diff --git a/arch/powerpc/kernel/ptrace/ptrace-tm.c b/arch/powerpc/kernel/ptrace/ptrace-tm.c index 210ea834e603..447bff87fd21 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-tm.c +++ b/arch/powerpc/kernel/ptrace/ptrace-tm.c @@ -12,7 +12,7 @@ void flush_tmregs_to_thread(struct task_struct *tsk) { /* * If task is not current, it will have been flushed already to - * it's thread_struct during __switch_to(). + * its thread_struct during __switch_to(). * * A reclaim flushes ALL the state or if not in TM save TM SPRs * in the appropriate thread structures from live. diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c index 584cf5c3df50..c1819e0a6684 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-view.c +++ b/arch/powerpc/kernel/ptrace/ptrace-view.c @@ -469,12 +469,7 @@ static int dexcr_get(struct task_struct *target, const struct user_regset *regse if (!cpu_has_feature(CPU_FTR_ARCH_31)) return -ENODEV; - /* - * The DEXCR is currently static across all CPUs, so we don't - * store the target's value anywhere, but the static value - * will also be correct. - */ - membuf_store(&to, (u64)lower_32_bits(DEXCR_INIT)); + membuf_store(&to, (u64)lower_32_bits(target->thread.dexcr)); /* * Technically the HDEXCR is per-cpu, but a hypervisor can't reasonably diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 727ed4a14545..c6997df63287 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -215,7 +215,7 @@ static int do_seccomp(struct pt_regs *regs) * have already loaded -ENOSYS into r3, or seccomp has put * something else in r3 (via SECCOMP_RET_ERRNO/TRACE). */ - if (__secure_computing(NULL)) + if (__secure_computing()) return -1; /* diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c new file mode 100644 index 000000000000..5f5f47ae82cf --- /dev/null +++ b/arch/powerpc/kernel/rethook.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PowerPC implementation of rethook. This depends on kprobes. 
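This new rethook.c file is the counterpart of the kretprobe machinery removed from kprobes.c earlier in this diff: the generic rethook core now owns the return-hook bookkeeping, and the architecture supplies only the trampoline plus two hooks. The control flow, roughly:

    /* 1. on function entry the rethook core calls: */
    arch_rethook_prepare(rh, regs, mcount);  /* save regs->link, point it at  */
                                             /* arch_rethook_trampoline       */
    /* 2. the probed function returns via blr into the trampoline (nop; blr), */
    /*    whose registered kprobe fires trampoline_rethook_handler()          */
    /* 3. after the handlers run, the core calls: */
    arch_rethook_fixup_return(regs, orig);   /* nip = orig - 4, link = orig   */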
+ */ + +#include <linux/kprobes.h> +#include <linux/rethook.h> + +/* + * Function return trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe + * causes the handlers to fire + */ +asm(".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, @function\n" + "arch_rethook_trampoline:\n" + "nop\n" + "blr\n" + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"); + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs) +{ + return !rethook_trampoline_handler(regs, regs->gpr[1]); +} +NOKPROBE_SYMBOL(trampoline_rethook_handler); + +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + rh->ret_addr = regs->link; + rh->frame = regs->gpr[1]; + + /* Replace the return addr with trampoline addr */ + regs->link = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); + +/* This is called from rethook_trampoline_handler(). */ +void arch_rethook_fixup_return(struct pt_regs *regs, unsigned long orig_ret_address) +{ + /* + * We get here through one of two paths: + * 1. by taking a trap -> kprobe_handler() -> here + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here + * + * When going back through (1), we need regs->nip to be setup properly + * as it is used to determine the return address from the trap. + * For (2), since nip is not honoured with optprobes, we instead setup + * the link register properly so that the subsequent 'blr' in + * arch_rethook_trampoline jumps back to the right instruction. + * + * For nip, we should set the address to the previous instruction since + * we end up emulating it in kprobe_handler(), which increments the nip + * again. + */ + regs_set_return_ip(regs, orig_ret_address - 4); + regs->link = orig_ret_address; +} +NOKPROBE_SYMBOL(arch_rethook_fixup_return); + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &arch_rethook_trampoline, + .pre_handler = trampoline_rethook_handler +}; + +/* rethook initializer */ +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8064d9c3de86..e61245c4468e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -19,6 +19,7 @@ #include <linux/lockdep.h> #include <linux/memblock.h> #include <linux/mutex.h> +#include <linux/nospec.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/reboot.h> @@ -91,12 +92,12 @@ struct rtas_function { * Per-function locks for sequence-based RTAS functions. 
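Dropping static from these mutex definitions makes the locks linkable from other translation units; the expected counterpart is an extern declaration in a shared header (an assumption here, as the header is not part of this diff) so that, for example, the papr VPD and indicator code can serialize against the same sequence-based RTAS calls. Hedged sketch of the consumer side:

    extern struct mutex rtas_ibm_get_vpd_lock;

    mutex_lock(&rtas_ibm_get_vpd_lock);
    /* ... ibm,get-vpd call sequence ... */
    mutex_unlock(&rtas_ibm_get_vpd_lock);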
*/ static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock); -static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock); -static DEFINE_MUTEX(rtas_ibm_get_indices_lock); static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock); -static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock); -static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock); +DEFINE_MUTEX(rtas_ibm_physical_attestation_lock); DEFINE_MUTEX(rtas_ibm_get_vpd_lock); +DEFINE_MUTEX(rtas_ibm_get_indices_lock); +DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock); +DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock); static struct rtas_function rtas_function_table[] __ro_after_init = { [RTAS_FNIDX__CHECK_EXCEPTION] = { @@ -797,66 +798,6 @@ void __init udbg_init_rtas_panel(void) udbg_putc = call_rtas_display_status_delay; } -#ifdef CONFIG_UDBG_RTAS_CONSOLE - -/* If you think you're dying before early_init_dt_scan_rtas() does its - * work, you can hard code the token values for your firmware here and - * hardcode rtas.base/entry etc. - */ -static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE; -static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE; - -static void udbg_rtascon_putc(char c) -{ - int tries; - - if (!rtas.base) - return; - - /* Add CRs before LFs */ - if (c == '\n') - udbg_rtascon_putc('\r'); - - /* if there is more than one character to be displayed, wait a bit */ - for (tries = 0; tries < 16; tries++) { - if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0) - break; - udelay(1000); - } -} - -static int udbg_rtascon_getc_poll(void) -{ - int c; - - if (!rtas.base) - return -1; - - if (rtas_call(rtas_getchar_token, 0, 2, &c)) - return -1; - - return c; -} - -static int udbg_rtascon_getc(void) -{ - int c; - - while ((c = udbg_rtascon_getc_poll()) == -1) - ; - - return c; -} - - -void __init udbg_init_rtas_console(void) -{ - udbg_putc = udbg_rtascon_putc; - udbg_getc = udbg_rtascon_getc; - udbg_getc_poll = udbg_rtascon_getc_poll; -} -#endif /* CONFIG_UDBG_RTAS_CONSOLE */ - void rtas_progress(char *s, unsigned short hex) { struct device_node *root; @@ -1389,21 +1330,14 @@ bool __ref rtas_busy_delay(int status) */ ms = clamp(ms, 1U, 1000U); /* - * The delay hint is an order-of-magnitude suggestion, not - * a minimum. It is fine, possibly even advantageous, for - * us to pause for less time than hinted. For small values, - * use usleep_range() to ensure we don't sleep much longer - * than actually needed. - * - * See Documentation/timers/timers-howto.rst for - * explanation of the threshold used here. In effect we use - * usleep_range() for 9900 and 9901, msleep() for - * 9902-9905. + * The delay hint is an order-of-magnitude suggestion, not a + * minimum. It is fine, possibly even advantageous, for us to + * pause for less time than hinted. To make sure pause time will + * not be way longer than requested independent of HZ + * configuration, use fsleep(). See fsleep() for details of + * used sleeping functions. */ - if (ms <= 20) - usleep_range(ms * 100, ms * 1000); - else - msleep(ms); + fsleep(ms * 1000); break; case RTAS_BUSY: ret = true; @@ -1916,6 +1850,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) || nargs + nret > ARRAY_SIZE(args.args)) return -EINVAL; + nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args)); + nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs); + /* Copy in args. 
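The sys_rtas() hunk above adds a Spectre-v1 clamp: the architectural bounds check already rejects out-of-range nargs/nret, but a mispredicted branch could still use them speculatively to index args.args. array_index_nospec() forces each value into range even under speculation:

    if (nargs >= ARRAY_SIZE(args.args) ||
        nret > ARRAY_SIZE(args.args) ||
        nargs + nret > ARRAY_SIZE(args.args))
            return -EINVAL;

    nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
    nret  = array_index_nospec(nret,  ARRAY_SIZE(args.args) - nargs);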
*/ if (copy_from_user(args.args, uargs->args, nargs * sizeof(rtas_arg_t)) != 0) @@ -2138,21 +2075,6 @@ int __init early_init_dt_scan_rtas(unsigned long node, rtas.size = *sizep; } -#ifdef CONFIG_UDBG_RTAS_CONSOLE - basep = of_get_flat_dt_prop(node, "put-term-char", NULL); - if (basep) - rtas_putchar_token = *basep; - - basep = of_get_flat_dt_prop(node, "get-term-char", NULL); - if (basep) - rtas_getchar_token = *basep; - - if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE && - rtas_getchar_token != RTAS_UNKNOWN_SERVICE) - udbg_init_rtas_console(); - -#endif - /* break now */ return 1; } diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 359577ec1680..5407024881e5 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -773,4 +773,5 @@ static void __exit rtas_flash_cleanup(void) module_init(rtas_flash_init); module_exit(rtas_flash_cleanup); +MODULE_DESCRIPTION("PPC procfs firmware flash interface"); MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/kernel/secure_boot.c b/arch/powerpc/kernel/secure_boot.c index 9e0efb657f39..3a28795b4ed8 100644 --- a/arch/powerpc/kernel/secure_boot.c +++ b/arch/powerpc/kernel/secure_boot.c @@ -5,6 +5,7 @@ */ #include <linux/types.h> #include <linux/of.h> +#include <linux/string_choices.h> #include <asm/secure_boot.h> static struct device_node *get_ppc_fw_sb_node(void) @@ -38,7 +39,7 @@ bool is_ppc_secureboot_enabled(void) of_node_put(node); out: - pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled"); + pr_info("Secure boot mode %s\n", str_enabled_disabled(enabled)); return enabled; } @@ -62,7 +63,7 @@ bool is_ppc_trustedboot_enabled(void) of_node_put(node); out: - pr_info("Trusted boot mode %s\n", enabled ? "enabled" : "disabled"); + pr_info("Trusted boot mode %s\n", str_enabled_disabled(enabled)); return enabled; } diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 4856e1a5161c..fbb7ebd8aa08 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -14,7 +14,7 @@ #include <linux/debugfs.h> #include <asm/asm-prototypes.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/security_features.h> #include <asm/sections.h> #include <asm/setup.h> diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c index eb3c053f323f..afb690a172b4 100644 --- a/arch/powerpc/kernel/secvar-sysfs.c +++ b/arch/powerpc/kernel/secvar-sysfs.c @@ -52,7 +52,7 @@ static ssize_t size_show(struct kobject *kobj, struct kobj_attribute *attr, } static ssize_t data_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { char *data; @@ -85,7 +85,7 @@ data_fail: } static ssize_t update_write(struct file *filep, struct kobject *kobj, - struct bin_attribute *attr, char *buf, loff_t off, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { int rc; @@ -104,11 +104,11 @@ static struct kobj_attribute format_attr = __ATTR_RO(format); static struct kobj_attribute size_attr = __ATTR_RO(size); -static struct bin_attribute data_attr = __BIN_ATTR_RO(data, 0); +static struct bin_attribute data_attr __ro_after_init = __BIN_ATTR_RO(data, 0); -static struct bin_attribute update_attr = __BIN_ATTR_WO(update, 0); +static struct bin_attribute update_attr __ro_after_init = __BIN_ATTR_WO(update, 0); -static struct bin_attribute *secvar_bin_attrs[] = { +static const struct bin_attribute 
*const secvar_bin_attrs[] = { &data_attr, &update_attr, NULL, @@ -121,16 +121,16 @@ static struct attribute *secvar_attrs[] = { static const struct attribute_group secvar_attr_group = { .attrs = secvar_attrs, - .bin_attrs = secvar_bin_attrs, + .bin_attrs_new = secvar_bin_attrs, }; __ATTRIBUTE_GROUPS(secvar_attr); -static struct kobj_type secvar_ktype = { +static const struct kobj_type secvar_ktype = { .sysfs_ops = &kobj_sysfs_ops, .default_groups = secvar_attr_groups, }; -static int update_kobj_size(void) +static __init int update_kobj_size(void) { u64 varsize; @@ -145,7 +145,7 @@ static int update_kobj_size(void) return 0; } -static int secvar_sysfs_config(struct kobject *kobj) +static __init int secvar_sysfs_config(struct kobject *kobj) { struct attribute_group config_group = { .name = "config", @@ -158,7 +158,7 @@ static int secvar_sysfs_config(struct kobject *kobj) return 0; } -static int add_var(const char *name) +static __init int add_var(const char *name) { struct kobject *kobj; int rc; @@ -181,7 +181,7 @@ static int add_var(const char *name) return 0; } -static int secvar_sysfs_load(void) +static __init int secvar_sysfs_load(void) { u64 namesize = 0; char *name; @@ -209,7 +209,7 @@ static int secvar_sysfs_load(void) return rc; } -static int secvar_sysfs_load_static(void) +static __init int secvar_sysfs_load_static(void) { const char * const *name_ptr = secvar_ops->var_names; int rc; @@ -224,7 +224,7 @@ static int secvar_sysfs_load_static(void) return 0; } -static int secvar_sysfs_init(void) +static __init int secvar_sysfs_init(void) { u64 max_size; int rc; diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 2add292da494..68d47c53876c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -67,6 +67,7 @@ #include <asm/cpu_has_feature.h> #include <asm/kasan.h> #include <asm/mce.h> +#include <asm/systemcfg.h> #include "setup.h" @@ -110,7 +111,7 @@ int ppc_do_canonicalize_irqs; EXPORT_SYMBOL(ppc_do_canonicalize_irqs); #endif -#ifdef CONFIG_VMCORE_INFO +#ifdef CONFIG_CRASH_DUMP /* This keeps a track of which one is the crashing cpu. */ int crashing_cpu = -1; #endif @@ -405,7 +406,7 @@ static void __init cpu_init_thread_core_maps(int tpc) cpumask_set_cpu(i, &threads_core_mask); printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", - tpc, tpc > 1 ? "s" : ""); + tpc, str_plural(tpc)); printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift); } @@ -457,11 +458,8 @@ void __init smp_setup_cpu_maps(void) DBG("smp_setup_cpu_maps()\n"); - cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32), + cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32), __alignof__(u32)); - if (!cpu_to_phys_id) - panic("%s: Failed to allocate %zu bytes align=0x%zx\n", - __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32)); for_each_node_by_type(dn, "cpu") { const __be32 *intserv; @@ -560,7 +558,9 @@ void __init smp_setup_cpu_maps(void) out: of_node_put(dn); } - vdso_data->processorCount = num_present_cpus(); +#endif +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount = num_present_cpus(); #endif /* CONFIG_PPC64 */ /* Initialize CPU <=> thread mapping/ @@ -831,8 +831,8 @@ static int __init check_cache_coherency(void) if (devtree_coherency != KERNEL_COHERENCY) { printk(KERN_ERR "kernel coherency:%s != device tree_coherency:%s\n", - KERNEL_COHERENCY ? "on" : "off", - devtree_coherency ? 
"on" : "off"); + str_on_off(KERNEL_COHERENCY), + str_on_off(devtree_coherency)); BUG(); } @@ -957,8 +957,7 @@ void __init setup_arch(char **cmdline_p) /* Parse memory topology */ mem_topology_setup(); - /* Set max_mapnr before paging_init() */ - set_max_mapnr(max_pfn); + high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); /* * Release secondary cpus out of their spinloops at 0x60 now that @@ -996,9 +995,11 @@ void __init setup_arch(char **cmdline_p) initmem_init(); /* - * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must - * be called after initmem_init(), so that pageblock_order is initialised. + * Reserve large chunks of memory for use by CMA for fadump, KVM and + * hugetlb. These must be called after initmem_init(), so that + * pageblock_order is initialised. */ + fadump_cma_init(); kvm_cma_reserve(); gigantic_hugetlb_cma_reserve(); diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index 7912bb50a7cb..385a00a2e2ca 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -29,7 +29,7 @@ void setup_tlb_core_data(void); static inline void setup_tlb_core_data(void) { } #endif -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE void exc_lvl_early_init(void); #else static inline void exc_lvl_early_init(void) { } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index b761cc1a403c..5a1bf501fbe1 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -40,7 +40,7 @@ #include <asm/time.h> #include <asm/serial.h> #include <asm/udbg.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/cpu_has_feature.h> #include <asm/asm-prototypes.h> #include <asm/kdump.h> @@ -140,13 +140,7 @@ arch_initcall(ppc_init); static void *__init alloc_stack(void) { - void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN); - - if (!ptr) - panic("cannot allocate %d bytes for stack at %pS\n", - THREAD_SIZE, (void *)_RET_IP_); - - return ptr; + return memblock_alloc_or_panic(THREAD_SIZE, THREAD_ALIGN); } void __init irqstack_early_init(void) @@ -176,7 +170,7 @@ void __init emergency_stack_init(void) } #endif -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE void __init exc_lvl_early_init(void) { unsigned int i, hw_cpu; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2f19d5e94485..7284c8021eeb 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -60,7 +60,7 @@ #include <asm/xmon.h> #include <asm/udbg.h> #include <asm/kexec.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/ftrace.h> #include <asm/opal.h> #include <asm/cputhreads.h> @@ -696,11 +696,7 @@ __init u64 ppc64_bolted_size(void) { #ifdef CONFIG_PPC_BOOK3E_64 /* Freescale BookE bolts the entire linear mapping */ - /* XXX: BookE ppc64_rma_limit setup seems to disagree? 
*/ - if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) - return linear_map_top; - /* Other BookE, we assume the first GB is bolted */ - return 1ul << 30; + return linear_map_top; #else /* BookS radix, does not take faults on linear mapping */ if (early_radix_enabled()) @@ -834,6 +830,7 @@ static __init int pcpu_cpu_to_node(int cpu) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); +DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged); void __init setup_per_cpu_areas(void) { @@ -876,6 +873,7 @@ void __init setup_per_cpu_areas(void) if (rc < 0) panic("cannot initialize percpu area (err=%d)", rc); + static_key_enable(&__percpu_first_chunk_is_paged.key); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; @@ -894,7 +892,7 @@ unsigned long memory_block_size_bytes(void) } #endif -#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO) +#ifdef CONFIG_PPC_INDIRECT_PIO struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif @@ -922,6 +920,7 @@ static int __init disable_hardlockup_detector(void) hardlockup_detector_disable(); #else if (firmware_has_feature(FW_FEATURE_LPAR)) { + check_kvm_guest(); if (is_kvm_guest()) hardlockup_detector_disable(); } diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a60e4139214b..5ac7084eebc0 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -61,6 +61,7 @@ #include <asm/ftrace.h> #include <asm/kup.h> #include <asm/fadump.h> +#include <asm/systemcfg.h> #include <trace/events/ipi.h> @@ -588,7 +589,7 @@ void smp_send_debugger_break(void) } #endif -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) { int cpu; @@ -631,7 +632,7 @@ void crash_smp_send_stop(void) stopped = true; -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP if (kexec_crash_image) { crash_kexec_prepare(); return; @@ -1166,7 +1167,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) cpu_smt_set_num_threads(num_threads, threads_per_core); } -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { BUG_ON(smp_processor_id() != boot_cpuid); #ifdef CONFIG_PPC64 @@ -1186,8 +1187,8 @@ int generic_cpu_disable(void) return -EBUSY; set_cpu_online(cpu, false); -#ifdef CONFIG_PPC64 - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; #endif /* Update affinity of all IRQs previously aimed at this CPU */ irq_migrate_all_off_this_cpu(); @@ -1567,7 +1568,7 @@ static void add_cpu_to_masks(int cpu) /* * This CPU will not be in the online mask yet so we need to manually - * add it to it's own thread sibling mask. + * add it to its own thread sibling mask. 
*/ map_cpu_to_node(cpu, cpu_to_node(cpu)); cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); @@ -1642,10 +1643,12 @@ void start_secondary(void *unused) secondary_cpu_time_init(); -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG if (system_state == SYSTEM_RUNNING) - vdso_data->processorCount++; + systemcfg->processorCount++; +#endif +#ifdef CONFIG_PPC64 vdso_getcpu_init(); #endif set_numa_node(numa_cpu_lookup_table[cpu]); diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index e6a958a5da27..90882b5175cd 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -21,6 +21,7 @@ #include <asm/processor.h> #include <linux/ftrace.h> #include <asm/kprobes.h> +#include <linux/rethook.h> #include <asm/paca.h> @@ -133,12 +134,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum * arch-dependent code, they are generic. */ ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack); -#ifdef CONFIG_KPROBES + /* * Mark stacktraces with kretprobed functions on them * as unreliable. */ - if (ip == (unsigned long)__kretprobe_trampoline) +#ifdef CONFIG_RETHOOK + if (ip == (unsigned long)arch_rethook_trampoline) return -EINVAL; #endif diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c index 863a7aa24650..ec3101f95e53 100644 --- a/arch/powerpc/kernel/static_call.c +++ b/arch/powerpc/kernel/static_call.c @@ -2,32 +2,60 @@ #include <linux/memory.h> #include <linux/static_call.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> void arch_static_call_transform(void *site, void *tramp, void *func, bool tail) { int err; bool is_ret0 = (func == __static_call_return0); - unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func); - bool is_short = is_offset_in_branch_range((long)target - (long)tramp); - - if (!tramp) - return; + unsigned long _tramp = (unsigned long)tramp; + unsigned long _func = (unsigned long)func; + unsigned long _ret0 = _tramp + PPC_SCT_RET0; + bool is_short = is_offset_in_branch_range((long)func - (long)(site ? 
: tramp)); mutex_lock(&text_mutex); - if (func && !is_short) { - err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target)); - if (err) - goto out; + if (site && tail) { + if (!func) + err = patch_instruction(site, ppc_inst(PPC_RAW_BLR())); + else if (is_ret0) + err = patch_branch(site, _ret0, 0); + else if (is_short) + err = patch_branch(site, _func, 0); + else if (tramp) + err = patch_branch(site, _tramp, 0); + else + err = 0; + } else if (site) { + if (!func) + err = patch_instruction(site, ppc_inst(PPC_RAW_NOP())); + else if (is_ret0) + err = patch_instruction(site, ppc_inst(PPC_RAW_LI(_R3, 0))); + else if (is_short) + err = patch_branch(site, _func, BRANCH_SET_LINK); + else if (tramp) + err = patch_branch(site, _tramp, BRANCH_SET_LINK); + else + err = 0; + } else if (tramp) { + if (func && !is_short) { + err = patch_ulong(tramp + PPC_SCT_DATA, _func); + if (err) + goto out; + } + + if (!func) + err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR())); + else if (is_ret0) + err = patch_branch(tramp, _ret0, 0); + else if (is_short) + err = patch_branch(tramp, _func, 0); + else + err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP())); + } else { + err = 0; } - if (!func) - err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR())); - else if (is_short) - err = patch_branch(tramp, target, 0); - else - err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP())); out: mutex_unlock(&text_mutex); diff --git a/arch/powerpc/kernel/switch.S b/arch/powerpc/kernel/switch.S index 608c0ce7cec6..59e3ee99db0e 100644 --- a/arch/powerpc/kernel/switch.S +++ b/arch/powerpc/kernel/switch.S @@ -39,7 +39,6 @@ flush_branch_caches: // Flush the link stack .rept 64 - ANNOTATE_INTRA_FUNCTION_CALL bl .+4 .endr b 1f diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index f6f868e817e6..be159ad4b77b 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -27,7 +27,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) trace_hardirqs_off(); /* finish reconciling */ - CT_WARN_ON(ct_state() == CONTEXT_KERNEL); + CT_WARN_ON(ct_state() == CT_STATE_KERNEL); user_exit_irqoff(); BUG_ON(regs_is_unrecoverable(regs)); diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 17173b82ca21..9a084bdb8926 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -230,8 +230,10 @@ 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64 179 64 pread64 sys_pread64 +179 spu pread64 sys_pread64 180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64 180 64 pwrite64 sys_pwrite64 +180 spu pwrite64 sys_pwrite64 181 common chown sys_chown 182 common getcwd sys_getcwd 183 common capget sys_capget @@ -246,6 +248,7 @@ 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit 191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead 191 64 readahead sys_readahead +191 spu readahead sys_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64 194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64 @@ -293,6 +296,7 @@ 232 nospu set_tid_address sys_set_tid_address 233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64 233 64 fadvise64 sys_fadvise64 +233 spu fadvise64 sys_fadvise64 234 nospu exit_group sys_exit_group 235 nospu lookup_dcookie sys_ni_syscall 236 common epoll_create sys_epoll_create @@ -502,7 +506,7 @@ 412 32 utimensat_time64 
sys_utimensat sys_utimensat 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -548,3 +552,9 @@ 459 common lsm_get_self_attr sys_lsm_get_self_attr 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal +463 common setxattrat sys_setxattrat +464 common getxattrat sys_getxattrat +465 common listxattrat sys_listxattrat +466 common removexattrat sys_removexattrat +467 common open_tree_attr sys_open_tree_attr diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 0f39a6b84132..6b3dd6decdf9 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -17,6 +17,7 @@ #include <asm/hvcall.h> #include <asm/machdep.h> #include <asm/smp.h> +#include <asm/time.h> #include <asm/pmc.h> #include <asm/firmware.h> #include <asm/idle.h> @@ -139,7 +140,7 @@ static unsigned long dscr_default; * @val: Returned cpu specific DSCR default value * * This function returns the per cpu DSCR default value - * for any cpu which is contained in it's PACA structure. + * for any cpu which is contained in its PACA structure. */ static void read_dscr(void *val) { @@ -152,7 +153,7 @@ static void read_dscr(void *val) * @val: New cpu specific DSCR default value to update * * This function updates the per cpu DSCR default value - * for any cpu which is contained in it's PACA structure. + * for any cpu which is contained in its PACA structure. */ static void write_dscr(void *val) { diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index df20cf201f74..8224381c1dba 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -71,11 +71,11 @@ #include <asm/vdso_datapage.h> #include <asm/firmware.h> #include <asm/mce.h> +#include <asm/systemcfg.h> /* powerpc clocksource/clockevent code */ #include <linux/clockchips.h> -#include <linux/timekeeper_internal.h> static u64 timebase_read(struct clocksource *); static struct clocksource clocksource_timebase = { @@ -354,6 +354,28 @@ void vtime_flush(struct task_struct *tsk) acct->hardirq_time = 0; acct->softirq_time = 0; } + +/* + * Called from the context switch with interrupts disabled, to charge all + * accumulated times to the current process, and to prepare accounting on + * the next process. 
+ */ +void vtime_task_switch(struct task_struct *prev) +{ + if (is_idle_task(prev)) + vtime_account_idle(prev); + else + vtime_account_kernel(prev); + + vtime_flush(prev); + + if (!IS_ENABLED(CONFIG_PPC64)) { + struct cpu_accounting_data *acct = get_accounting(current); + struct cpu_accounting_data *acct0 = get_accounting(prev); + + acct->starttime = acct0->starttime; + } +} #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ void __no_kcsan __delay(unsigned long loops) @@ -673,7 +695,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val) static void start_cpu_decrementer(void) { -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE unsigned int tcr; /* Clear any pending timer interrupts */ @@ -879,6 +901,38 @@ void secondary_cpu_time_init(void) register_decrementer_clockevent(smp_processor_id()); } +/* + * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit + * result. + */ +static __init void div128_by_32(u64 dividend_high, u64 dividend_low, + unsigned int divisor, struct div_result *dr) +{ + unsigned long a, b, c, d; + unsigned long w, x, y, z; + u64 ra, rb, rc; + + a = dividend_high >> 32; + b = dividend_high & 0xffffffff; + c = dividend_low >> 32; + d = dividend_low & 0xffffffff; + + w = a / divisor; + ra = ((u64)(a - (w * divisor)) << 32) + b; + + rb = ((u64)do_div(ra, divisor) << 32) + c; + x = ra; + + rc = ((u64)do_div(rb, divisor) << 32) + d; + y = rb; + + do_div(rc, divisor); + z = rc; + + dr->result_high = ((u64)w << 32) + x; + dr->result_low = ((u64)y << 32) + z; +} + /* This function is only called on the boot processor */ void __init time_init(void) { @@ -928,7 +982,10 @@ void __init time_init(void) sys_tz.tz_dsttime = 0; } - vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; + vdso_k_arch_data->tb_ticks_per_sec = tb_ticks_per_sec; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; +#endif /* initialise and enable the large decrementer (if we have one) */ set_decrementer_max(); @@ -949,39 +1006,6 @@ void __init time_init(void) enable_sched_clock_irqtime(); } -/* - * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit - * result. 
- */ -void div128_by_32(u64 dividend_high, u64 dividend_low, - unsigned divisor, struct div_result *dr) -{ - unsigned long a, b, c, d; - unsigned long w, x, y, z; - u64 ra, rb, rc; - - a = dividend_high >> 32; - b = dividend_high & 0xffffffff; - c = dividend_low >> 32; - d = dividend_low & 0xffffffff; - - w = a / divisor; - ra = ((u64)(a - (w * divisor)) << 32) + b; - - rb = ((u64) do_div(ra, divisor) << 32) + c; - x = ra; - - rc = ((u64) do_div(rb, divisor) << 32) + d; - y = rb; - - do_div(rc, divisor); - z = rc; - - dr->result_high = ((u64)w << 32) + x; - dr->result_low = ((u64)y << 32) + z; - -} - /* We don't need to calibrate delay, we use the CPU timebase for that */ void calibrate_delay(void) { diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile index 125f4ca588b9..d6c3885453bd 100644 --- a/arch/powerpc/kernel/trace/Makefile +++ b/arch/powerpc/kernel/trace/Makefile @@ -9,12 +9,15 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_ftrace_64_pg.o = $(CC_FLAGS_FTRACE) endif -obj32-$(CONFIG_FUNCTION_TRACER) += ftrace.o ftrace_entry.o -ifdef CONFIG_MPROFILE_KERNEL -obj64-$(CONFIG_FUNCTION_TRACER) += ftrace.o ftrace_entry.o +ifdef CONFIG_FUNCTION_TRACER +obj32-y += ftrace.o ftrace_entry.o +ifeq ($(CONFIG_MPROFILE_KERNEL)$(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY),) +obj64-y += ftrace_64_pg.o ftrace_64_pg_entry.o else -obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o ftrace_64_pg_entry.o +obj64-y += ftrace.o ftrace_entry.o +endif endif + obj-$(CONFIG_TRACING) += trace_clock.o obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index d8d6b4fd9a14..6dca92d5a6e8 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -23,7 +23,7 @@ #include <linux/list.h> #include <asm/cacheflush.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/ftrace.h> #include <asm/syscall.h> #include <asm/inst.h> @@ -37,8 +37,12 @@ unsigned long ftrace_call_adjust(unsigned long addr) if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end) return 0; - if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) + if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) && + !IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { addr += MCOUNT_INSN_SIZE; + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + addr += MCOUNT_INSN_SIZE; + } return addr; } @@ -82,7 +86,7 @@ static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_ { int ret = ftrace_validate_inst(ip, old); - if (!ret) + if (!ret && !ppc_inst_equal(old, new)) ret = patch_instruction((u32 *)ip, new); return ret; @@ -106,28 +110,66 @@ static unsigned long find_ftrace_tramp(unsigned long ip) return 0; } +#ifdef CONFIG_MODULES +static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long addr) +{ + struct module *mod = NULL; + + scoped_guard(rcu) + mod = __module_text_address(ip); + if (!mod) + pr_err("No module loaded at addr=%lx\n", ip); + + return (addr == (unsigned long)ftrace_caller ? 
mod->arch.tramp : mod->arch.tramp_regs); +} +#else +static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long addr) +{ + return 0; +} +#endif + +static unsigned long ftrace_get_ool_stub(struct dyn_ftrace *rec) +{ +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + return rec->arch.ool_stub; +#else + BUILD_BUG(); +#endif +} + static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst) { - unsigned long ip = rec->ip; + unsigned long ip; unsigned long stub; - if (is_offset_in_branch_range(addr - ip)) { + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; /* second instruction in stub */ + else + ip = rec->ip; + + if (!is_offset_in_branch_range(addr - ip) && addr != FTRACE_ADDR && + addr != FTRACE_REGS_ADDR) { + /* This can only happen with ftrace direct */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)) { + pr_err("0x%lx (0x%lx): Unexpected target address 0x%lx\n", + ip, rec->ip, addr); + return -EINVAL; + } + addr = FTRACE_ADDR; + } + + if (is_offset_in_branch_range(addr - ip)) /* Within range */ stub = addr; -#ifdef CONFIG_MODULES - } else if (rec->arch.mod) { - /* Module code would be going to one of the module stubs */ - stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp : - rec->arch.mod->arch.tramp_regs); -#endif - } else if (core_kernel_text(ip)) { + else if (core_kernel_text(ip)) /* We would be branching to one of our ftrace stubs */ stub = find_ftrace_tramp(ip); - if (!stub) { - pr_err("0x%lx: No ftrace stubs reachable\n", ip); - return -EINVAL; - } - } else { + else + stub = ftrace_lookup_module_stub(ip, addr); + + if (!stub) { + pr_err("0x%lx (0x%lx): No ftrace stubs reachable\n", ip, rec->ip); return -EINVAL; } @@ -135,6 +177,145 @@ static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_ return 0; } +static int ftrace_init_ool_stub(struct module *mod, struct dyn_ftrace *rec) +{ +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + static int ool_stub_text_index, ool_stub_text_end_index, ool_stub_inittext_index; + int ret = 0, ool_stub_count, *ool_stub_index; + ppc_inst_t inst; + /* + * See ftrace_entry.S if changing the below instruction sequence, as we rely on + * decoding the last branch instruction here to recover the correct function ip. + */ + struct ftrace_ool_stub *ool_stub, ool_stub_template = { + .insn = { + PPC_RAW_MFLR(_R0), + PPC_RAW_NOP(), /* bl ftrace_caller */ + PPC_RAW_MTLR(_R0), + PPC_RAW_NOP() /* b rec->ip + 4 */ + } + }; + + WARN_ON(rec->arch.ool_stub); + + if (is_kernel_inittext(rec->ip)) { + ool_stub = ftrace_ool_stub_inittext; + ool_stub_index = &ool_stub_inittext_index; + ool_stub_count = ftrace_ool_stub_inittext_count; + } else if (is_kernel_text(rec->ip)) { + /* + * ftrace records are sorted, so we first use up the stub area within .text + * (ftrace_ool_stub_text) before using the area at the end of .text + * (ftrace_ool_stub_text_end), unless the stub is out of range of the record. 
+ */ + if (ool_stub_text_index >= ftrace_ool_stub_text_count || + !is_offset_in_branch_range((long)rec->ip - + (long)&ftrace_ool_stub_text[ool_stub_text_index])) { + ool_stub = ftrace_ool_stub_text_end; + ool_stub_index = &ool_stub_text_end_index; + ool_stub_count = ftrace_ool_stub_text_end_count; + } else { + ool_stub = ftrace_ool_stub_text; + ool_stub_index = &ool_stub_text_index; + ool_stub_count = ftrace_ool_stub_text_count; + } +#ifdef CONFIG_MODULES + } else if (mod) { + ool_stub = mod->arch.ool_stubs; + ool_stub_index = &mod->arch.ool_stub_index; + ool_stub_count = mod->arch.ool_stub_count; +#endif + } else { + return -EINVAL; + } + + ool_stub += (*ool_stub_index)++; + + if (WARN_ON(*ool_stub_index > ool_stub_count)) + return -EINVAL; + + if (!is_offset_in_branch_range((long)rec->ip - (long)&ool_stub->insn[0]) || + !is_offset_in_branch_range((long)(rec->ip + MCOUNT_INSN_SIZE) - + (long)&ool_stub->insn[3])) { + pr_err("%s: ftrace ool stub out of range (%p -> %p).\n", + __func__, (void *)rec->ip, (void *)&ool_stub->insn[0]); + return -EINVAL; + } + + rec->arch.ool_stub = (unsigned long)&ool_stub->insn[0]; + + /* bl ftrace_caller */ + if (!mod) + ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &inst); +#ifdef CONFIG_MODULES + else + /* + * We can't use ftrace_get_call_inst() since that uses + * __module_text_address(rec->ip) to look up the module. + * But, since the module is not fully formed at this stage, + * the lookup fails. We know the target though, so generate + * the branch inst directly. + */ + inst = ftrace_create_branch_inst(ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE, + mod->arch.tramp, 1); +#endif + ool_stub_template.insn[1] = ppc_inst_val(inst); + + /* b rec->ip + 4 */ + if (!ret && create_branch(&inst, &ool_stub->insn[3], rec->ip + MCOUNT_INSN_SIZE, 0)) + return -EINVAL; + ool_stub_template.insn[3] = ppc_inst_val(inst); + + if (!ret) + ret = patch_instructions((u32 *)ool_stub, (u32 *)&ool_stub_template, + sizeof(ool_stub_template), false); + + return ret; +#else /* !CONFIG_PPC_FTRACE_OUT_OF_LINE */ + BUILD_BUG(); +#endif +} + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS +static const struct ftrace_ops *powerpc_rec_get_ops(struct dyn_ftrace *rec) +{ + const struct ftrace_ops *ops = NULL; + + if (rec->flags & FTRACE_FL_CALL_OPS_EN) { + ops = ftrace_find_unique_ops(rec); + WARN_ON_ONCE(!ops); + } + + if (!ops) + ops = &ftrace_list_ops; + + return ops; +} + +static int ftrace_rec_set_ops(struct dyn_ftrace *rec, const struct ftrace_ops *ops) +{ + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + return patch_ulong((void *)(ftrace_get_ool_stub(rec) - sizeof(unsigned long)), + (unsigned long)ops); + else + return patch_ulong((void *)(rec->ip - MCOUNT_INSN_SIZE - sizeof(unsigned long)), + (unsigned long)ops); +} + +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, &ftrace_nop_ops); +} + +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) +{ + return ftrace_rec_set_ops(rec, powerpc_rec_get_ops(rec)); +} +#else +static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; } +static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; } +#endif + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { @@ -147,18 +328,33 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { ppc_inst_t old, new; - int ret; + unsigned long ip = rec->ip; 
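/*
 * [Editor's sketch, not part of the commit] With
 * DYNAMIC_FTRACE_WITH_CALL_OPS, ftrace_rec_set_ops() above patches the
 * struct ftrace_ops pointer into a literal slot one pointer-size below
 * the 'mflr r0; bl ftrace_caller' pair -- below the out-of-line stub,
 * or below the nops preceding the function entry. That is why the
 * entry code in ftrace_entry.S can reload it via
 * -(MCOUNT_INSN_SIZE*2 + SZL)(r11) once r11 holds the post-'bl' link
 * register. A minimal C restatement, assuming 4-byte instructions;
 * the helper name is illustrative:
 */
#define MCOUNT_INSN_SIZE 4	/* assumption: one powerpc instruction */

static inline unsigned long call_ops_slot(unsigned long lr)
{
	/* lr is the address following the 'bl'; step back over the
	 * mflr/bl pair plus one pointer to reach the patched slot. */
	return lr - (2 * MCOUNT_INSN_SIZE + sizeof(unsigned long));
}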
+ int ret = 0; /* This can only ever be called during module load */ - if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip))) + if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(ip))) return -EINVAL; old = ppc_inst(PPC_RAW_NOP()); - ret = ftrace_get_call_inst(rec, addr, &new); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { + ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; /* second instruction in stub */ + ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &old); + } + + ret |= ftrace_get_call_inst(rec, addr, &new); + + if (!ret) + ret = ftrace_modify_code(ip, old, new); + + ret = ftrace_rec_update_ops(rec); if (ret) return ret; - return ftrace_modify_code(rec->ip, old, new); + if (!ret && IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + ret = ftrace_modify_code(rec->ip, ppc_inst(PPC_RAW_NOP()), + ppc_inst(PPC_RAW_BRANCH((long)ftrace_get_ool_stub(rec) - (long)rec->ip))); + + return ret; } int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) @@ -191,6 +387,13 @@ void ftrace_replace_code(int enable) new_addr = ftrace_get_addr_new(rec); update = ftrace_update_record(rec, enable); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) && update != FTRACE_UPDATE_IGNORE) { + ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; + ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &nop_inst); + if (ret) + goto out; + } + switch (update) { case FTRACE_UPDATE_IGNORE: default: @@ -198,16 +401,19 @@ void ftrace_replace_code(int enable) case FTRACE_UPDATE_MODIFY_CALL: ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst); ret |= ftrace_get_call_inst(rec, addr, &call_inst); + ret |= ftrace_rec_update_ops(rec); old = call_inst; new = new_call_inst; break; case FTRACE_UPDATE_MAKE_NOP: ret = ftrace_get_call_inst(rec, addr, &call_inst); + ret |= ftrace_rec_set_nop_ops(rec); old = call_inst; new = nop_inst; break; case FTRACE_UPDATE_MAKE_CALL: ret = ftrace_get_call_inst(rec, new_addr, &call_inst); + ret |= ftrace_rec_update_ops(rec); old = nop_inst; new = call_inst; break; @@ -215,6 +421,24 @@ void ftrace_replace_code(int enable) if (!ret) ret = ftrace_modify_code(ip, old, new); + + if (!ret && IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) && + (update == FTRACE_UPDATE_MAKE_NOP || update == FTRACE_UPDATE_MAKE_CALL)) { + /* Update the actual ftrace location */ + call_inst = ppc_inst(PPC_RAW_BRANCH((long)ftrace_get_ool_stub(rec) - + (long)rec->ip)); + nop_inst = ppc_inst(PPC_RAW_NOP()); + ip = rec->ip; + + if (update == FTRACE_UPDATE_MAKE_NOP) + ret = ftrace_modify_code(ip, call_inst, nop_inst); + else + ret = ftrace_modify_code(ip, nop_inst, call_inst); + + if (ret) + goto out; + } + if (ret) goto out; } @@ -234,20 +458,27 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) /* Verify instructions surrounding the ftrace location */ if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) { /* Expect nops */ - ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP())); + if (!IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP())); if (!ret) ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP())); } else if (IS_ENABLED(CONFIG_PPC32)) { /* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */ ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0))); - if (!ret) - ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4))); + if (ret) + return ret; + ret = ftrace_modify_code(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)), + ppc_inst(PPC_RAW_NOP())); } 
else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) { /* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */ ret = ftrace_read_inst(ip - 4, &old); if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) { + /* GCC v5.x emits the additional 'std' instruction, GCC v6.x doesn't */ ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0))); - ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16))); + if (ret) + return ret; + ret = ftrace_modify_code(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)), + ppc_inst(PPC_RAW_NOP())); } } else { return -EINVAL; @@ -256,13 +487,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) if (ret) return ret; - if (!core_kernel_text(ip)) { - if (!mod) { - pr_err("0x%lx: No module provided for non-kernel address\n", ip); - return -EFAULT; - } - rec->arch.mod = mod; - } + /* Set up out-of-line stub */ + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + return ftrace_init_ool_stub(mod, rec); /* Nop-out the ftrace location */ new = ppc_inst(PPC_RAW_NOP()); @@ -302,6 +529,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ppc_inst_t old, new; int ret; + /* + * When using CALL_OPS, the function to call is associated with the + * call site, and we don't have a global function pointer to update. + */ + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) + return 0; + old = ppc_inst_read((u32 *)&ftrace_call); new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1); ret = ftrace_modify_code(ip, old, new); @@ -421,8 +655,7 @@ int __init ftrace_dyn_arch_init(void) void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { - unsigned long sp = fregs->regs.gpr[1]; - int bit; + unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1]; if (unlikely(ftrace_graph_is_dead())) goto out; @@ -430,15 +663,10 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, if (unlikely(atomic_read(&current->tracing_graph_pause))) goto out; - bit = ftrace_test_recursion_trylock(ip, parent_ip); - if (bit < 0) - goto out; - - if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp)) + if (!function_graph_enter_regs(parent_ip, ip, 0, (unsigned long *)sp, fregs)) parent_ip = ppc_function_entry(return_to_handler); - ftrace_test_recursion_unlock(bit); out: - fregs->regs.link = parent_ip; + arch_ftrace_regs(fregs)->regs.link = parent_ip; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c index 12fab1803bcf..5c6e545d1708 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_pg.c +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c @@ -23,7 +23,7 @@ #include <linux/list.h> #include <asm/cacheflush.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/ftrace.h> #include <asm/syscall.h> #include <asm/inst.h> @@ -116,6 +116,18 @@ static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op) } #ifdef CONFIG_MODULES +static struct module *ftrace_lookup_module(struct dyn_ftrace *rec) +{ + struct module *mod; + + scoped_guard(rcu) + mod = __module_text_address(rec->ip); + if (!mod) + pr_err("No module loaded at addr=%lx\n", rec->ip); + + return mod; +} + static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) @@ -124,6 +136,12 @@ __ftrace_make_nop(struct module *mod, unsigned long ip = rec->ip; ppc_inst_t op, pop; + if (!mod) { + mod = ftrace_lookup_module(rec); + if (!mod) + return -EINVAL; + } + /* read where this goes */ if
(copy_inst_from_kernel_nofault(&op, (void *)ip)) { pr_err("Fetching opcode failed.\n"); @@ -366,27 +384,6 @@ int ftrace_make_nop(struct module *mod, return -EINVAL; } - /* - * Out of range jumps are called from modules. - * We should either already have a pointer to the module - * or it has been passed in. - */ - if (!rec->arch.mod) { - if (!mod) { - pr_err("No module loaded addr=%lx\n", addr); - return -EFAULT; - } - rec->arch.mod = mod; - } else if (mod) { - if (mod != rec->arch.mod) { - pr_err("Record mod %p not equal to passed in mod %p\n", - rec->arch.mod, mod); - return -EINVAL; - } - /* nothing to do if mod == rec->arch.mod */ - } else - mod = rec->arch.mod; - return __ftrace_make_nop(mod, rec, addr); } @@ -411,7 +408,10 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) ppc_inst_t op[2]; void *ip = (void *)rec->ip; unsigned long entry, ptr, tramp; - struct module *mod = rec->arch.mod; + struct module *mod = ftrace_lookup_module(rec); + + if (!mod) + return -EINVAL; /* read where this goes */ if (copy_inst_from_kernel_nofault(op, ip)) @@ -533,16 +533,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - /* - * Out of range jumps are called from modules. - * Being that we are converting from nop, it had better - * already have a module defined. - */ - if (!rec->arch.mod) { - pr_err("No module loaded\n"); - return -EINVAL; - } - return __ftrace_make_call(rec, addr); } @@ -555,7 +545,10 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, ppc_inst_t op; unsigned long ip = rec->ip; unsigned long entry, ptr, tramp; - struct module *mod = rec->arch.mod; + struct module *mod = ftrace_lookup_module(rec); + + if (!mod) + return -EINVAL; /* If we never set up ftrace trampolines, then bail */ if (!mod->arch.tramp || !mod->arch.tramp_regs) { @@ -668,14 +661,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, return -EINVAL; } - /* - * Out of range jumps are called from modules. - */ - if (!rec->arch.mod) { - pr_err("No module loaded\n"); - return -EINVAL; - } - return __ftrace_modify_call(rec, old_addr, addr); } #endif @@ -800,10 +785,10 @@ int ftrace_disable_ftrace_graph_caller(void) * in current thread info. Return the address we want to divert to. 
*/ static unsigned long -__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp) +__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp, + struct ftrace_regs *fregs) { unsigned long return_hooker; - int bit; if (unlikely(ftrace_graph_is_dead())) goto out; @@ -811,16 +796,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp if (unlikely(atomic_read(&current->tracing_graph_pause))) goto out; - bit = ftrace_test_recursion_trylock(ip, parent); - if (bit < 0) - goto out; - return_hooker = ppc_function_entry(return_to_handler); - if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp)) + if (!function_graph_enter_regs(parent, ip, 0, (unsigned long *)sp, fregs)) parent = return_hooker; - ftrace_test_recursion_unlock(bit); out: return parent; } @@ -829,13 +809,14 @@ out: void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { - fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]); + arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, + arch_ftrace_regs(fregs)->regs.gpr[1], fregs); } #else unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp) { - return __prepare_ftrace_return(parent, ip, sp); + return __prepare_ftrace_return(parent, ip, sp, NULL); } #endif #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S index 76dbe9fd2c0f..3565c67fc638 100644 --- a/arch/powerpc/kernel/trace/ftrace_entry.S +++ b/arch/powerpc/kernel/trace/ftrace_entry.S @@ -39,13 +39,37 @@ /* Create our stack frame + pt_regs */ PPC_STLU r1,-SWITCH_FRAME_SIZE(r1) + .if \allregs == 1 + SAVE_GPRS(11, 12, r1) + .endif + + /* Get the _mcount() call site out of LR */ + mflr r11 + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + /* Load the ftrace_op */ + PPC_LL r12, -(MCOUNT_INSN_SIZE*2 + SZL)(r11) + + /* Load direct_call from the ftrace_op */ + PPC_LL r12, FTRACE_OPS_DIRECT_CALL(r12) + PPC_LCMPI r12, 0 + .if \allregs == 1 + bne .Lftrace_direct_call_regs + .else + bne .Lftrace_direct_call + .endif +#endif + + /* Save the previous LR in pt_regs->link */ + PPC_STL r0, _LINK(r1) + /* Also save it in A's stack frame */ + PPC_STL r0, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE+LRSAVE(r1) + /* Save all gprs to pt_regs */ SAVE_GPR(0, r1) SAVE_GPRS(3, 10, r1) #ifdef CONFIG_PPC64 - /* Save the original return address in A's stack frame */ - std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1) /* Ok to continue?
*/ lbz r3, PACA_FTRACE_ENABLED(r13) cmpdi r3, 0 @@ -54,9 +78,9 @@ .if \allregs == 1 SAVE_GPR(2, r1) - SAVE_GPRS(11, 31, r1) + SAVE_GPRS(13, 31, r1) .else -#ifdef CONFIG_LIVEPATCH_64 +#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE) SAVE_GPR(14, r1) #endif .endif @@ -67,80 +91,143 @@ .if \allregs == 1 /* Load special regs for save below */ + mfcr r7 mfmsr r8 mfctr r9 mfxer r10 - mfcr r11 .else /* Clear MSR to flag as ftrace_caller versus frace_regs_caller */ li r8, 0 .endif - /* Get the _mcount() call site out of LR */ - mflr r7 - /* Save it as pt_regs->nip */ - PPC_STL r7, _NIP(r1) - /* Also save it in B's stackframe header for proper unwind */ - PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1) - /* Save the read LR in pt_regs->link */ - PPC_STL r0, _LINK(r1) - #ifdef CONFIG_PPC64 /* Save callee's TOC in the ABI compliant location */ std r2, STK_GOT(r1) LOAD_PACA_TOC() /* get kernel TOC in r2 */ +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + /* r11 points to the instruction following the call to ftrace */ + PPC_LL r5, -(MCOUNT_INSN_SIZE*2 + SZL)(r11) + PPC_LL r12, FTRACE_OPS_FUNC(r5) + mtctr r12 +#else /* !CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS */ +#ifdef CONFIG_PPC64 LOAD_REG_ADDR(r3, function_trace_op) ld r5,0(r3) #else lis r3,function_trace_op@ha lwz r5,function_trace_op@l(r3) #endif - -#ifdef CONFIG_LIVEPATCH_64 - mr r14, r7 /* remember old NIP */ #endif - /* Calculate ip from nip-4 into r3 for call below */ - subi r3, r7, MCOUNT_INSN_SIZE - - /* Put the original return address in r4 as parent_ip */ - mr r4, r0 - /* Save special regs */ PPC_STL r8, _MSR(r1) .if \allregs == 1 + PPC_STL r7, _CCR(r1) PPC_STL r9, _CTR(r1) PPC_STL r10, _XER(r1) - PPC_STL r11, _CCR(r1) .endif +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + /* Clear orig_gpr3 to later detect ftrace_direct call */ + li r7, 0 + PPC_STL r7, ORIG_GPR3(r1) +#endif + +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + /* Save our real return address in nvr for return */ + .if \allregs == 0 + SAVE_GPR(15, r1) + .endif + mr r15, r11 + /* + * We want the ftrace location in the function, but our lr (in r11) + * points at the 'mtlr r0' instruction in the out of line stub. To + * recover the ftrace location, we read the branch instruction in the + * stub, and adjust our lr by the branch offset. + * + * See ftrace_init_ool_stub() for the profile sequence. + */ + lwz r8, MCOUNT_INSN_SIZE(r11) + slwi r8, r8, 6 + srawi r8, r8, 6 + add r3, r11, r8 + /* + * Override our nip to point past the branch in the original function. + * This allows reliable stack trace and the ftrace stack tracer to work as-is. 
+ */ + addi r11, r3, MCOUNT_INSN_SIZE +#else + /* Calculate ip from nip-4 into r3 for call below */ + subi r3, r11, MCOUNT_INSN_SIZE +#endif + + /* Save NIP as pt_regs->nip */ + PPC_STL r11, _NIP(r1) + /* Also save it in B's stackframe header for proper unwind */ + PPC_STL r11, LRSAVE+SWITCH_FRAME_SIZE(r1) +#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE) + mr r14, r11 /* remember old NIP */ +#endif + + /* Put the original return address in r4 as parent_ip */ + mr r4, r0 + /* Load &pt_regs in r6 for call below */ addi r6, r1, STACK_INT_FRAME_REGS .endm .macro ftrace_regs_exit allregs - /* Load ctr with the possibly modified NIP */ - PPC_LL r3, _NIP(r1) +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + /* Check orig_gpr3 to detect ftrace_direct call */ + PPC_LL r3, ORIG_GPR3(r1) + PPC_LCMPI cr1, r3, 0 mtctr r3 +#endif + /* Restore possibly modified LR */ + PPC_LL r0, _LINK(r1) + +#ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE + /* Load ctr with the possibly modified NIP */ + PPC_LL r3, _NIP(r1) #ifdef CONFIG_LIVEPATCH_64 cmpd r14, r3 /* has NIP been altered? */ #endif +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + beq cr1,2f + mtlr r3 + b 3f +#endif +2: mtctr r3 + mtlr r0 +3: + +#else /* !CONFIG_PPC_FTRACE_OUT_OF_LINE */ + /* Load LR with the possibly modified NIP */ + PPC_LL r3, _NIP(r1) + cmpd r14, r3 /* has NIP been altered? */ + bne- 1f + + mr r3, r15 +1: mtlr r3 + .if \allregs == 0 + REST_GPR(15, r1) + .endif +#endif + /* Restore gprs */ .if \allregs == 1 REST_GPRS(2, 31, r1) .else REST_GPRS(3, 10, r1) -#ifdef CONFIG_LIVEPATCH_64 +#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE) REST_GPR(14, r1) #endif .endif - /* Restore possibly modified LR */ - PPC_LL r0, _LINK(r1) - mtlr r0 - #ifdef CONFIG_PPC64 /* Restore callee's TOC */ ld r2, STK_GOT(r1) @@ -153,23 +240,46 @@ /* Based on the cmpd above, if the NIP was altered handle livepatch */ bne- livepatch_handler #endif - bctr /* jump after _mcount site */ + + /* jump after _mcount site */ +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + bnectr cr1 +#endif + /* + * Return with blr to keep the link stack balanced. The function profiling sequence + * uses 'mtlr r0' to restore LR. 
+ */ + blr +#else + bctr +#endif .endm -_GLOBAL(ftrace_regs_caller) - ftrace_regs_entry 1 - /* ftrace_call(r3, r4, r5, r6) */ +.macro ftrace_regs_func allregs +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS + bctrl +#else + .if \allregs == 1 .globl ftrace_regs_call ftrace_regs_call: + .else +.globl ftrace_call +ftrace_call: + .endif + /* ftrace_call(r3, r4, r5, r6) */ bl ftrace_stub +#endif +.endm + +_GLOBAL(ftrace_regs_caller) + ftrace_regs_entry 1 + ftrace_regs_func 1 ftrace_regs_exit 1 _GLOBAL(ftrace_caller) ftrace_regs_entry 0 - /* ftrace_call(r3, r4, r5, r6) */ -.globl ftrace_call -ftrace_call: - bl ftrace_stub + ftrace_regs_func 0 ftrace_regs_exit 0 _GLOBAL(ftrace_stub) @@ -177,6 +287,11 @@ _GLOBAL(ftrace_stub) #ifdef CONFIG_PPC64 ftrace_no_trace: +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + REST_GPR(3, r1) + addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE + blr +#else mflr r3 mtctr r3 REST_GPR(3, r1) @@ -184,6 +299,22 @@ ftrace_no_trace: mtlr r0 bctr #endif +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +.Lftrace_direct_call_regs: + mtctr r12 + REST_GPRS(11, 12, r1) + addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE + bctr +.Lftrace_direct_call: + mtctr r12 + addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE + bctr +SYM_FUNC_START(ftrace_stub_direct_tramp) + blr +SYM_FUNC_END(ftrace_stub_direct_tramp) +#endif #ifdef CONFIG_LIVEPATCH_64 /* @@ -194,11 +325,17 @@ ftrace_no_trace: * We get here when a function A, calls another function B, but B has * been live patched with a new function C. * - * On entry: - * - we have no stack frame and can not allocate one + * On entry, we have no stack frame and can not allocate one. + * + * With PPC_FTRACE_OUT_OF_LINE=n, on entry: * - LR points back to the original caller (in A) * - CTR holds the new NIP in C * - r0, r11 & r12 are free + * + * With PPC_FTRACE_OUT_OF_LINE=y, on entry: + * - r0 points back to the original caller (in A) + * - LR holds the new NIP in C + * - r11 & r12 are free */ livepatch_handler: ld r12, PACA_THREAD_INFO(r13) @@ -208,18 +345,23 @@ livepatch_handler: addi r11, r11, 24 std r11, TI_livepatch_sp(r12) - /* Save toc & real LR on livepatch stack */ - std r2, -24(r11) - mflr r12 - std r12, -16(r11) - /* Store stack end marker */ lis r12, STACK_END_MAGIC@h ori r12, r12, STACK_END_MAGIC@l std r12, -8(r11) - /* Put ctr in r12 for global entry and branch there */ + /* Save toc & real LR on livepatch stack */ + std r2, -24(r11) +#ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE + mflr r12 + std r12, -16(r11) mfctr r12 +#else + std r0, -16(r11) + mflr r12 + /* Put ctr in r12 for global entry and branch there */ + mtctr r12 +#endif bctrl /* @@ -308,6 +450,14 @@ _GLOBAL(return_to_handler) blr #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE +SYM_DATA(ftrace_ool_stub_text_count, .long CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE) + +SYM_START(ftrace_ool_stub_text, SYM_L_GLOBAL, .balign SZL) + .space CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE * FTRACE_OOL_STUB_SIZE +SYM_CODE_END(ftrace_ool_stub_text) +#endif + .pushsection ".tramp.ftrace.text","aw",@progbits; .globl ftrace_tramp_text ftrace_tramp_text: diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f23430adb68a..cb8e9357383e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -121,7 +121,7 @@ static void pmac_backlight_unblank(void) props = &pmac_backlight->props; props->brightness = props->max_brightness; - props->power = FB_BLANK_UNBLANK; + props->power = BACKLIGHT_POWER_ON; 
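/*
 * [Editor's note, not part of the commit] BACKLIGHT_POWER_ON is the
 * backlight-subsystem replacement for the fbdev constant
 * FB_BLANK_UNBLANK used here before; both should evaluate to 0, so the
 * store above is behaviourally unchanged. The generic unblank pattern,
 * as a hedged standalone sketch (function name is illustrative):
 *
 *	static void backlight_unblank(struct backlight_device *bd)
 *	{
 *		bd->props.brightness = bd->props.max_brightness;
 *		bd->props.power = BACKLIGHT_POWER_ON;
 *		backlight_update_status(bd);
 *	}
 */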
backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); @@ -263,10 +263,9 @@ static int __die(const char *str, struct pt_regs *regs, long err) { printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); - printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", + printk("%s PAGE_SIZE=%luK%s %s%s%s%s %s\n", IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", PAGE_SIZE / 1024, get_mmu_str(), - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", IS_ENABLED(CONFIG_SMP) ? " SMP" : "", IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", @@ -2244,7 +2243,7 @@ void __noreturn unrecoverable_exception(struct pt_regs *regs) ; } -#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) +#ifdef CONFIG_BOOKE_WDT DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException) { printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 92b3fc258d11..862b22b2b616 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -36,12 +36,6 @@ void __init udbg_early_init(void) #elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL) /* RTAS panel debug */ udbg_init_rtas_panel(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE) - /* RTAS console debug */ - udbg_init_rtas_console(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) - /* Maple real mode debug */ - udbg_init_maple_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) udbg_init_pas_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) @@ -49,9 +43,6 @@ void __init udbg_early_init(void) #elif defined(CONFIG_PPC_EARLY_DEBUG_44x) /* PPC44x debug */ udbg_init_44x_as1(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_40x) - /* PPC40x debug */ - udbg_init_40x_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_CPM) udbg_init_cpm(); #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index a0467e528b70..dfe8ed2192e8 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -205,29 +205,6 @@ void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride) udbg_use_uart(); } -#ifdef CONFIG_PPC_MAPLE - -#define UDBG_UART_MAPLE_ADDR ((void __iomem *)0xf40003f8) - -static u8 udbg_uart_in_maple(unsigned int reg) -{ - return real_readb(UDBG_UART_MAPLE_ADDR + reg); -} - -static void udbg_uart_out_maple(unsigned int reg, u8 val) -{ - real_writeb(val, UDBG_UART_MAPLE_ADDR + reg); -} - -void __init udbg_init_maple_realmode(void) -{ - udbg_uart_in = udbg_uart_in_maple; - udbg_uart_out = udbg_uart_out_maple; - udbg_use_uart(); -} - -#endif /* CONFIG_PPC_MAPLE */ - #ifdef CONFIG_PPC_PASEMI #define UDBG_UART_PAS_ADDR ((void __iomem *)0xfcff03f8UL) @@ -274,29 +251,6 @@ void __init udbg_init_44x_as1(void) #endif /* CONFIG_PPC_EARLY_DEBUG_44x */ -#ifdef CONFIG_PPC_EARLY_DEBUG_40x - -static u8 udbg_uart_in_40x(unsigned int reg) -{ - return real_readb((void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR - + reg); -} - -static void udbg_uart_out_40x(unsigned int reg, u8 val) -{ - real_writeb(val, (void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR - + reg); -} - -void __init udbg_init_40x_realmode(void) -{ - udbg_uart_in = udbg_uart_in_40x; - udbg_uart_out = udbg_uart_out_40x; - udbg_use_uart(); -} - -#endif /* CONFIG_PPC_EARLY_DEBUG_40x */ - #ifdef CONFIG_PPC_EARLY_DEBUG_16550 static void __iomem *udbg_uart_early_addr; diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 7a2ff9010f17..219d67bcf747 100644 --- 
a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -16,9 +16,8 @@ #include <linux/user.h> #include <linux/elf.h> #include <linux/security.h> -#include <linux/memblock.h> #include <linux/syscalls.h> -#include <linux/time_namespace.h> +#include <linux/vdso_datastore.h> #include <vdso/datapage.h> #include <asm/syscall.h> @@ -33,6 +32,8 @@ #include <asm/vdso_datapage.h> #include <asm/setup.h> +static_assert(__VDSO_PAGES == VDSO_NR_PAGES); + /* The alignment of the vDSO */ #define VDSO_ALIGNMENT (1 << 16) @@ -41,23 +42,6 @@ extern char vdso64_start, vdso64_end; long sys_ni_syscall(void); -/* - * The vdso data page (aka. systemcfg for old ppc64 fans) is here. - * Once the early boot kernel code no longer needs to muck around - * with it, it will become dynamically allocated - */ -static union { - struct vdso_arch_data data; - u8 page[PAGE_SIZE]; -} vdso_data_store __page_aligned_data; -struct vdso_arch_data *vdso_data = &vdso_data_store.data; - -enum vvar_pages { - VVAR_DATA_PAGE_OFFSET, - VVAR_TIMENS_PAGE_OFFSET, - VVAR_NR_PAGES, -}; - static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma, unsigned long text_size) { @@ -81,88 +65,33 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start); } -static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, struct vm_fault *vmf); +static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struct *vma) +{ + struct mm_struct *mm = vma->vm_mm; + + /* + * close() is called for munmap() but also for mremap(). In the mremap() + * case the vdso pointer has already been updated by the mremap() hook + * above, so it must not be set to NULL here. + */ + if (vma->vm_start != (unsigned long)mm->context.vdso) + return; -static struct vm_special_mapping vvar_spec __ro_after_init = { - .name = "[vvar]", - .fault = vvar_fault, -}; + mm->context.vdso = NULL; +} static struct vm_special_mapping vdso32_spec __ro_after_init = { .name = "[vdso]", .mremap = vdso32_mremap, + .close = vdso_close, }; static struct vm_special_mapping vdso64_spec __ro_after_init = { .name = "[vdso]", .mremap = vdso64_mremap, + .close = vdso_close, }; -#ifdef CONFIG_TIME_NS -struct vdso_data *arch_get_vdso_data(void *vvar_page) -{ - return ((struct vdso_arch_data *)vvar_page)->data; -} - -/* - * The vvar mapping contains data for a specific time namespace, so when a task - * changes namespace we must unmap its vvar data for the old namespace. - * Subsequent faults will map in data for the new namespace. - * - * For more details see timens_setup_vdso_data(). 
- */ -int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) -{ - struct mm_struct *mm = task->mm; - VMA_ITERATOR(vmi, mm, 0); - struct vm_area_struct *vma; - - mmap_read_lock(mm); - for_each_vma(vmi, vma) { - if (vma_is_special_mapping(vma, &vvar_spec)) - zap_vma_pages(vma); - } - mmap_read_unlock(mm); - - return 0; -} -#endif - -static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct page *timens_page = find_timens_vvar_page(vma); - unsigned long pfn; - - switch (vmf->pgoff) { - case VVAR_DATA_PAGE_OFFSET: - if (timens_page) - pfn = page_to_pfn(timens_page); - else - pfn = virt_to_pfn(vdso_data); - break; -#ifdef CONFIG_TIME_NS - case VVAR_TIMENS_PAGE_OFFSET: - /* - * If a task belongs to a time namespace then a namespace - * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and - * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET - * offset. - * See also the comment near timens_setup_vdso_data(). - */ - if (!timens_page) - return VM_FAULT_SIGBUS; - pfn = virt_to_pfn(vdso_data); - break; -#endif /* CONFIG_TIME_NS */ - default: - return VM_FAULT_SIGBUS; - } - - return vmf_insert_pfn(vma, vmf->address, pfn); -} - /* * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree @@ -171,7 +100,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int { unsigned long vdso_size, vdso_base, mappings_size; struct vm_special_mapping *vdso_spec; - unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE; + unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -197,16 +126,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int /* Add required alignment. */ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); - /* - * Put vDSO base into mm struct. We need to do this before calling - * install_special_mapping or the perf counter mmap tracking code - * will fail to recognise it as a vDSO. 
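The vdso_close() hook introduced above pairs with this reordering: the VMA close callback fires for both munmap() and mremap(), so the hook must only forget the vDSO location when the VMA being torn down is still the one mm->context.vdso points at. A minimal userspace-style sketch of that guard, with simplified stand-in types (fake_mm and fake_vma are illustrative, not kernel structures):

	/* Sketch of the munmap-vs-mremap disambiguation in vdso_close(). */
	struct fake_mm  { unsigned long vdso; };
	struct fake_vma { struct fake_mm *mm; unsigned long vm_start; };

	static void sketch_vdso_close(struct fake_vma *vma)
	{
		/*
		 * On mremap() the mremap hook has already pointed mm->vdso
		 * at the new location, so the VMA being torn down no longer
		 * matches and the pointer must be left alone.
		 */
		if (vma->vm_start != vma->mm->vdso)
			return;

		vma->mm->vdso = 0;	/* plain munmap(): the vDSO is really gone */
	}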
- */ - mm->context.vdso = (void __user *)vdso_base + vvar_size; - - vma = _install_special_mapping(mm, vdso_base, vvar_size, - VM_READ | VM_MAYREAD | VM_IO | - VM_DONTDUMP | VM_PFNMAP, &vvar_spec); + vma = vdso_install_vvar_mapping(mm, vdso_base); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -223,10 +143,15 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, vdso_spec); - if (IS_ERR(vma)) + if (IS_ERR(vma)) { do_munmap(mm, vdso_base, vvar_size, NULL); + return PTR_ERR(vma); + } + + // Now that the mappings are in place, set the mm VDSO pointer + mm->context.vdso = (void __user *)vdso_base + vvar_size; - return PTR_ERR_OR_ZERO(vma); + return 0; } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) @@ -240,8 +165,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -EINTR; rc = __arch_setup_additional_pages(bprm, uses_interp); - if (rc) - mm->context.vdso = NULL; mmap_write_unlock(mm); return rc; @@ -283,10 +206,10 @@ static void __init vdso_setup_syscall_map(void) for (i = 0; i < NR_syscalls; i++) { if (sys_call_table[i] != (void *)&sys_ni_syscall) - vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); + vdso_k_arch_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); if (IS_ENABLED(CONFIG_COMPAT) && compat_sys_call_table[i] != (void *)&sys_ni_syscall) - vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); + vdso_k_arch_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); } } @@ -336,29 +259,10 @@ static struct page ** __init vdso_setup_pages(void *start, void *end) static int __init vdso_init(void) { #ifdef CONFIG_PPC64 - /* - * Fill up the "systemcfg" stuff for backward compatibility - */ - strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64"); - vdso_data->version.major = SYSTEMCFG_MAJOR; - vdso_data->version.minor = SYSTEMCFG_MINOR; - vdso_data->processor = mfspr(SPRN_PVR); - /* - * Fake the old platform number for pSeries and add - * in LPAR bit if necessary - */ - vdso_data->platform = 0x100; - if (firmware_has_feature(FW_FEATURE_LPAR)) - vdso_data->platform |= 1; - vdso_data->physicalMemorySize = memblock_phys_mem_size(); - vdso_data->dcache_size = ppc64_caches.l1d.size; - vdso_data->dcache_line_size = ppc64_caches.l1d.line_size; - vdso_data->icache_size = ppc64_caches.l1i.size; - vdso_data->icache_line_size = ppc64_caches.l1i.line_size; - vdso_data->dcache_block_size = ppc64_caches.l1d.block_size; - vdso_data->icache_block_size = ppc64_caches.l1i.block_size; - vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size; - vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size; + vdso_k_arch_data->dcache_block_size = ppc64_caches.l1d.block_size; + vdso_k_arch_data->icache_block_size = ppc64_caches.l1i.block_size; + vdso_k_arch_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size; + vdso_k_arch_data->icache_log_block_size = ppc64_caches.l1i.log_block_size; #endif /* CONFIG_PPC64 */ vdso_setup_syscall_map(); diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index 1b93655c2857..8834dfe9d727 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -3,35 +3,26 @@ # List of files in the vdso, has to be asm only for now # Include the generic Makefile to check the built vdso. 
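The vdso_setup_syscall_map() hunk earlier in vdso.c packs one bit per syscall number into an array of 32-bit words, numbered from the most significant bit down, which is the layout the vDSO assembly expects. A hedged standalone sketch of the same indexing (NR_FAKE_SYSCALLS and the helper names are invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define NR_FAKE_SYSCALLS 96

	static uint32_t syscall_map[(NR_FAKE_SYSCALLS + 31) / 32];

	static void map_set(unsigned int i)
	{
		/* i >> 5 picks the 32-bit word, i & 0x1f the bit within it;
		 * 0x80000000 >> n counts bits from the MSB downwards. */
		syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}

	static int map_test(unsigned int i)
	{
		return (syscall_map[i >> 5] >> (31 - (i & 0x1f))) & 1;
	}

	int main(void)
	{
		map_set(0);
		map_set(33);
		printf("%d %d %d\n", map_test(0), map_test(33), map_test(1)); /* 1 1 0 */
		return 0;
	}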
-include $(srctree)/lib/vdso/Makefile +include $(srctree)/lib/vdso/Makefile.include obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o +obj-vdso32 += getrandom-32.o vgetrandom-chacha-32.o +obj-vdso64 += getrandom-64.o vgetrandom-chacha-64.o + ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday-32.o += -include $(c-gettimeofday-y) - CFLAGS_vgettimeofday-32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_vgettimeofday-32.o += $(call cc-option, -fno-stack-protector) - CFLAGS_vgettimeofday-32.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_vgettimeofday-32.o += -ffreestanding -fasynchronous-unwind-tables - CFLAGS_REMOVE_vgettimeofday-32.o = $(CC_FLAGS_FTRACE) - CFLAGS_REMOVE_vgettimeofday-32.o += -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc - # This flag is supported by clang for 64-bit but not 32-bit so it will cause - # an unused command line flag warning for this file. - ifdef CONFIG_CC_IS_CLANG - CFLAGS_REMOVE_vgettimeofday-32.o += -fno-stack-clash-protection - endif - CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) - CFLAGS_vgettimeofday-64.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_vgettimeofday-64.o += $(call cc-option, -fno-stack-protector) - CFLAGS_vgettimeofday-64.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_vgettimeofday-64.o += -ffreestanding -fasynchronous-unwind-tables - CFLAGS_REMOVE_vgettimeofday-64.o = $(CC_FLAGS_FTRACE) # Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true # by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is # compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code # generation is minimal, it will just use r29 instead. 
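The r30 note above is worth a concrete illustration: -ffixed-r30 removes the register from the compiler's allocator, the same mechanism a global register variable relies on. A hedged sketch of that effect (GCC/clang extension, assumes a powerpc target; go_g_reg and peek_r30 are invented names, not part of the patch):

	/* With -ffixed-r30 the allocator never touches r30, so a caller
	 * (here, an old Go runtime using r30 as its goroutine pointer)
	 * sees it preserved across any vDSO call. */
	register unsigned long go_g_reg asm("r30");

	unsigned long peek_r30(void)
	{
		/* Everything else the compiler needs falls back to r29
		 * and below, at negligible cost. */
		return go_g_reg;
	}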
- CFLAGS_vgettimeofday-64.o += $(call cc-option, -ffixed-r30) + CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) $(call cc-option, -ffixed-r30) +endif + +ifneq ($(c-getrandom-y),) + CFLAGS_vgetrandom-32.o += -include $(c-getrandom-y) + CFLAGS_vgetrandom-64.o += -include $(c-getrandom-y) endif # Build rules @@ -42,26 +33,36 @@ else VDSOCC := $(CC) endif -targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o +targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o vgetrandom-32.o +targets += crtsavres-32.o obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) -targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o +targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o vgetrandom-64.o obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) -GCOV_PROFILE := n -KCOV_INSTRUMENT := n -UBSAN_SANITIZE := n -KASAN_SANITIZE := n -KCSAN_SANITIZE := n - -ccflags-y := -fno-common -fno-builtin +ccflags-y := -fno-common -fno-builtin -DBUILD_VDSO +ccflags-y += $(DISABLE_LATENT_ENTROPY_PLUGIN) +ccflags-y += $(call cc-option, -fno-stack-protector) +ccflags-y += -DDISABLE_BRANCH_PROFILING +ccflags-y += -ffreestanding -fasynchronous-unwind-tables +ccflags-remove-y := $(CC_FLAGS_FTRACE) ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack $(CLANG_FLAGS) ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld) ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL) # Filter flags that clang will warn are unused for linking -ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS)) +ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) CC32FLAGS := -m32 +CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc -mpcrel +ifdef CONFIG_CC_IS_CLANG +# This flag is supported by clang for 64-bit but not 32-bit so it will cause +# an unused command line flag warning for this file. +CC32FLAGSREMOVE += -fno-stack-clash-protection +# -mstack-protector-guard values from the 64-bit build are not valid for the +# 32-bit one. clang validates the values passed to these arguments during +# parsing, even when -fno-stack-protector is passed afterwards. 
+CC32FLAGSREMOVE += -mstack-protector-guard% +endif LD32FLAGS := -Wl,-soname=linux-vdso32.so.1 AS32FLAGS := -D__VDSO32__ @@ -74,26 +75,32 @@ targets += vdso64.lds CPPFLAGS_vdso64.lds += -P -C # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE +$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o $(obj)/vgetrandom-32.o $(obj)/crtsavres-32.o FORCE $(call if_changed,vdso32ld_and_check) -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o FORCE +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o $(obj)/vgetrandom-64.o FORCE $(call if_changed,vdso64ld_and_check) # assembly rules for the .S files $(obj-vdso32): %-32.o: %.S FORCE $(call if_changed_dep,vdso32as) +$(obj)/crtsavres-32.o: %-32.o: $(srctree)/arch/powerpc/lib/crtsavres.S FORCE + $(call if_changed_dep,vdso32as) $(obj)/vgettimeofday-32.o: %-32.o: %.c FORCE $(call if_changed_dep,vdso32cc) +$(obj)/vgetrandom-32.o: %-32.o: %.c FORCE + $(call if_changed_dep,vdso32cc) $(obj-vdso64): %-64.o: %.S FORCE $(call if_changed_dep,vdso64as) $(obj)/vgettimeofday-64.o: %-64.o: %.c FORCE $(call if_changed_dep,cc_o_c) +$(obj)/vgetrandom-64.o: %-64.o: %.c FORCE + $(call if_changed_dep,cc_o_c) # Generate VDSO offsets using helper script -gen-vdso32sym := $(srctree)/$(src)/gen_vdso32_offsets.sh +gen-vdso32sym := $(src)/gen_vdso32_offsets.sh quiet_cmd_vdso32sym = VDSO32SYM $@ cmd_vdso32sym = $(NM) $< | $(gen-vdso32sym) | LC_ALL=C sort > $@ -gen-vdso64sym := $(srctree)/$(src)/gen_vdso64_offsets.sh +gen-vdso64sym := $(src)/gen_vdso64_offsets.sh quiet_cmd_vdso64sym = VDSO64SYM $@ cmd_vdso64sym = $(NM) $< | $(gen-vdso64sym) | LC_ALL=C sort > $@ @@ -108,11 +115,9 @@ quiet_cmd_vdso32ld_and_check = VDSO32L $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) $(AS32FLAGS) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ - cmd_vdso32cc = $(VDSOCC) $(c_flags) $(CC32FLAGS) -c -o $@ $< + cmd_vdso32cc = $(VDSOCC) $(filter-out $(CC32FLAGSREMOVE), $(c_flags)) $(CC32FLAGS) -c -o $@ $< quiet_cmd_vdso64ld_and_check = VDSO64L $@ cmd_vdso64ld_and_check = $(VDSOCC) $(ldflags-y) $(LD64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^); $(cmd_vdso_check) quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(VDSOCC) $(a_flags) $(AS64FLAGS) -c -o $@ $< - -OBJECT_FILES_NON_STANDARD := y diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S index 0085ae464dac..488d3ade11e6 100644 --- a/arch/powerpc/kernel/vdso/cacheflush.S +++ b/arch/powerpc/kernel/vdso/cacheflush.S @@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) #ifdef CONFIG_PPC64 mflr r12 .cfi_register lr,r12 - get_datapage r10 + get_datapage r10 vdso_u_arch_data mtlr r12 .cfi_restore lr #endif diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S index db8e167f0166..d23b2e8e2a34 100644 --- a/arch/powerpc/kernel/vdso/datapage.S +++ b/arch/powerpc/kernel/vdso/datapage.S @@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) mflr r12 .cfi_register lr,r12 mr. 
r4,r3 - get_datapage r3 + get_datapage r3 vdso_u_arch_data mtlr r12 #ifdef __powerpc64__ addi r3,r3,CFG_SYSCALL_MAP64 @@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 .cfi_register lr,r12 - get_datapage r3 + get_datapage r3 vdso_u_arch_data #ifndef __powerpc64__ lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) #endif diff --git a/arch/powerpc/kernel/vdso/getrandom.S b/arch/powerpc/kernel/vdso/getrandom.S new file mode 100644 index 000000000000..a80d9fb436f7 --- /dev/null +++ b/arch/powerpc/kernel/vdso/getrandom.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Userland implementation of getrandom() for processes + * for use in the vDSO + * + * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France + */ +#include <asm/processor.h> +#include <asm/ppc_asm.h> +#include <asm/vdso.h> +#include <asm/vdso_datapage.h> +#include <asm/asm-offsets.h> +#include <asm/unistd.h> + +/* + * The macro sets two stack frames, one for the caller and one for the callee + * because there are no requirement for the caller to set a stack frame when + * calling VDSO so it may have omitted to set one, especially on PPC64 + */ + +.macro cvdso_call funct + .cfi_startproc + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + .cfi_adjust_cfa_offset PPC_MIN_STKFRM + mflr r0 + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + .cfi_adjust_cfa_offset PPC_MIN_STKFRM + PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) + .cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF +#ifdef __powerpc64__ + PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1) + .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT +#endif + bl CFUNC(DOTSYM(\funct)) + PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) +#ifdef __powerpc64__ + PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) + .cfi_restore r2 +#endif + cmpwi r3, 0 + mtlr r0 + addi r1, r1, 2 * PPC_MIN_STKFRM + .cfi_restore lr + .cfi_def_cfa_offset 0 + crclr so + bgelr+ + crset so + neg r3, r3 + blr + .cfi_endproc +.endm + + .text +V_FUNCTION_BEGIN(__kernel_getrandom) + cvdso_call __c_kernel_getrandom +V_FUNCTION_END(__kernel_getrandom) diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S index 48fc6658053a..79c967212444 100644 --- a/arch/powerpc/kernel/vdso/gettimeofday.S +++ b/arch/powerpc/kernel/vdso/gettimeofday.S @@ -32,17 +32,12 @@ PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1) .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT #endif - get_datapage r5 .ifeq \call_time - addi r5, r5, VDSO_DATA_OFFSET + get_datapage r5 vdso_u_time_data .else - addi r4, r5, VDSO_DATA_OFFSET + get_datapage r4 vdso_u_time_data .endif -#ifdef __powerpc64__ bl CFUNC(DOTSYM(\funct)) -#else - bl \funct -#endif PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) #ifdef __powerpc64__ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) @@ -118,16 +113,3 @@ V_FUNCTION_END(__kernel_clock_getres) V_FUNCTION_BEGIN(__kernel_time) cvdso_call __c_kernel_time call_time=1 V_FUNCTION_END(__kernel_time) - -/* Routines for restoring integer registers, called by the compiler. */ -/* Called with r11 pointing to the stack header word of the caller of the */ -/* function, just beyond the end of the integer restore area. 
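The cvdso_call macro in getrandom.S above ends with the cmpwi/crclr/crset/neg sequence, which translates the C helper's Linux-style return value (negative errno on failure) into the PowerPC system-call ABI (summary-overflow bit set, positive errno in r3). A small C model of that translation, with invented names:

	struct abi_ret { unsigned long r3; int so; };	/* r3 and cr0.SO */

	static struct abi_ret to_syscall_abi(long c_ret)
	{
		struct abi_ret r;

		if (c_ret >= 0) {	/* "bgelr+": success path, SO clear */
			r.r3 = (unsigned long)c_ret;
			r.so = 0;
		} else {		/* "crset so; neg r3,r3" */
			r.r3 = (unsigned long)-c_ret;
			r.so = 1;
		}
		return r;
	}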
*/ -#ifndef __powerpc64__ -_GLOBAL(_restgpr_31_x) -_GLOBAL(_rest32gpr_31_x) - lwz r0,4(r11) - lwz r31,-4(r11) - mtlr r0 - mr r1,r11 - blr -#endif diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S index 426e1ccc6971..72a1012b8a20 100644 --- a/arch/powerpc/kernel/vdso/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso/vdso32.lds.S @@ -6,6 +6,7 @@ #include <asm/vdso.h> #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> +#include <vdso/datapage.h> #ifdef __LITTLE_ENDIAN__ OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle") @@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common) SECTIONS { - PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE); + VDSO_VVAR_SYMS + . = SIZEOF_HEADERS; .hash : { *(.hash) } :text @@ -74,6 +76,8 @@ SECTIONS .got : { *(.got) } :text .plt : { *(.plt) } + .rela.dyn : { *(.rela .rela*) } + _end = .; __end = .; PROVIDE(end = .); @@ -87,7 +91,7 @@ SECTIONS *(.branch_lt) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) - *(.got1 .glink .iplt .rela*) + *(.got1 .glink .iplt) } } @@ -128,6 +132,7 @@ VERSION #if defined(CONFIG_PPC64) || !defined(CONFIG_SMP) __kernel_getcpu; #endif + __kernel_getrandom; local: *; }; diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S index bda6c8cdd459..32102a05eaa7 100644 --- a/arch/powerpc/kernel/vdso/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso/vdso64.lds.S @@ -6,6 +6,7 @@ #include <asm/vdso.h> #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> +#include <vdso/datapage.h> #ifdef __LITTLE_ENDIAN__ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle") @@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common64) SECTIONS { - PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE); + VDSO_VVAR_SYMS + . 
= SIZEOF_HEADERS; .hash : { *(.hash) } :text @@ -69,7 +71,7 @@ SECTIONS .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text .gcc_except_table : { *(.gcc_except_table) } - .rela.dyn ALIGN(8) : { *(.rela.dyn) } + .rela.dyn ALIGN(8) : { *(.rela .rela*) } .got ALIGN(8) : { *(.got .toc) } @@ -86,7 +88,7 @@ SECTIONS *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) *(.opd) - *(.glink .iplt .plt .rela*) + *(.glink .iplt .plt) } } @@ -123,6 +125,7 @@ VERSION __kernel_sigtramp_rt64; __kernel_getcpu; __kernel_time; + __kernel_getrandom; local: *; }; diff --git a/arch/powerpc/kernel/vdso/vgetrandom-chacha.S b/arch/powerpc/kernel/vdso/vgetrandom-chacha.S new file mode 100644 index 000000000000..7f9061a9e8b4 --- /dev/null +++ b/arch/powerpc/kernel/vdso/vgetrandom-chacha.S @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France + */ + +#include <linux/linkage.h> + +#include <asm/ppc_asm.h> + +#define dst_bytes r3 +#define key r4 +#define counter r5 +#define nblocks r6 + +#define idx_r0 r0 +#define val4 r4 + +#define const0 0x61707865 +#define const1 0x3320646e +#define const2 0x79622d32 +#define const3 0x6b206574 + +#define key0 r5 +#define key1 r6 +#define key2 r7 +#define key3 r8 +#define key4 r9 +#define key5 r10 +#define key6 r11 +#define key7 r12 + +#define counter0 r14 +#define counter1 r15 + +#define state0 r16 +#define state1 r17 +#define state2 r18 +#define state3 r19 +#define state4 r20 +#define state5 r21 +#define state6 r22 +#define state7 r23 +#define state8 r24 +#define state9 r25 +#define state10 r26 +#define state11 r27 +#define state12 r28 +#define state13 r29 +#define state14 r30 +#define state15 r31 + +.macro quarterround4 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 a4 b4 c4 d4 + add \a1, \a1, \b1 + add \a2, \a2, \b2 + add \a3, \a3, \b3 + add \a4, \a4, \b4 + xor \d1, \d1, \a1 + xor \d2, \d2, \a2 + xor \d3, \d3, \a3 + xor \d4, \d4, \a4 + rotlwi \d1, \d1, 16 + rotlwi \d2, \d2, 16 + rotlwi \d3, \d3, 16 + rotlwi \d4, \d4, 16 + add \c1, \c1, \d1 + add \c2, \c2, \d2 + add \c3, \c3, \d3 + add \c4, \c4, \d4 + xor \b1, \b1, \c1 + xor \b2, \b2, \c2 + xor \b3, \b3, \c3 + xor \b4, \b4, \c4 + rotlwi \b1, \b1, 12 + rotlwi \b2, \b2, 12 + rotlwi \b3, \b3, 12 + rotlwi \b4, \b4, 12 + add \a1, \a1, \b1 + add \a2, \a2, \b2 + add \a3, \a3, \b3 + add \a4, \a4, \b4 + xor \d1, \d1, \a1 + xor \d2, \d2, \a2 + xor \d3, \d3, \a3 + xor \d4, \d4, \a4 + rotlwi \d1, \d1, 8 + rotlwi \d2, \d2, 8 + rotlwi \d3, \d3, 8 + rotlwi \d4, \d4, 8 + add \c1, \c1, \d1 + add \c2, \c2, \d2 + add \c3, \c3, \d3 + add \c4, \c4, \d4 + xor \b1, \b1, \c1 + xor \b2, \b2, \c2 + xor \b3, \b3, \c3 + xor \b4, \b4, \c4 + rotlwi \b1, \b1, 7 + rotlwi \b2, \b2, 7 + rotlwi \b3, \b3, 7 + rotlwi \b4, \b4, 7 +.endm + +#define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4) \ + quarterround4 state##a1 state##b1 state##c1 state##d1 \ + state##a2 state##b2 state##c2 state##d2 \ + state##a3 state##b3 state##c3 state##d3 \ + state##a4 state##b4 state##c4 state##d4 + +/* + * Very basic 32 bits implementation of ChaCha20. Produces a given positive number + * of blocks of output with a nonce of 0, taking an input key and 8-byte + * counter. Importantly does not spill to the stack. 
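The quarterround4 macro above interleaves four independent ChaCha quarter-rounds so the adds, xors and rotates can issue in parallel on the PPC pipelines. For reference, one scalar quarter-round with the standard 16/12/8/7 rotation schedule looks like this in C (a sketch for orientation, not the kernel code):

	#include <stdint.h>

	static uint32_t rotl32(uint32_t v, int n)
	{
		return (v << n) | (v >> (32 - n));
	}

	/* One ChaCha quarter-round; quarterround4 runs four of these
	 * side by side across the 16-word state. */
	static void quarterround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
	{
		*a += *b; *d ^= *a; *d = rotl32(*d, 16);
		*c += *d; *b ^= *c; *b = rotl32(*b, 12);
		*a += *b; *d ^= *a; *d = rotl32(*d, 8);
		*c += *d; *b ^= *c; *b = rotl32(*b, 7);
	}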
Its arguments are: + * + * r3: output bytes + * r4: 32-byte key input + * r5: 8-byte counter input/output (saved on stack) + * r6: number of 64-byte blocks to write to output + * + * r0: counter of blocks (initialised with r6) + * r4: Value '4' after key has been read. + * r5-r12: key + * r14-r15: counter + * r16-r31: state + */ +SYM_FUNC_START(__arch_chacha20_blocks_nostack) +#ifdef __powerpc64__ + std counter, -216(r1) + + std r14, -144(r1) + std r15, -136(r1) + std r16, -128(r1) + std r17, -120(r1) + std r18, -112(r1) + std r19, -104(r1) + std r20, -96(r1) + std r21, -88(r1) + std r22, -80(r1) + std r23, -72(r1) + std r24, -64(r1) + std r25, -56(r1) + std r26, -48(r1) + std r27, -40(r1) + std r28, -32(r1) + std r29, -24(r1) + std r30, -16(r1) + std r31, -8(r1) +#else + stwu r1, -96(r1) + stw counter, 20(r1) +#ifdef __BIG_ENDIAN__ + stmw r14, 24(r1) +#else + stw r14, 24(r1) + stw r15, 28(r1) + stw r16, 32(r1) + stw r17, 36(r1) + stw r18, 40(r1) + stw r19, 44(r1) + stw r20, 48(r1) + stw r21, 52(r1) + stw r22, 56(r1) + stw r23, 60(r1) + stw r24, 64(r1) + stw r25, 68(r1) + stw r26, 72(r1) + stw r27, 76(r1) + stw r28, 80(r1) + stw r29, 84(r1) + stw r30, 88(r1) + stw r31, 92(r1) +#endif +#endif /* __powerpc64__ */ + + lwz counter0, 0(counter) + lwz counter1, 4(counter) +#ifdef __powerpc64__ + rldimi counter0, counter1, 32, 0 +#endif + mr idx_r0, nblocks + subi dst_bytes, dst_bytes, 4 + + lwz key0, 0(key) + lwz key1, 4(key) + lwz key2, 8(key) + lwz key3, 12(key) + lwz key4, 16(key) + lwz key5, 20(key) + lwz key6, 24(key) + lwz key7, 28(key) + + li val4, 4 +.Lblock: + li r31, 10 + + lis state0, const0@ha + lis state1, const1@ha + lis state2, const2@ha + lis state3, const3@ha + addi state0, state0, const0@l + addi state1, state1, const1@l + addi state2, state2, const2@l + addi state3, state3, const3@l + + mtctr r31 + + mr state4, key0 + mr state5, key1 + mr state6, key2 + mr state7, key3 + mr state8, key4 + mr state9, key5 + mr state10, key6 + mr state11, key7 + + mr state12, counter0 + mr state13, counter1 + + li state14, 0 + li state15, 0 + +.Lpermute: + QUARTERROUND4( 0, 4, 8,12, 1, 5, 9,13, 2, 6,10,14, 3, 7,11,15) + QUARTERROUND4( 0, 5,10,15, 1, 6,11,12, 2, 7, 8,13, 3, 4, 9,14) + + bdnz .Lpermute + + addis state0, state0, const0@ha + addis state1, state1, const1@ha + addis state2, state2, const2@ha + addis state3, state3, const3@ha + addi state0, state0, const0@l + addi state1, state1, const1@l + addi state2, state2, const2@l + addi state3, state3, const3@l + + add state4, state4, key0 + add state5, state5, key1 + add state6, state6, key2 + add state7, state7, key3 + add state8, state8, key4 + add state9, state9, key5 + add state10, state10, key6 + add state11, state11, key7 + + add state12, state12, counter0 + add state13, state13, counter1 + +#ifdef __BIG_ENDIAN__ + stwbrx state0, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state1, 0, dst_bytes + stwbrx state2, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state3, 0, dst_bytes + stwbrx state4, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state5, 0, dst_bytes + stwbrx state6, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state7, 0, dst_bytes + stwbrx state8, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state9, 0, dst_bytes + stwbrx state10, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state11, 0, dst_bytes + stwbrx state12, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx state13, 0, dst_bytes + stwbrx state14, val4, dst_bytes + addi dst_bytes, dst_bytes, 8 + stwbrx 
state15, 0, dst_bytes +#else + stw state0, 4(dst_bytes) + stw state1, 8(dst_bytes) + stw state2, 12(dst_bytes) + stw state3, 16(dst_bytes) + stw state4, 20(dst_bytes) + stw state5, 24(dst_bytes) + stw state6, 28(dst_bytes) + stw state7, 32(dst_bytes) + stw state8, 36(dst_bytes) + stw state9, 40(dst_bytes) + stw state10, 44(dst_bytes) + stw state11, 48(dst_bytes) + stw state12, 52(dst_bytes) + stw state13, 56(dst_bytes) + stw state14, 60(dst_bytes) + stwu state15, 64(dst_bytes) +#endif + + subic. idx_r0, idx_r0, 1 /* subi. can't use r0 as source */ + +#ifdef __powerpc64__ + addi counter0, counter0, 1 + srdi counter1, counter0, 32 +#else + addic counter0, counter0, 1 + addze counter1, counter1 +#endif + + bne .Lblock + +#ifdef __powerpc64__ + ld counter, -216(r1) +#else + lwz counter, 20(r1) +#endif + stw counter0, 0(counter) + stw counter1, 4(counter) + + li r6, 0 + li r7, 0 + li r8, 0 + li r9, 0 + li r10, 0 + li r11, 0 + li r12, 0 + +#ifdef __powerpc64__ + ld r14, -144(r1) + ld r15, -136(r1) + ld r16, -128(r1) + ld r17, -120(r1) + ld r18, -112(r1) + ld r19, -104(r1) + ld r20, -96(r1) + ld r21, -88(r1) + ld r22, -80(r1) + ld r23, -72(r1) + ld r24, -64(r1) + ld r25, -56(r1) + ld r26, -48(r1) + ld r27, -40(r1) + ld r28, -32(r1) + ld r29, -24(r1) + ld r30, -16(r1) + ld r31, -8(r1) +#else +#ifdef __BIG_ENDIAN__ + lmw r14, 24(r1) +#else + lwz r14, 24(r1) + lwz r15, 28(r1) + lwz r16, 32(r1) + lwz r17, 36(r1) + lwz r18, 40(r1) + lwz r19, 44(r1) + lwz r20, 48(r1) + lwz r21, 52(r1) + lwz r22, 56(r1) + lwz r23, 60(r1) + lwz r24, 64(r1) + lwz r25, 68(r1) + lwz r26, 72(r1) + lwz r27, 76(r1) + lwz r28, 80(r1) + lwz r29, 84(r1) + lwz r30, 88(r1) + lwz r31, 92(r1) +#endif + addi r1, r1, 96 +#endif /* __powerpc64__ */ + blr +SYM_FUNC_END(__arch_chacha20_blocks_nostack) diff --git a/arch/powerpc/kernel/vdso/vgetrandom.c b/arch/powerpc/kernel/vdso/vgetrandom.c new file mode 100644 index 000000000000..cc79b960a541 --- /dev/null +++ b/arch/powerpc/kernel/vdso/vgetrandom.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Powerpc userspace implementation of getrandom() + * + * Copyright (C) 2024 Christophe Leroy <christophe.leroy@csgroup.eu>, CS GROUP France + */ +#include <linux/time.h> +#include <linux/types.h> + +ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, + size_t opaque_len) +{ + return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); +} diff --git a/arch/powerpc/kernel/vdso/vgettimeofday.c b/arch/powerpc/kernel/vdso/vgettimeofday.c index 55a287c9a736..6f5167d81af5 100644 --- a/arch/powerpc/kernel/vdso/vgettimeofday.c +++ b/arch/powerpc/kernel/vdso/vgettimeofday.c @@ -7,43 +7,43 @@ #ifdef __powerpc64__ int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_gettime_data(vd, clock, ts); } int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_getres_data(vd, clock_id, res); } #else int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_gettime32_data(vd, clock, ts); } int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_gettime_data(vd, clock, ts); } int __c_kernel_clock_getres(clockid_t clock_id, 
struct old_timespec32 *res, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_getres_time32_data(vd, clock_id, res); } #endif int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_gettimeofday_data(vd, tv, tz); } -__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_data *vd) +__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_time_data *vd) { return __cvdso_time_data(vd, time); } diff --git a/arch/powerpc/kernel/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32_wrapper.S index 10f92f265d51..20bca3548b44 100644 --- a/arch/powerpc/kernel/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32_wrapper.S @@ -2,7 +2,7 @@ #include <linux/linkage.h> #include <asm/page.h> - __PAGE_ALIGNED_DATA + .section ".data..ro_after_init", "aw" .globl vdso32_start, vdso32_end .balign PAGE_SIZE diff --git a/arch/powerpc/kernel/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64_wrapper.S index 839d1a61411d..1912936fa227 100644 --- a/arch/powerpc/kernel/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64_wrapper.S @@ -2,7 +2,7 @@ #include <linux/linkage.h> #include <asm/page.h> - __PAGE_ALIGNED_DATA + .section ".data..ro_after_init", "aw" .globl vdso64_start, vdso64_end .balign PAGE_SIZE diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index f420df7888a7..de6ee7d35cff 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -1,10 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifdef CONFIG_PPC64 -#define PROVIDE32(x) PROVIDE(__unused__##x) -#else -#define PROVIDE32(x) PROVIDE(x) -#endif - #define BSS_FIRST_SECTIONS *(.bss.prominit) #define EMITS_PT_NOTE #define RO_EXCEPTION_TABLE_ALIGN 0 @@ -123,13 +117,10 @@ SECTIONS */ *(.sfpr); *(.text.asan.* .text.tsan.*) - MEM_KEEP(init.text) - MEM_KEEP(exit.text) } :text . = ALIGN(PAGE_SIZE); _etext = .; - PROVIDE32 (etext = .); /* Read-only data */ RO_DATA(PAGE_SIZE) @@ -267,14 +258,13 @@ SECTIONS .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT - + *(.tramp.ftrace.init); /* *.init.text might be RO so we must ensure this section ends on * a page boundary. */ . = ALIGN(PAGE_SIZE); _einittext = .; - *(.tramp.ftrace.init); } :text /* .exit.text is discarded at runtime, not link time, @@ -397,7 +387,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); _edata = .; - PROVIDE32 (edata = .); /* * And finally the bss @@ -407,7 +396,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); _end = . ; - PROVIDE32 (end = .); DWARF_DEBUG ELF_DETAILS diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 8c464a5d8246..2429cb1c7baa 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -495,8 +495,7 @@ static void start_watchdog(void *arg) *this_cpu_ptr(&wd_timer_tb) = get_tb(); - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer->function = watchdog_timer_fn; + hrtimer_setup(hrtimer, watchdog_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms), HRTIMER_MODE_REL_PINNED); } diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile index 91e96f5168b7..470eb0453e17 100644 --- a/arch/powerpc/kexec/Makefile +++ b/arch/powerpc/kexec/Makefile @@ -3,12 +3,13 @@ # Makefile for the linux kernel. 
# -obj-y += core.o crash.o core_$(BITS).o +obj-y += core.o core_$(BITS).o ranges.o obj-$(CONFIG_PPC32) += relocate_32.o -obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o +obj-$(CONFIG_KEXEC_FILE) += file_load.o file_load_$(BITS).o elf_$(BITS).o obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o +obj-$(CONFIG_CRASH_DUMP) += crash.o # Disable GCOV, KCOV & sanitizers in odd or sensitive code GCOV_PROFILE_core_$(BITS).o := n diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index 3ff4411ed496..00e9c267b912 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -22,32 +22,12 @@ #include <asm/setup.h> #include <asm/firmware.h> -void machine_kexec_mask_interrupts(void) { - unsigned int i; - struct irq_desc *desc; - - for_each_irq_desc(i, desc) { - struct irq_chip *chip; - - chip = irq_desc_get_chip(desc); - if (!chip) - continue; - - if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) - chip->irq_eoi(&desc->irq_data); - - if (chip->irq_mask) - chip->irq_mask(&desc->irq_data); - - if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) - chip->irq_disable(&desc->irq_data); - } -} - +#ifdef CONFIG_CRASH_DUMP void machine_crash_shutdown(struct pt_regs *regs) { default_machine_crash_shutdown(regs); } +#endif void machine_kexec_cleanup(struct kimage *image) { @@ -77,38 +57,21 @@ void machine_kexec(struct kimage *image) for(;;); } -void __init reserve_crashkernel(void) -{ - unsigned long long crash_size, crash_base, total_mem_sz; - int ret; - - total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size(); - /* use common parsing */ - ret = parse_crashkernel(boot_command_line, total_mem_sz, - &crash_size, &crash_base, NULL, NULL); - if (ret == 0 && crash_size > 0) { - crashk_res.start = crash_base; - crashk_res.end = crash_base + crash_size - 1; - } - - if (crashk_res.end == crashk_res.start) { - crashk_res.start = crashk_res.end = 0; - return; - } +#ifdef CONFIG_CRASH_RESERVE - /* We might have got these values via the command line or the - * device tree, either way sanitise them now. */ - - crash_size = resource_size(&crashk_res); +static unsigned long long __init get_crash_base(unsigned long long crash_base) +{ #ifndef CONFIG_NONSTATIC_KERNEL - if (crashk_res.start != KDUMP_KERNELBASE) + if (crash_base != KDUMP_KERNELBASE) printk("Crash kernel location must be 0x%x\n", KDUMP_KERNELBASE); - crashk_res.start = KDUMP_KERNELBASE; + return KDUMP_KERNELBASE; #else - if (!crashk_res.start) { + unsigned long long crash_base_align; + + if (!crash_base) { #ifdef CONFIG_PPC64 /* * On the LPAR platform place the crash kernel to mid of @@ -120,53 +83,51 @@ void __init reserve_crashkernel(void) * kernel starts at 128MB offset on other platforms. 
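The placement policy spelled out in the comment above reduces to a clamp, which the min_t() calls just below implement: half of the real-mode area, capped at 512MB on LPAR and 128MB elsewhere. A hedged restatement in plain C (default_crash_base is an invented name):

	#include <stdint.h>

	#define SZ_128M (128ULL << 20)
	#define SZ_512M (512ULL << 20)

	/* Park the crash kernel at half the RMA, but never above the
	 * platform cap, so it stays real-mode accessible. */
	static uint64_t default_crash_base(uint64_t rma_size, int is_lpar)
	{
		uint64_t cap = is_lpar ? SZ_512M : SZ_128M;
		uint64_t half = rma_size / 2;

		return half < cap ? half : cap;
	}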
*/ if (firmware_has_feature(FW_FEATURE_LPAR)) - crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M); + crash_base = min_t(u64, ppc64_rma_size / 2, SZ_512M); else - crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M); + crash_base = min_t(u64, ppc64_rma_size / 2, SZ_128M); #else - crashk_res.start = KDUMP_KERNELBASE; + crash_base = KDUMP_KERNELBASE; #endif } - crash_base = PAGE_ALIGN(crashk_res.start); - if (crash_base != crashk_res.start) { - printk("Crash kernel base must be aligned to 0x%lx\n", - PAGE_SIZE); - crashk_res.start = crash_base; - } + crash_base_align = PAGE_ALIGN(crash_base); + if (crash_base != crash_base_align) + pr_warn("Crash kernel base must be aligned to 0x%lx\n", PAGE_SIZE); + return crash_base_align; #endif - crash_size = PAGE_ALIGN(crash_size); - crashk_res.end = crashk_res.start + crash_size - 1; +} - /* The crash region must not overlap the current kernel */ - if (overlaps_crashkernel(__pa(_stext), _end - _stext)) { - printk(KERN_WARNING - "Crash kernel can not overlap current kernel\n"); - crashk_res.start = crashk_res.end = 0; +void __init arch_reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base, crash_end; + unsigned long long kernel_start, kernel_size; + unsigned long long total_mem_sz; + int ret; + + total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size(); + + /* use common parsing */ + ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size, + &crash_base, NULL, NULL); + + if (ret) return; - } - /* Crash kernel trumps memory limit */ - if (memory_limit && memory_limit <= crashk_res.end) { - memory_limit = crashk_res.end + 1; - total_mem_sz = memory_limit; - printk("Adjusted memory limit for crashkernel, now 0x%llx\n", - memory_limit); - } + crash_base = get_crash_base(crash_base); + crash_end = crash_base + crash_size - 1; - printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " - "for crashkernel (System RAM: %ldMB)\n", - (unsigned long)(crash_size >> 20), - (unsigned long)(crashk_res.start >> 20), - (unsigned long)(total_mem_sz >> 20)); + kernel_start = __pa(_stext); + kernel_size = _end - _stext; - if (!memblock_is_region_memory(crashk_res.start, crash_size) || - memblock_reserve(crashk_res.start, crash_size)) { - pr_err("Failed to reserve memory for crashkernel!\n"); - crashk_res.start = crashk_res.end = 0; + /* The crash region must not overlap the current kernel */ + if ((kernel_start + kernel_size > crash_base) && (kernel_start <= crash_end)) { + pr_warn("Crash kernel can not overlap current kernel\n"); return; } + + reserve_crashkernel_generic(crash_size, crash_base, 0, false); } int __init overlaps_crashkernel(unsigned long start, unsigned long size) @@ -251,3 +212,4 @@ static int __init kexec_setup(void) return 0; } late_initcall(kexec_setup); +#endif /* CONFIG_CRASH_RESERVE */ diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c index c95f96850c9e..deb28eb44f30 100644 --- a/arch/powerpc/kexec/core_32.c +++ b/arch/powerpc/kexec/core_32.c @@ -7,6 +7,7 @@ * Copyright (C) 2005 IBM Corporation. 
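The rewritten overlap check above is the classic interval-intersection test, made slightly subtle by mixed conventions: the running kernel's span is half-open (kernel_size = _end - _stext), while crash_end is inclusive (crash_base + crash_size - 1). A standalone sketch of exactly that test:

	#include <stdbool.h>
	#include <stdint.h>

	/* Kernel occupies [kernel_start, kernel_start + kernel_size),
	 * crash region occupies [crash_base, crash_end] inclusive. */
	static bool crash_overlaps_kernel(uint64_t kernel_start, uint64_t kernel_size,
					  uint64_t crash_base, uint64_t crash_end)
	{
		return kernel_start + kernel_size > crash_base &&
		       kernel_start <= crash_end;
	}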
*/ +#include <linux/irq.h> #include <linux/kexec.h> #include <linux/mm.h> #include <linux/string.h> diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index 762e4d09aacf..222aa326dace 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -17,6 +17,7 @@ #include <linux/cpu.h> #include <linux/hardirq.h> #include <linux/of.h> +#include <linux/libfdt.h> #include <asm/page.h> #include <asm/current.h> @@ -26,10 +27,12 @@ #include <asm/paca.h> #include <asm/mmu.h> #include <asm/sections.h> /* _end */ +#include <asm/setup.h> #include <asm/smp.h> #include <asm/hw_breakpoint.h> #include <asm/svm.h> #include <asm/ultravisor.h> +#include <asm/crashdump-ppc64.h> int machine_kexec_prepare(struct kimage *image) { @@ -315,6 +318,16 @@ void default_machine_kexec(struct kimage *image) if (!kdump_in_progress()) kexec_prepare_cpus(); +#ifdef CONFIG_PPC_PSERIES + /* + * This must be done after other CPUs have shut down, otherwise they + * could execute the 'scv' instruction, which is not supported with + * reloc disabled (see configure_exceptions()). + */ + if (firmware_has_feature(FW_FEATURE_SET_MODE)) + pseries_disable_reloc_on_exc(); +#endif + printk("kexec: Starting switchover sequence.\n"); /* switch to a staticly allocated stack. Based on irq stack code. @@ -419,3 +432,113 @@ static int __init export_htab_values(void) } late_initcall(export_htab_values); #endif /* CONFIG_PPC_64S_HASH_MMU */ + +#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP) +/** + * add_node_props - Reads node properties from device node structure and add + * them to fdt. + * @fdt: Flattened device tree of the kernel + * @node_offset: offset of the node to add a property at + * @dn: device node pointer + * + * Returns 0 on success, negative errno on error. + */ +static int add_node_props(void *fdt, int node_offset, const struct device_node *dn) +{ + int ret = 0; + struct property *pp; + + if (!dn) + return -EINVAL; + + for_each_property_of_node(dn, pp) { + ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length); + if (ret < 0) { + pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret)); + return ret; + } + } + return ret; +} + +/** + * update_cpus_node - Update cpus node of flattened device tree using of_root + * device node. + * @fdt: Flattened device tree of the kernel. + * + * Returns 0 on success, negative errno on error. + * + * Note: expecting no subnodes under /cpus/<node> with device_type == "cpu". + * If this changes, update this function to include them. 
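The deletion pass of update_cpus_node(), shown below, walks the FDT's /cpus subnodes while removing some of them, which is why it carries prev_node_offset: after fdt_del_node() the current offset is dead, so iteration must resume from the last surviving node (or from /cpus itself while nothing has survived yet). The same delete-safe pattern over a linked list, purely for illustration:

	struct node { struct node *next; int is_cpu; };

	/* prev plays the role of prev_node_offset: it always names the
	 * last node known to still exist, so deleting the current node
	 * never invalidates the walk. */
	static void delete_cpu_nodes(struct node *cpus)
	{
		struct node *prev = cpus;
		struct node *cur  = cpus->next;

		while (cur) {
			struct node *next = cur->next;

			if (cur->is_cpu)
				prev->next = next;	/* fdt_del_node() analogue */
			else
				prev = cur;		/* keep it; advance the anchor */
			cur = next;
		}
	}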
+ */ +int update_cpus_node(void *fdt) +{ + int prev_node_offset; + const char *device_type; + const struct fdt_property *prop; + struct device_node *cpus_node, *dn; + int cpus_offset, cpus_subnode_offset, ret = 0; + + cpus_offset = fdt_path_offset(fdt, "/cpus"); + if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) { + pr_err("Malformed device tree: error reading /cpus node: %s\n", + fdt_strerror(cpus_offset)); + return cpus_offset; + } + + prev_node_offset = cpus_offset; + /* Delete sub-nodes of /cpus node with device_type == "cpu" */ + for (cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset); cpus_subnode_offset >= 0;) { + /* Ignore nodes that do not have a device_type property or device_type != "cpu" */ + prop = fdt_get_property(fdt, cpus_subnode_offset, "device_type", NULL); + if (!prop || strcmp(prop->data, "cpu")) { + prev_node_offset = cpus_subnode_offset; + goto next_node; + } + + ret = fdt_del_node(fdt, cpus_subnode_offset); + if (ret < 0) { + pr_err("Failed to delete a cpus sub-node: %s\n", fdt_strerror(ret)); + return ret; + } +next_node: + if (prev_node_offset == cpus_offset) + cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset); + else + cpus_subnode_offset = fdt_next_subnode(fdt, prev_node_offset); + } + + cpus_node = of_find_node_by_path("/cpus"); + /* Fail here to avoid kexec/kdump kernel boot hung */ + if (!cpus_node) { + pr_err("No /cpus node found\n"); + return -EINVAL; + } + + /* Add all /cpus sub-nodes of device_type == "cpu" to FDT */ + for_each_child_of_node(cpus_node, dn) { + /* Ignore device nodes that do not have a device_type property + * or device_type != "cpu". + */ + device_type = of_get_property(dn, "device_type", NULL); + if (!device_type || strcmp(device_type, "cpu")) + continue; + + cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name); + if (cpus_subnode_offset < 0) { + pr_err("Unable to add %s subnode: %s\n", dn->full_name, + fdt_strerror(cpus_subnode_offset)); + ret = cpus_subnode_offset; + goto out; + } + + ret = add_node_props(fdt, cpus_subnode_offset, dn); + if (ret < 0) + goto out; + } +out: + of_node_put(cpus_node); + of_node_put(dn); + return ret; +} +#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */ diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index ef5c2d25ec39..a325c1c02f96 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -16,6 +16,8 @@ #include <linux/delay.h> #include <linux/irq.h> #include <linux/types.h> +#include <linux/libfdt.h> +#include <linux/memory.h> #include <asm/processor.h> #include <asm/machdep.h> @@ -24,6 +26,7 @@ #include <asm/setjmp.h> #include <asm/debug.h> #include <asm/interrupt.h> +#include <asm/kexec_ranges.h> /* * The primary CPU waits a while for all secondary CPUs to enter. This is to @@ -356,7 +359,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs) if (TRAP(regs) == INTERRUPT_SYSTEM_RESET) is_via_system_reset = 1; - crash_smp_send_stop(); + if (IS_ENABLED(CONFIG_SMP)) + crash_smp_send_stop(); + else + crash_kexec_prepare(); crash_save_cpu(regs, crashing_cpu); @@ -392,3 +398,195 @@ void default_machine_crash_shutdown(struct pt_regs *regs) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(1, 0); } + +#ifdef CONFIG_CRASH_HOTPLUG +#undef pr_fmt +#define pr_fmt(fmt) "crash hp: " fmt + +/* + * Advertise preferred elfcorehdr size to userspace via + * /sys/kernel/crash_elfcorehdr_size sysfs interface. 
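To make the advertised size concrete: the function that follows budgets one ELF header plus a program header per possible CPU, one for vmcoreinfo, and (with memory hotplug) one per potential memory range. With 2048 possible CPUs and CONFIG_CRASH_MAX_MEMORY_RANGES at 8192, both example figures rather than values taken from this patch, a quick check gives:

	#include <elf.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long cpus = 2048, mem_ranges = 8192;	/* example figures */
		unsigned long phdrs = cpus + 1 + mem_ranges;	/* +1 for vmcoreinfo */

		/* sizeof(Elf64_Ehdr) is 64, sizeof(Elf64_Phdr) is 56,
		 * so this prints 573560 bytes. */
		printf("%lu bytes\n",
		       (unsigned long)(sizeof(Elf64_Ehdr) + phdrs * sizeof(Elf64_Phdr)));
		return 0;
	}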
+ */ +unsigned int arch_crash_get_elfcorehdr_size(void) +{ + unsigned long phdr_cnt; + + /* A program header for possible CPUs + vmcoreinfo */ + phdr_cnt = num_possible_cpus() + 1; + if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) + phdr_cnt += CONFIG_CRASH_MAX_MEMORY_RANGES; + + return sizeof(struct elfhdr) + (phdr_cnt * sizeof(Elf64_Phdr)); +} + +/** + * update_crash_elfcorehdr() - Recreate the elfcorehdr and replace it with old + * elfcorehdr in the kexec segment array. + * @image: the active struct kimage + * @mn: struct memory_notify data handler + */ +static void update_crash_elfcorehdr(struct kimage *image, struct memory_notify *mn) +{ + int ret; + struct crash_mem *cmem = NULL; + struct kexec_segment *ksegment; + void *ptr, *mem, *elfbuf = NULL; + unsigned long elfsz, memsz, base_addr, size; + + ksegment = &image->segment[image->elfcorehdr_index]; + mem = (void *) ksegment->mem; + memsz = ksegment->memsz; + + ret = get_crash_memory_ranges(&cmem); + if (ret) { + pr_err("Failed to get crash mem range\n"); + return; + } + + /* + * The hot unplugged memory is part of crash memory ranges, + * remove it here. + */ + if (image->hp_action == KEXEC_CRASH_HP_REMOVE_MEMORY) { + base_addr = PFN_PHYS(mn->start_pfn); + size = mn->nr_pages * PAGE_SIZE; + ret = remove_mem_range(&cmem, base_addr, size); + if (ret) { + pr_err("Failed to remove hot-unplugged memory from crash memory ranges\n"); + goto out; + } + } + + ret = crash_prepare_elf64_headers(cmem, false, &elfbuf, &elfsz); + if (ret) { + pr_err("Failed to prepare elf header\n"); + goto out; + } + + /* + * It is unlikely that kernel hit this because elfcorehdr kexec + * segment (memsz) is built with addition space to accommodate growing + * number of crash memory ranges while loading the kdump kernel. It is + * Just to avoid any unforeseen case. + */ + if (elfsz > memsz) { + pr_err("Updated crash elfcorehdr elfsz %lu > memsz %lu", elfsz, memsz); + goto out; + } + + ptr = __va(mem); + if (ptr) { + /* Temporarily invalidate the crash image while it is replaced */ + xchg(&kexec_crash_image, NULL); + + /* Replace the old elfcorehdr with newly prepared elfcorehdr */ + memcpy((void *)ptr, elfbuf, elfsz); + + /* The crash image is now valid once again */ + xchg(&kexec_crash_image, image); + } +out: + kvfree(cmem); + kvfree(elfbuf); +} + +/** + * get_fdt_index - Loop through the kexec segment array and find + * the index of the FDT segment. + * @image: a pointer to kexec_crash_image + * + * Returns the index of FDT segment in the kexec segment array + * if found; otherwise -1. + */ +static int get_fdt_index(struct kimage *image) +{ + void *ptr; + unsigned long mem; + int i, fdt_index = -1; + + /* Find the FDT segment index in kexec segment array. */ + for (i = 0; i < image->nr_segments; i++) { + mem = image->segment[i].mem; + ptr = __va(mem); + + if (ptr && fdt_magic(ptr) == FDT_MAGIC) { + fdt_index = i; + break; + } + } + + return fdt_index; +} + +/** + * update_crash_fdt - updates the cpus node of the crash FDT. 
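Both hotplug update paths here use the same publish protocol: swing the global kexec_crash_image pointer to NULL, rewrite the segment, then swing it back, so a crash arriving mid-update finds either no image or a complete one, never a torn one. A hedged sketch of the pattern with C11 atomics (names invented; the kernel uses xchg() for its implicit ordering guarantees rather than plain atomic stores):

	#include <stdatomic.h>
	#include <stddef.h>
	#include <string.h>

	struct kimage;
	static _Atomic(struct kimage *) crash_image;

	static void replace_segment(struct kimage *image, void *dst,
				    const void *src, size_t len)
	{
		/* Unpublish: a concurrent crash now finds no image to use. */
		atomic_store(&crash_image, NULL);

		memcpy(dst, src, len);	/* safe: nobody can consume it */

		/* Republish the fully updated image. */
		atomic_store(&crash_image, image);
	}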
+ * + * @image: a pointer to kexec_crash_image + */ +static void update_crash_fdt(struct kimage *image) +{ + void *fdt; + int fdt_index; + + fdt_index = get_fdt_index(image); + if (fdt_index < 0) { + pr_err("Unable to locate FDT segment.\n"); + return; + } + + fdt = __va((void *)image->segment[fdt_index].mem); + + /* Temporarily invalidate the crash image while it is replaced */ + xchg(&kexec_crash_image, NULL); + + /* update FDT to reflect changes in CPU resources */ + if (update_cpus_node(fdt)) + pr_err("Failed to update crash FDT"); + + /* The crash image is now valid once again */ + xchg(&kexec_crash_image, image); +} + +int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags) +{ +#ifdef CONFIG_KEXEC_FILE + if (image->file_mode) + return 1; +#endif + return kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT; +} + +/** + * arch_crash_handle_hotplug_event - Handle crash CPU/Memory hotplug events to update the + * necessary kexec segments based on the hotplug event. + * @image: a pointer to kexec_crash_image + * @arg: struct memory_notify handler for memory hotplug case and NULL for CPU hotplug case. + * + * Update the kdump image based on the type of hotplug event, represented by image->hp_action. + * CPU add: Update the FDT segment to include the newly added CPU. + * CPU remove: No action is needed, with the assumption that it's okay to have offline CPUs + * part of the FDT. + * Memory add/remove: No action is taken as this is not yet supported. + */ +void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) +{ + struct memory_notify *mn; + + switch (image->hp_action) { + case KEXEC_CRASH_HP_REMOVE_CPU: + return; + + case KEXEC_CRASH_HP_ADD_CPU: + update_crash_fdt(image); + break; + + case KEXEC_CRASH_HP_REMOVE_MEMORY: + case KEXEC_CRASH_HP_ADD_MEMORY: + mn = (struct memory_notify *)arg; + update_crash_elfcorehdr(image, mn); + return; + default: + pr_warn_once("Unknown hotplug action\n"); + } +} +#endif /* CONFIG_CRASH_HOTPLUG */ diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c index 904016cf89ea..5d6d616404cf 100644 --- a/arch/powerpc/kexec/elf_64.c +++ b/arch/powerpc/kexec/elf_64.c @@ -23,6 +23,7 @@ #include <linux/of_fdt.h> #include <linux/slab.h> #include <linux/types.h> +#include <asm/kexec_ranges.h> static void *elf64_load(struct kimage *image, char *kernel_buf, unsigned long kernel_len, char *initrd, @@ -36,6 +37,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, const void *slave_code; struct elfhdr ehdr; char *modified_cmdline = NULL; + struct crash_mem *rmem = NULL; struct kexec_elf_info elf_info; struct kexec_buf kbuf = { .image = image, .buf_min = 0, .buf_max = ppc64_rma_size }; @@ -47,7 +49,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, if (ret) return ERR_PTR(ret); - if (image->type == KEXEC_TYPE_CRASH) { + if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) { /* min & max buffer values for kdump case */ kbuf.buf_min = pbuf.buf_min = crashk_res.start; kbuf.buf_max = pbuf.buf_max = @@ -70,7 +72,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem); /* Load additional segments needed for panic kernel */ - if (image->type == KEXEC_TYPE_CRASH) { + if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) { ret = load_crashdump_segments_ppc64(image, &kbuf); if (ret) { pr_err("Failed to load kdump kernel segments\n"); @@ -102,21 +104,25 @@ static void *elf64_load(struct kimage *image, char 
*kernel_buf, kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_load_addr); } + ret = get_reserved_memory_ranges(&rmem); + if (ret) + goto out; + fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr, initrd_len, cmdline, - kexec_extra_fdt_size_ppc64(image)); + kexec_extra_fdt_size_ppc64(image, rmem)); if (!fdt) { pr_err("Error setting up the new device tree.\n"); ret = -EINVAL; goto out; } - ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr, - initrd_len, cmdline); + ret = setup_new_fdt_ppc64(image, fdt, rmem); if (ret) goto out_free_fdt; - fdt_pack(fdt); + if (!IS_ENABLED(CONFIG_CRASH_HOTPLUG) || image->type != KEXEC_TYPE_CRASH) + fdt_pack(fdt); kbuf.buffer = fdt; kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt); @@ -145,6 +151,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, out_free_fdt: kvfree(fdt); out: + kfree(rmem); kfree(modified_cmdline); kexec_free_elf_info(&elf_info); diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 5b4c5cb23354..e7ef8b2a2554 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -18,6 +18,7 @@ #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -30,6 +31,7 @@ #include <asm/iommu.h> #include <asm/prom.h> #include <asm/plpks.h> +#include <asm/cputhreads.h> struct umem_info { __be64 *buf; /* data buffer for usable-memory property */ @@ -47,393 +49,21 @@ const struct kexec_file_ops * const kexec_file_loaders[] = { NULL }; -/** - * get_exclude_memory_ranges - Get exclude memory ranges. This list includes - * regions like opal/rtas, tce-table, initrd, - * kernel, htab which should be avoided while - * setting up kexec load segments. - * @mem_ranges: Range list to add the memory ranges to. - * - * Returns 0 on success, negative errno on error. - */ -static int get_exclude_memory_ranges(struct crash_mem **mem_ranges) -{ - int ret; - - ret = add_tce_mem_ranges(mem_ranges); - if (ret) - goto out; - - ret = add_initrd_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_htab_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_kernel_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_rtas_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_opal_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_reserved_mem_ranges(mem_ranges); - if (ret) - goto out; - - /* exclude memory ranges should be sorted for easy lookup */ - sort_memory_ranges(*mem_ranges, true); -out: - if (ret) - pr_err("Failed to setup exclude memory ranges\n"); - return ret; -} - -/** - * get_usable_memory_ranges - Get usable memory ranges. This list includes - * regions like crashkernel, opal/rtas & tce-table, - * that kdump kernel could use. - * @mem_ranges: Range list to add the memory ranges to. - * - * Returns 0 on success, negative errno on error. - */ -static int get_usable_memory_ranges(struct crash_mem **mem_ranges) -{ - int ret; - - /* - * Early boot failure observed on guests when low memory (first memory - * block?) is not added to usable memory. So, add [0, crashk_res.end] - * instead of [crashk_res.start, crashk_res.end] to workaround it. - * Also, crashed kernel's memory must be added to reserve map to - * avoid kdump kernel from using it. 
- */ - ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1); - if (ret) - goto out; - - ret = add_rtas_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_opal_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_tce_mem_ranges(mem_ranges); -out: - if (ret) - pr_err("Failed to setup usable memory ranges\n"); - return ret; -} - -/** - * get_crash_memory_ranges - Get crash memory ranges. This list includes - * first/crashing kernel's memory regions that - * would be exported via an elfcore. - * @mem_ranges: Range list to add the memory ranges to. - * - * Returns 0 on success, negative errno on error. - */ -static int get_crash_memory_ranges(struct crash_mem **mem_ranges) -{ - phys_addr_t base, end; - struct crash_mem *tmem; - u64 i; - int ret; - - for_each_mem_range(i, &base, &end) { - u64 size = end - base; - - /* Skip backup memory region, which needs a separate entry */ - if (base == BACKUP_SRC_START) { - if (size > BACKUP_SRC_SIZE) { - base = BACKUP_SRC_END + 1; - size -= BACKUP_SRC_SIZE; - } else - continue; - } - - ret = add_mem_range(mem_ranges, base, size); - if (ret) - goto out; - - /* Try merging adjacent ranges before reallocation attempt */ - if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges) - sort_memory_ranges(*mem_ranges, true); - } - - /* Reallocate memory ranges if there is no space to split ranges */ - tmem = *mem_ranges; - if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) { - tmem = realloc_mem_ranges(mem_ranges); - if (!tmem) - goto out; - } - - /* Exclude crashkernel region */ - ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end); - if (ret) - goto out; - - /* - * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL - * regions are exported to save their context at the time of - * crash, they should actually be backed up just like the - * first 64K bytes of memory. - */ - ret = add_rtas_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_opal_mem_range(mem_ranges); - if (ret) - goto out; - - /* create a separate program header for the backup region */ - ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE); - if (ret) - goto out; - - sort_memory_ranges(*mem_ranges, false); -out: - if (ret) - pr_err("Failed to setup crash memory ranges\n"); - return ret; -} - -/** - * get_reserved_memory_ranges - Get reserve memory ranges. This list includes - * memory regions that should be added to the - * memory reserve map to ensure the region is - * protected from any mischief. - * @mem_ranges: Range list to add the memory ranges to. - * - * Returns 0 on success, negative errno on error. - */ -static int get_reserved_memory_ranges(struct crash_mem **mem_ranges) -{ - int ret; - - ret = add_rtas_mem_range(mem_ranges); - if (ret) - goto out; - - ret = add_tce_mem_ranges(mem_ranges); - if (ret) - goto out; - - ret = add_reserved_mem_ranges(mem_ranges); -out: - if (ret) - pr_err("Failed to setup reserved memory ranges\n"); - return ret; -} - -/** - * __locate_mem_hole_top_down - Looks top down for a large enough memory hole - * in the memory regions between buf_min & buf_max - * for the buffer. If found, sets kbuf->mem. - * @kbuf: Buffer contents and memory parameters. - * @buf_min: Minimum address for the buffer. - * @buf_max: Maximum address for the buffer. - * - * Returns 0 on success, negative errno on error. 
- */ -static int __locate_mem_hole_top_down(struct kexec_buf *kbuf, - u64 buf_min, u64 buf_max) -{ - int ret = -EADDRNOTAVAIL; - phys_addr_t start, end; - u64 i; - - for_each_mem_range_rev(i, &start, &end) { - /* - * memblock uses [start, end) convention while it is - * [start, end] here. Fix the off-by-one to have the - * same convention. - */ - end -= 1; - - if (start > buf_max) - continue; - - /* Memory hole not found */ - if (end < buf_min) - break; - - /* Adjust memory region based on the given range */ - if (start < buf_min) - start = buf_min; - if (end > buf_max) - end = buf_max; - - start = ALIGN(start, kbuf->buf_align); - if (start < end && (end - start + 1) >= kbuf->memsz) { - /* Suitable memory range found. Set kbuf->mem */ - kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1, - kbuf->buf_align); - ret = 0; - break; - } - } - - return ret; -} - -/** - * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a - * suitable buffer with top down approach. - * @kbuf: Buffer contents and memory parameters. - * @buf_min: Minimum address for the buffer. - * @buf_max: Maximum address for the buffer. - * @emem: Exclude memory ranges. - * - * Returns 0 on success, negative errno on error. - */ -static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf, - u64 buf_min, u64 buf_max, - const struct crash_mem *emem) -{ - int i, ret = 0, err = -EADDRNOTAVAIL; - u64 start, end, tmin, tmax; - - tmax = buf_max; - for (i = (emem->nr_ranges - 1); i >= 0; i--) { - start = emem->ranges[i].start; - end = emem->ranges[i].end; - - if (start > tmax) - continue; - - if (end < tmax) { - tmin = (end < buf_min ? buf_min : end + 1); - ret = __locate_mem_hole_top_down(kbuf, tmin, tmax); - if (!ret) - return 0; - } - - tmax = start - 1; - - if (tmax < buf_min) { - ret = err; - break; - } - ret = 0; - } - - if (!ret) { - tmin = buf_min; - ret = __locate_mem_hole_top_down(kbuf, tmin, tmax); - } - return ret; -} - -/** - * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole - * in the memory regions between buf_min & buf_max - * for the buffer. If found, sets kbuf->mem. - * @kbuf: Buffer contents and memory parameters. - * @buf_min: Minimum address for the buffer. - * @buf_max: Maximum address for the buffer. - * - * Returns 0 on success, negative errno on error. - */ -static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf, - u64 buf_min, u64 buf_max) -{ - int ret = -EADDRNOTAVAIL; - phys_addr_t start, end; - u64 i; - - for_each_mem_range(i, &start, &end) { - /* - * memblock uses [start, end) convention while it is - * [start, end] here. Fix the off-by-one to have the - * same convention. - */ - end -= 1; - - if (end < buf_min) - continue; - - /* Memory hole not found */ - if (start > buf_max) - break; - - /* Adjust memory region based on the given range */ - if (start < buf_min) - start = buf_min; - if (end > buf_max) - end = buf_max; - - start = ALIGN(start, kbuf->buf_align); - if (start < end && (end - start + 1) >= kbuf->memsz) { - /* Suitable memory range found. Set kbuf->mem */ - kbuf->mem = start; - ret = 0; - break; - } - } - - return ret; -} - -/** - * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a - * suitable buffer with bottom up approach. - * @kbuf: Buffer contents and memory parameters. - * @buf_min: Minimum address for the buffer. - * @buf_max: Maximum address for the buffer. - * @emem: Exclude memory ranges. - * - * Returns 0 on success, negative errno on error. 
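The removed hole-finding helpers above all begin with the same "end -= 1" dance because memblock hands out half-open [start, end) ranges while this code reasons in inclusive [start, end] terms. The size arithmetic under each convention, spelled out in a tiny example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t start = 0x1000, end_excl = 0x2000;
		uint64_t end_incl = end_excl - 1;	/* the "end -= 1" step */

		/* Both print 4096: the "+ 1" in the inclusive form is the
		 * counterpart of the earlier "- 1". */
		printf("half-open size: %llu\n",
		       (unsigned long long)(end_excl - start));
		printf("inclusive size: %llu\n",
		       (unsigned long long)(end_incl - start + 1));
		return 0;
	}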
- */ -static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf, - u64 buf_min, u64 buf_max, - const struct crash_mem *emem) +int arch_check_excluded_range(struct kimage *image, unsigned long start, + unsigned long end) { - int i, ret = 0, err = -EADDRNOTAVAIL; - u64 start, end, tmin, tmax; - - tmin = buf_min; - for (i = 0; i < emem->nr_ranges; i++) { - start = emem->ranges[i].start; - end = emem->ranges[i].end; - - if (end < tmin) - continue; - - if (start > tmin) { - tmax = (start > buf_max ? buf_max : start - 1); - ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax); - if (!ret) - return 0; - } + struct crash_mem *emem; + int i; - tmin = end + 1; + emem = image->arch.exclude_ranges; + for (i = 0; i < emem->nr_ranges; i++) + if (start < emem->ranges[i].end && end > emem->ranges[i].start) + return 1; - if (tmin > buf_max) { - ret = err; - break; - } - ret = 0; - } - - if (!ret) { - tmax = buf_max; - ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax); - } - return ret; + return 0; } +#ifdef CONFIG_CRASH_DUMP /** * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries * @um_info: Usable memory buffer and ranges info. @@ -564,11 +194,10 @@ static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm, static int add_usable_mem_property(void *fdt, struct device_node *dn, struct umem_info *um_info) { - int n_mem_addr_cells, n_mem_size_cells, node; + int node; char path[NODE_PATH_LEN]; - int i, len, ranges, ret; - const __be32 *prop; - u64 base, end; + int i, ret; + u64 base, size; of_node_get(dn); @@ -587,41 +216,30 @@ static int add_usable_mem_property(void *fdt, struct device_node *dn, goto out; } - /* Get the address & size cells */ - n_mem_addr_cells = of_n_addr_cells(dn); - n_mem_size_cells = of_n_size_cells(dn); - kexec_dprintk("address cells: %d, size cells: %d\n", n_mem_addr_cells, - n_mem_size_cells); - um_info->idx = 0; if (!check_realloc_usable_mem(um_info, 2)) { ret = -ENOMEM; goto out; } - prop = of_get_property(dn, "reg", &len); - if (!prop || len <= 0) { - ret = 0; - goto out; - } - /* * "reg" property represents sequence of (addr,size) tuples * each representing a memory range. */ - ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); - - for (i = 0; i < ranges; i++) { - base = of_read_number(prop, n_mem_addr_cells); - prop += n_mem_addr_cells; - end = base + of_read_number(prop, n_mem_size_cells) - 1; - prop += n_mem_size_cells; + for (i = 0; ; i++) { + ret = of_property_read_reg(dn, i, &base, &size); + if (ret) + break; - ret = add_usable_mem(um_info, base, end); + ret = add_usable_mem(um_info, base, base + size - 1); if (ret) goto out; } + // No reg or empty reg? Skip this node. + if (i == 0) + goto out; + /* * No kdump kernel usable memory found in this memory node. 
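All of this hole-searching machinery is replaced by the much simpler arch_check_excluded_range() overlap test added in the same hunk. Stripped of the kimage plumbing, it is the classic interval-overlap predicate; a standalone sketch, assuming half-open [start, end) intervals on both sides (with inclusive range ends the comparisons would become <= and >=):

#include <stdint.h>

struct xrange { uint64_t start, end; };

/* 1 if [start, end) overlaps any excluded range, else 0. */
static int overlaps_excluded(const struct xrange *x, int n,
			     uint64_t start, uint64_t end)
{
	int i;

	for (i = 0; i < n; i++)
		if (start < x[i].end && end > x[i].start)
			return 1;
	return 0;
}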
* Write (0,0) tuple in linux,usable-memory property for @@ -783,6 +401,23 @@ static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr) } } +static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem) +{ +#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG) + unsigned int extra_sz = 0; + + if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM) + pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES); + else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES) + pr_warn("Configured crash mem ranges may not be enough\n"); + else + extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr); + + return extra_sz; +#endif + return 0; +} + /** * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr * segment needed to load kdump kernel. @@ -814,7 +449,8 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf) kbuf->buffer = headers; kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; - kbuf->bufsz = kbuf->memsz = headers_sz; + kbuf->bufsz = headers_sz; + kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem); kbuf->top_down = false; ret = kexec_add_buffer(kbuf); @@ -863,6 +499,7 @@ int load_crashdump_segments_ppc64(struct kimage *image, return 0; } +#endif /** * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global @@ -916,13 +553,18 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, if (dn) { u64 val; - of_property_read_u64(dn, "opal-base-address", &val); + ret = of_property_read_u64(dn, "opal-base-address", &val); + if (ret) + goto out; + ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val, sizeof(val), false); if (ret) goto out; - of_property_read_u64(dn, "opal-entry-address", &val); + ret = of_property_read_u64(dn, "opal-entry-address", &val); + if (ret) + goto out; ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val, sizeof(val), false); } @@ -972,26 +614,16 @@ static unsigned int cpu_node_size(void) return size; } -/** - * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to - * setup FDT for kexec/kdump kernel. - * @image: kexec image being loaded. - * - * Returns the estimated extra size needed for kexec/kdump kernel FDT. - */ -unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) +static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image, unsigned int cpu_nodes) { - unsigned int cpu_nodes, extra_size = 0; - struct device_node *dn; + unsigned int extra_size = 0; u64 usm_entries; +#ifdef CONFIG_CRASH_HOTPLUG + unsigned int possible_cpu_nodes; +#endif - // Budget some space for the password blob. There's already extra space - // for the key name - if (plpks_is_available()) - extra_size += (unsigned int)plpks_get_passwordlen(); - - if (image->type != KEXEC_TYPE_CRASH) - return extra_size; + if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH) + return 0; /* * For kdump kernel, account for linux,usable-memory and @@ -1004,106 +636,53 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image) extra_size += (unsigned int)(usm_entries * sizeof(u64)); } +#ifdef CONFIG_CRASH_HOTPLUG /* - * Get the number of CPU nodes in the current DT. This allows to - * reserve places for CPU nodes added since the boot time. + * Make sure enough space is reserved to accommodate possible CPU nodes + * in the crash FDT. This allows packing possible CPU nodes which are + * not yet present in the system without regenerating the entire FDT. 
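The kdump_extra_elfcorehdr_size() addition in this hunk gives the elfcorehdr segment in-place headroom for memory hotplug: kbuf->bufsz still covers only the program headers present at load time, while kbuf->memsz reserves empty slots up to CONFIG_CRASH_MAX_MEMORY_RANGES so a hot-added range can be patched in without relocating the segment. Illustrative arithmetic only; the range counts below are invented:

#include <stdio.h>

int main(void)
{
	unsigned int phdr_sz = 56;	/* sizeof(Elf64_Phdr) */
	unsigned int ehdr_sz = 64;	/* sizeof(Elf64_Ehdr) */
	unsigned int nr_ranges = 128;	/* crash ranges at load time (made up) */
	unsigned int max_ranges = 1024;	/* stand-in for the config knob */

	unsigned int bufsz = ehdr_sz + (nr_ranges + 1) * phdr_sz;
	unsigned int memsz = bufsz + (max_ranges - nr_ranges) * phdr_sz;

	printf("bufsz=%u memsz=%u headroom=%u\n", bufsz, memsz, memsz - bufsz);
	return 0;
}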
*/ - cpu_nodes = 0; - for_each_node_by_type(dn, "cpu") { - cpu_nodes++; + if (image->type == KEXEC_TYPE_CRASH) { + possible_cpu_nodes = num_possible_cpus() / threads_per_core; + if (possible_cpu_nodes > cpu_nodes) + extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size(); } - - if (cpu_nodes > boot_cpu_node_count) - extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size(); +#endif return extra_size; } /** - * add_node_props - Reads node properties from device node structure and add - * them to fdt. - * @fdt: Flattened device tree of the kernel - * @node_offset: offset of the node to add a property at - * @dn: device node pointer - * - * Returns 0 on success, negative errno on error. - */ -static int add_node_props(void *fdt, int node_offset, const struct device_node *dn) -{ - int ret = 0; - struct property *pp; - - if (!dn) - return -EINVAL; - - for_each_property_of_node(dn, pp) { - ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length); - if (ret < 0) { - pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret)); - return ret; - } - } - return ret; -} - -/** - * update_cpus_node - Update cpus node of flattened device tree using of_root - * device node. - * @fdt: Flattened device tree of the kernel. + * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to + * setup FDT for kexec/kdump kernel. + * @image: kexec image being loaded. * - * Returns 0 on success, negative errno on error. + * Returns the estimated extra size needed for kexec/kdump kernel FDT. */ -static int update_cpus_node(void *fdt) +unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem) { - struct device_node *cpus_node, *dn; - int cpus_offset, cpus_subnode_offset, ret = 0; - - cpus_offset = fdt_path_offset(fdt, "/cpus"); - if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) { - pr_err("Malformed device tree: error reading /cpus node: %s\n", - fdt_strerror(cpus_offset)); - return cpus_offset; - } + struct device_node *dn; + unsigned int cpu_nodes = 0, extra_size = 0; - if (cpus_offset > 0) { - ret = fdt_del_node(fdt, cpus_offset); - if (ret < 0) { - pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret)); - return -EINVAL; - } - } + // Budget some space for the password blob. 
There's already extra space + // for the key name + if (plpks_is_available()) + extra_size += (unsigned int)plpks_get_passwordlen(); - /* Add cpus node to fdt */ - cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus"); - if (cpus_offset < 0) { - pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset)); - return -EINVAL; + /* Get the number of CPU nodes in the current device tree */ + for_each_node_by_type(dn, "cpu") { + cpu_nodes++; } - /* Add cpus node properties */ - cpus_node = of_find_node_by_path("/cpus"); - ret = add_node_props(fdt, cpus_offset, cpus_node); - of_node_put(cpus_node); - if (ret < 0) - return ret; + /* Consider extra space for CPU nodes added since the boot time */ + if (cpu_nodes > boot_cpu_node_count) + extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size(); - /* Loop through all subnodes of cpus and add them to fdt */ - for_each_node_by_type(dn, "cpu") { - cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name); - if (cpus_subnode_offset < 0) { - pr_err("Unable to add %s subnode: %s\n", dn->full_name, - fdt_strerror(cpus_subnode_offset)); - ret = cpus_subnode_offset; - goto out; - } + /* Consider extra space for reserved memory ranges if any */ + if (rmem->nr_ranges > 0) + extra_size += sizeof(struct fdt_reserve_entry) * rmem->nr_ranges; - ret = add_node_props(fdt, cpus_subnode_offset, dn); - if (ret < 0) - goto out; - } -out: - of_node_put(dn); - return ret; + return extra_size + kdump_extra_fdt_size_ppc64(image, cpu_nodes); } static int copy_property(void *fdt, int node_offset, const struct device_node *dn, @@ -1157,20 +736,16 @@ static int update_pci_dma_nodes(void *fdt, const char *dmapropname) * being loaded. * @image: kexec image being loaded. * @fdt: Flattened device tree for the next kernel. - * @initrd_load_addr: Address where the next initrd will be loaded. - * @initrd_len: Size of the next initrd, or 0 if there will be none. - * @cmdline: Command line for the next kernel, or NULL if there will - * be none. + * @rmem: Reserved memory ranges. * * Returns 0 on success, negative errno on error. */ -int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, - unsigned long initrd_load_addr, - unsigned long initrd_len, const char *cmdline) +int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem) { - struct crash_mem *umem = NULL, *rmem = NULL; + struct crash_mem *umem = NULL; int i, nr_ranges, ret; +#ifdef CONFIG_CRASH_DUMP /* * Restrict memory usage for kdump kernel by setting up * usable memory ranges and memory reserve map. @@ -1207,6 +782,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, goto out; } } +#endif /* Update cpus nodes information to account hotplug CPUs. */ ret = update_cpus_node(fdt); @@ -1222,10 +798,6 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, goto out; /* Update memory reserve map */ - ret = get_reserved_memory_ranges(&rmem); - if (ret) - goto out; - nr_ranges = rmem ? rmem->nr_ranges : 0; for (i = 0; i < nr_ranges; i++) { u64 base, size; @@ -1245,70 +817,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, ret = plpks_populate_fdt(fdt); out: - kfree(rmem); kfree(umem); return ret; } /** - * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal, - * tce-table, reserved-ranges & such (exclude - * memory ranges) as they can't be used for kexec - * segment buffer. Sets kbuf->mem when a suitable - * memory hole is found. - * @kbuf: Buffer contents and memory parameters. 
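The reworked kexec_extra_fdt_size_ppc64() in this hunk now folds reserved-memory entries in alongside the hot-added CPU estimate. A back-of-the-envelope sketch of that computation; cpu_node_size() is stood in for by a constant and every number here is an assumption:

static unsigned int extra_fdt_size(unsigned int cpu_nodes,
				   unsigned int boot_cpu_nodes,
				   unsigned int nr_reserved_ranges)
{
	const unsigned int cpu_node_sz = 1024;	/* assumed per-node DT cost */
	const unsigned int rsv_entry_sz = 16;	/* sizeof(struct fdt_reserve_entry): two u64s */
	unsigned int extra = 0;

	/* CPU nodes added since boot need room in the copied FDT */
	if (cpu_nodes > boot_cpu_nodes)
		extra += (cpu_nodes - boot_cpu_nodes) * cpu_node_sz;

	/* Each reserved range grows the FDT memreserve map by one entry */
	extra += nr_reserved_ranges * rsv_entry_sz;

	return extra;
}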
- * - * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align. - * - * Returns 0 on success, negative errno on error. - */ -int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) -{ - struct crash_mem **emem; - u64 buf_min, buf_max; - int ret; - - /* Look up the exclude ranges list while locating the memory hole */ - emem = &(kbuf->image->arch.exclude_ranges); - if (!(*emem) || ((*emem)->nr_ranges == 0)) { - pr_warn("No exclude range list. Using the default locate mem hole method\n"); - return kexec_locate_mem_hole(kbuf); - } - - buf_min = kbuf->buf_min; - buf_max = kbuf->buf_max; - /* Segments for kdump kernel should be within crashkernel region */ - if (kbuf->image->type == KEXEC_TYPE_CRASH) { - buf_min = (buf_min < crashk_res.start ? - crashk_res.start : buf_min); - buf_max = (buf_max > crashk_res.end ? - crashk_res.end : buf_max); - } - - if (buf_min > buf_max) { - pr_err("Invalid buffer min and/or max values\n"); - return -EINVAL; - } - - if (kbuf->top_down) - ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max, - *emem); - else - ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max, - *emem); - - /* Add the buffer allocated to the exclude list for the next lookup */ - if (!ret) { - add_mem_range(emem, kbuf->mem, kbuf->memsz); - sort_memory_ranges(*emem, true); - } else { - pr_err("Failed to locate memory buffer of size %lu\n", - kbuf->memsz); - } - return ret; -} - -/** * arch_kexec_kernel_image_probe - Does additional handling needed to setup * kexec segments. * @image: kexec image being loaded. diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c index 33b780049aaf..3702b0bdab14 100644 --- a/arch/powerpc/kexec/ranges.c +++ b/arch/powerpc/kexec/ranges.c @@ -20,9 +20,13 @@ #include <linux/kexec.h> #include <linux/of.h> #include <linux/slab.h> +#include <linux/memblock.h> +#include <linux/crash_core.h> #include <asm/sections.h> #include <asm/kexec_ranges.h> +#include <asm/crashdump-ppc64.h> +#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP) /** * get_max_nr_ranges - Get the max no. of ranges crash_mem structure * could hold, given the size allocated for it. @@ -234,13 +238,16 @@ int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size) return __add_mem_range(mem_ranges, base, size); } +#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */ + +#ifdef CONFIG_KEXEC_FILE /** * add_tce_mem_ranges - Adds tce-table range to the given memory ranges list. * @mem_ranges: Range list to add the memory range(s) to. * * Returns 0 on success, negative errno on error. */ -int add_tce_mem_ranges(struct crash_mem **mem_ranges) +static int add_tce_mem_ranges(struct crash_mem **mem_ranges) { struct device_node *dn = NULL; int ret = 0; @@ -279,7 +286,7 @@ int add_tce_mem_ranges(struct crash_mem **mem_ranges) * * Returns 0 on success, negative errno on error. */ -int add_initrd_mem_range(struct crash_mem **mem_ranges) +static int add_initrd_mem_range(struct crash_mem **mem_ranges) { u64 base, end; int ret; @@ -296,7 +303,6 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges) return ret; } -#ifdef CONFIG_PPC_64S_HASH_MMU /** * add_htab_mem_range - Adds htab range to the given memory ranges list, * if it exists @@ -304,14 +310,18 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges) * * Returns 0 on success, negative errno on error. 
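Before its removal here, arch_kexec_locate_mem_hole() clamped the search window to the crashkernel reservation for kdump loads and failed outright if the window collapsed. A minimal sketch of that clamping, keeping the inclusive bounds of the original; struct res stands in for crashk_res:

#include <stdint.h>

struct res { uint64_t start, end; };	/* inclusive, like the resource here */

/* Returns 0 on success, -1 if min/max cross (-EINVAL upstream). */
static int clamp_to_crashkernel(const struct res *crashk,
				uint64_t *buf_min, uint64_t *buf_max)
{
	if (*buf_min < crashk->start)
		*buf_min = crashk->start;
	if (*buf_max > crashk->end)
		*buf_max = crashk->end;

	return (*buf_min > *buf_max) ? -1 : 0;
}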
*/ -int add_htab_mem_range(struct crash_mem **mem_ranges) +static int add_htab_mem_range(struct crash_mem **mem_ranges) { + +#ifdef CONFIG_PPC_64S_HASH_MMU if (!htab_address) return 0; return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes); -} +#else + return 0; #endif +} /** * add_kernel_mem_range - Adds kernel text region to the given @@ -320,18 +330,20 @@ int add_htab_mem_range(struct crash_mem **mem_ranges) * * Returns 0 on success, negative errno on error. */ -int add_kernel_mem_range(struct crash_mem **mem_ranges) +static int add_kernel_mem_range(struct crash_mem **mem_ranges) { return add_mem_range(mem_ranges, 0, __pa(_end)); } +#endif /* CONFIG_KEXEC_FILE */ +#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP) /** * add_rtas_mem_range - Adds RTAS region to the given memory ranges list. * @mem_ranges: Range list to add the memory range to. * * Returns 0 on success, negative errno on error. */ -int add_rtas_mem_range(struct crash_mem **mem_ranges) +static int add_rtas_mem_range(struct crash_mem **mem_ranges) { struct device_node *dn; u32 base, size; @@ -356,7 +368,7 @@ int add_rtas_mem_range(struct crash_mem **mem_ranges) * * Returns 0 on success, negative errno on error. */ -int add_opal_mem_range(struct crash_mem **mem_ranges) +static int add_opal_mem_range(struct crash_mem **mem_ranges) { struct device_node *dn; u64 base, size; @@ -374,7 +386,9 @@ int add_opal_mem_range(struct crash_mem **mem_ranges) of_node_put(dn); return ret; } +#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */ +#ifdef CONFIG_KEXEC_FILE /** * add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w * to the given memory ranges list. @@ -382,7 +396,7 @@ int add_opal_mem_range(struct crash_mem **mem_ranges) * * Returns 0 on success, negative errno on error. */ -int add_reserved_mem_ranges(struct crash_mem **mem_ranges) +static int add_reserved_mem_ranges(struct crash_mem **mem_ranges) { int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0; struct device_node *root = of_find_node_by_path("/"); @@ -412,3 +426,283 @@ int add_reserved_mem_ranges(struct crash_mem **mem_ranges) return ret; } + +/** + * get_reserved_memory_ranges - Get reserve memory ranges. This list includes + * memory regions that should be added to the + * memory reserve map to ensure the region is + * protected from any mischief. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. + */ +int get_reserved_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_tce_mem_ranges(mem_ranges); + if (ret) + goto out; + + ret = add_reserved_mem_ranges(mem_ranges); +out: + if (ret) + pr_err("Failed to setup reserved memory ranges\n"); + return ret; +} + +/** + * get_exclude_memory_ranges - Get exclude memory ranges. This list includes + * regions like opal/rtas, tce-table, initrd, + * kernel, htab which should be avoided while + * setting up kexec load segments. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. 
+ */ +int get_exclude_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + ret = add_tce_mem_ranges(mem_ranges); + if (ret) + goto out; + + ret = add_initrd_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_htab_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_kernel_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_reserved_mem_ranges(mem_ranges); + if (ret) + goto out; + + /* exclude memory ranges should be sorted for easy lookup */ + sort_memory_ranges(*mem_ranges, true); +out: + if (ret) + pr_err("Failed to setup exclude memory ranges\n"); + return ret; +} + +#ifdef CONFIG_CRASH_DUMP +/** + * get_usable_memory_ranges - Get usable memory ranges. This list includes + * regions like crashkernel, opal/rtas & tce-table, + * that kdump kernel could use. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. + */ +int get_usable_memory_ranges(struct crash_mem **mem_ranges) +{ + int ret; + + /* + * Early boot failure observed on guests when low memory (first memory + * block?) is not added to usable memory. So, add [0, crashk_res.end] + * instead of [crashk_res.start, crashk_res.end] to workaround it. + * Also, crashed kernel's memory must be added to reserve map to + * avoid kdump kernel from using it. + */ + ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1); + if (ret) + goto out; + + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_tce_mem_ranges(mem_ranges); +out: + if (ret) + pr_err("Failed to setup usable memory ranges\n"); + return ret; +} +#endif /* CONFIG_CRASH_DUMP */ +#endif /* CONFIG_KEXEC_FILE */ + +#ifdef CONFIG_CRASH_DUMP +/** + * get_crash_memory_ranges - Get crash memory ranges. This list includes + * first/crashing kernel's memory regions that + * would be exported via an elfcore. + * @mem_ranges: Range list to add the memory ranges to. + * + * Returns 0 on success, negative errno on error. + */ +int get_crash_memory_ranges(struct crash_mem **mem_ranges) +{ + phys_addr_t base, end; + struct crash_mem *tmem; + u64 i; + int ret; + + for_each_mem_range(i, &base, &end) { + u64 size = end - base; + + /* Skip backup memory region, which needs a separate entry */ + if (base == BACKUP_SRC_START) { + if (size > BACKUP_SRC_SIZE) { + base = BACKUP_SRC_END + 1; + size -= BACKUP_SRC_SIZE; + } else + continue; + } + + ret = add_mem_range(mem_ranges, base, size); + if (ret) + goto out; + + /* Try merging adjacent ranges before reallocation attempt */ + if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges) + sort_memory_ranges(*mem_ranges, true); + } + + /* Reallocate memory ranges if there is no space to split ranges */ + tmem = *mem_ranges; + if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) { + tmem = realloc_mem_ranges(mem_ranges); + if (!tmem) + goto out; + } + + /* Exclude crashkernel region */ + ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end); + if (ret) + goto out; + + /* + * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL + * regions are exported to save their context at the time of + * crash, they should actually be backed up just like the + * first 64K bytes of memory. 
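get_crash_memory_ranges() above carves the backup region out of the first memblock range and later re-adds it as its own entry so it receives a separate program header. A standalone sketch of that carve-out; the constants mirror the 64K window the FIXME comment describes but are assumptions here:

#include <stdint.h>
#include <stdio.h>

#define BK_START 0x0ULL
#define BK_SIZE  0x10000ULL		/* assumed 64K backup window */
#define BK_END   (BK_START + BK_SIZE - 1)

static void emit(uint64_t base, uint64_t size)
{
	printf("range: base=0x%llx size=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)size);
}

static void add_skipping_backup(uint64_t base, uint64_t size)
{
	if (base == BK_START) {
		if (size <= BK_SIZE)
			return;		/* wholly inside the backup window */
		base = BK_END + 1;	/* trim the window off the front */
		size -= BK_SIZE;
	}
	emit(base, size);
}

int main(void)
{
	add_skipping_backup(0x0, 0x40000000);		/* front gets trimmed */
	add_skipping_backup(0x80000000, 0x10000000);	/* passes through untouched */
	emit(BK_START, BK_SIZE);	/* backup region as its own entry */
	return 0;
}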
+ */ + ret = add_rtas_mem_range(mem_ranges); + if (ret) + goto out; + + ret = add_opal_mem_range(mem_ranges); + if (ret) + goto out; + + /* create a separate program header for the backup region */ + ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE); + if (ret) + goto out; + + sort_memory_ranges(*mem_ranges, false); +out: + if (ret) + pr_err("Failed to setup crash memory ranges\n"); + return ret; +} + +/** + * remove_mem_range - Removes the given memory range from the range list. + * @mem_ranges: Range list to remove the memory range to. + * @base: Base address of the range to remove. + * @size: Size of the memory range to remove. + * + * (Re)allocates memory, if needed. + * + * Returns 0 on success, negative errno on error. + */ +int remove_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size) +{ + u64 end; + int ret = 0; + unsigned int i; + u64 mstart, mend; + struct crash_mem *mem_rngs = *mem_ranges; + + if (!size) + return 0; + + /* + * Memory range are stored as start and end address, use + * the same format to do remove operation. + */ + end = base + size - 1; + + for (i = 0; i < mem_rngs->nr_ranges; i++) { + mstart = mem_rngs->ranges[i].start; + mend = mem_rngs->ranges[i].end; + + /* + * Memory range to remove is not part of this range entry + * in the memory range list + */ + if (!(base >= mstart && end <= mend)) + continue; + + /* + * Memory range to remove is equivalent to this entry in the + * memory range list. Remove the range entry from the list. + */ + if (base == mstart && end == mend) { + for (; i < mem_rngs->nr_ranges - 1; i++) { + mem_rngs->ranges[i].start = mem_rngs->ranges[i+1].start; + mem_rngs->ranges[i].end = mem_rngs->ranges[i+1].end; + } + mem_rngs->nr_ranges--; + goto out; + } + /* + * Start address of the memory range to remove and the + * current memory range entry in the list is same. Just + * move the start address of the current memory range + * entry in the list to end + 1. + */ + else if (base == mstart) { + mem_rngs->ranges[i].start = end + 1; + goto out; + } + /* + * End address of the memory range to remove and the + * current memory range entry in the list is same. + * Just move the end address of the current memory + * range entry in the list to base - 1. + */ + else if (end == mend) { + mem_rngs->ranges[i].end = base - 1; + goto out; + } + /* + * Memory range to remove is not at the edge of current + * memory range entry. Split the current memory entry into + * two half. 
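remove_mem_range() in this hunk distinguishes four cases: exact match, trim the front, trim the back, and a split into two fragments. A standalone sketch of the same case analysis over inclusive [start, end] entries; note that the sketch records the original upper bound before trimming so the tail fragment keeps the right size. This is an illustration, not the kernel function verbatim.

#include <stdint.h>

struct rng { uint64_t start, end; };	/* inclusive bounds */

/* Removes [base, base + size - 1]; returns the new count, -1 if full. */
static int remove_range(struct rng *r, int n, int cap,
			uint64_t base, uint64_t size)
{
	uint64_t end;
	int i, j;

	if (!size)
		return n;
	end = base + size - 1;

	for (i = 0; i < n; i++) {
		/* Only handle removals fully contained in one entry */
		if (base < r[i].start || end > r[i].end)
			continue;

		if (base == r[i].start && end == r[i].end) {
			for (j = i; j < n - 1; j++)	/* drop the entry */
				r[j] = r[j + 1];
			return n - 1;
		}
		if (base == r[i].start) {		/* trim the front */
			r[i].start = end + 1;
		} else if (end == r[i].end) {		/* trim the back */
			r[i].end = base - 1;
		} else {				/* split in two */
			if (n == cap)
				return -1;
			r[n].start = end + 1;
			r[n].end = r[i].end;	/* original bound, saved first */
			r[i].end = base - 1;
			n++;
		}
		return n;
	}
	return n;	/* nothing contained the removal; list unchanged */
}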
+ */ + else { + mem_rngs->ranges[i].end = base - 1; + size = mem_rngs->ranges[i].end - end; + ret = add_mem_range(mem_ranges, end + 1, size); + } + } +out: + return ret; +} +#endif /* CONFIG_CRASH_DUMP */ diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S index 104c9911f406..dd86e338307d 100644 --- a/arch/powerpc/kexec/relocate_32.S +++ b/arch/powerpc/kexec/relocate_32.S @@ -348,16 +348,13 @@ write_utlb: rlwinm r10, r24, 0, 22, 27 cmpwi r10, PPC47x_TLB0_4K - bne 0f li r10, 0x1000 /* r10 = 4k */ - ANNOTATE_INTRA_FUNCTION_CALL - bl 1f + beq 0f -0: /* Defaults to 256M */ lis r10, 0x1000 - bcl 20,31,$+4 +0: bcl 20,31,$+4 1: mflr r4 addi r4, r4, (2f-1b) /* virtual address of 2f */ diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index dbfdc126bf14..2f2702c867f7 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -83,6 +83,7 @@ config KVM_BOOK3S_64_HV depends on KVM_BOOK3S_64 && PPC_POWERNV select KVM_BOOK3S_HV_POSSIBLE select KVM_GENERIC_MMU_NOTIFIER + select KVM_BOOK3S_HV_PMU select CMA help Support running unmodified book3s_64 guest kernels in @@ -171,6 +172,18 @@ config KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND those buggy L1s which saves the L2 state, at the cost of performance in all nested-capable guest entry/exit. +config KVM_BOOK3S_HV_PMU + tristate "Hypervisor Perf events for KVM Book3s-HV" + depends on KVM_BOOK3S_64_HV + help + Enable Book3s-HV Hypervisor Perf events PMU named 'kvm-hv'. These + Perf events give an overview of hypervisor performance overall + instead of a specific guests. Currently the PMU reports + L0-Hypervisor stats on a kvm-hv enabled PSeries LPAR like: + * Total/Used Guest-Heap + * Total/Used Guest Page-table Memory + * Total amount of Guest Page-table Memory reclaimed + config KVM_BOOKE_HV bool diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 8acec144120e..d79c5d1098c0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -360,10 +360,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, break; } -#if 0 - printk(KERN_INFO "Deliver interrupt 0x%x? 
%x\n", vec, deliver); -#endif - if (deliver) kvmppc_inject_interrupt(vcpu, vec, 0); @@ -426,7 +422,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, - bool *writable) + bool *writable, struct page **page) { ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; gfn_t gfn = gpa >> PAGE_SHIFT; @@ -441,13 +437,14 @@ kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing, kvm_pfn_t pfn; pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; - get_page(pfn_to_page(pfn)); + *page = pfn_to_page(pfn); + get_page(*page); if (writable) *writable = true; return pfn; } - return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); + return kvm_faultin_pfn(vcpu, gfn, writing, writable, page); } EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn); @@ -899,11 +896,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm->arch.kvm_ops->test_age_gfn(kvm, range); } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - return kvm->arch.kvm_ops->set_spte_gfn(kvm, range); -} - int kvmppc_core_init_vm(struct kvm *kvm) { diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 58391b4b32ed..4aa2ab89afbc 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -12,7 +12,6 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); -extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 4b3a8d80cfa3..c7e4b62642ea 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -125,11 +125,10 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, return (u32*)pteg; } -extern char etext[]; - int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, bool iswrite) { + struct page *page; kvm_pfn_t hpaddr; u64 vpn; u64 vsid; @@ -145,7 +144,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, bool writable; /* Get host physical address for gpa */ - hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); + hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page); if (is_error_noslot_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", orig_pte->raddr); @@ -232,7 +231,7 @@ next_pteg: pte = kvmppc_mmu_hpte_cache_next(vcpu); if (!pte) { - kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); + kvm_release_page_unused(page); r = -EAGAIN; goto out; } @@ -250,7 +249,7 @@ next_pteg: kvmppc_mmu_hpte_cache_map(vcpu, pte); - kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); + kvm_release_page_clean(page); out: return r; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index bc6a381b5346..be20aee6fd7d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -88,13 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, struct hpte_cache *cpte; unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; 
unsigned long pfn; + struct page *page; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* Get host physical address for gpa */ - pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); + pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page); if (is_error_noslot_pfn(pfn)) { printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n", orig_pte->raddr); @@ -121,13 +122,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M); - kvm_set_pfn_accessed(pfn); if (!orig_pte->may_write || !writable) rflags |= PP_RXRX; - else { + else mark_page_dirty(vcpu->kvm, gfn); - kvm_set_pfn_dirty(pfn); - } if (!orig_pte->may_execute) rflags |= HPTE_R_N; @@ -202,8 +200,10 @@ map_again: } out_unlock: + /* FIXME: Don't unconditionally pass unused=false. */ + kvm_release_faultin_page(kvm, page, false, + orig_pte->may_write && writable); spin_unlock(&kvm->mmu_lock); - kvm_release_pfn_clean(pfn); if (cpte) kvmppc_mmu_hpte_cache_free(cpte); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 2b1f0cdd8c18..f305395cf26e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -603,27 +603,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, write_ok = writing; hva = gfn_to_hva_memslot(memslot, gfn); - /* - * Do a fast check first, since __gfn_to_pfn_memslot doesn't - * do it with !atomic && !async, which is how we call it. - * We always ask for write permission since the common case - * is that the page is writable. - */ - if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) { - write_ok = true; - } else { - /* Call KVM generic code to do the slow-path check */ - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, - writing, &write_ok, NULL); - if (is_error_noslot_pfn(pfn)) - return -EFAULT; - page = NULL; - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - if (PageReserved(page)) - page = NULL; - } - } + pfn = __kvm_faultin_pfn(memslot, gfn, writing ? 
FOLL_WRITE : 0, + &write_ok, &page); + if (is_error_noslot_pfn(pfn)) + return -EFAULT; /* * Read the PTE from the process' radix tree and use that @@ -1010,18 +993,6 @@ bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_test_age_rmapp(kvm, range->slot, range->start); } -bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) -{ - WARN_ON(range->start + 1 != range->end); - - if (kvm_is_radix(kvm)) - kvm_unmap_radix(kvm, range->slot, range->start); - else - kvm_unmap_rmapp(kvm, range->slot, range->start); - - return false; -} - static int vcpus_running(struct kvm *kvm) { return atomic_read(&kvm->arch.vcpus_running) != 0; diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 408d98f8a514..b3e6e73d6a08 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -821,7 +821,7 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, - bool writing, bool kvm_ro, + bool writing, pte_t *inserted_pte, unsigned int *levelp) { struct kvm *kvm = vcpu->kvm; @@ -829,40 +829,21 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long mmu_seq; unsigned long hva, gfn = gpa >> PAGE_SHIFT; bool upgrade_write = false; - bool *upgrade_p = &upgrade_write; pte_t pte, *ptep; unsigned int shift, level; int ret; bool large_enable; + kvm_pfn_t pfn; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); - /* - * Do a fast check first, since __gfn_to_pfn_memslot doesn't - * do it with !atomic && !async, which is how we call it. - * We always ask for write permission since the common case - * is that the page is writable. - */ hva = gfn_to_hva_memslot(memslot, gfn); - if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) { - upgrade_write = true; - } else { - unsigned long pfn; - - /* Call KVM generic code to do the slow-path check */ - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, - writing, upgrade_p, NULL); - if (is_error_noslot_pfn(pfn)) - return -EFAULT; - page = NULL; - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - if (PageReserved(page)) - page = NULL; - } - } + pfn = __kvm_faultin_pfn(memslot, gfn, writing ? 
FOLL_WRITE : 0, + &upgrade_write, &page); + if (is_error_noslot_pfn(pfn)) + return -EFAULT; /* * Read the PTE from the process' radix tree and use that @@ -950,7 +931,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot; long ret; bool writing = !!(dsisr & DSISR_ISSTORE); - bool kvm_ro = false; /* Check for unusual errors */ if (dsisr & DSISR_UNSUPP_MMU) { @@ -1003,7 +983,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, ea, DSISR_ISSTORE | DSISR_PROTFAULT); return RESUME_GUEST; } - kvm_ro = true; } /* Failed to set the reference/change bits */ @@ -1021,7 +1000,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, /* Try to insert a pte */ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing, - kvm_ro, NULL, NULL); + NULL, NULL); if (ret == 0 || ret == -EAGAIN) ret = RESUME_GUEST; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index b569ebaa590e..742aa58a7c7e 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -115,23 +115,20 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, struct iommu_table_group *table_group; long i; struct kvmppc_spapr_tce_iommu_table *stit; - struct fd f; + CLASS(fd, f)(tablefd); - f = fdget(tablefd); - if (!f.file) + if (fd_empty(f)) return -EBADF; rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { - if (stt == f.file->private_data) { + if (stt == fd_file(f)->private_data) { found = true; break; } } rcu_read_unlock(); - fdput(f); - if (!found) return -EINVAL; diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 5bbfb2eed127..de126d153328 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -714,7 +714,7 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_HID1: to_book3s(vcpu)->hid[1] = spr_val; break; - case SPRN_HID2: + case SPRN_HID2_750FX: to_book3s(vcpu)->hid[2] = spr_val; break; case SPRN_HID2_GEKKO: @@ -900,7 +900,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_HID1: *spr_val = to_book3s(vcpu)->hid[1]; break; - case SPRN_HID2: + case SPRN_HID2_750FX: case SPRN_HID2_GEKKO: *spr_val = to_book3s(vcpu)->hid[2]; break; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8e86eb577eb8..7667563fb9ff 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -400,7 +400,10 @@ static inline unsigned long map_pcr_to_cap(unsigned long pcr) cap = H_GUEST_CAP_POWER9; break; case PCR_ARCH_31: - cap = H_GUEST_CAP_POWER10; + if (cpu_has_feature(CPU_FTR_P11_PVR)) + cap = H_GUEST_CAP_POWER11; + else + cap = H_GUEST_CAP_POWER10; break; default: break; @@ -415,7 +418,7 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) struct kvmppc_vcore *vc = vcpu->arch.vcore; /* We can (emulate) our own architecture version and anything older */ - if (cpu_has_feature(CPU_FTR_ARCH_31)) + if (cpu_has_feature(CPU_FTR_P11_PVR) || cpu_has_feature(CPU_FTR_ARCH_31)) host_pcr_bit = PCR_ARCH_31; else if (cpu_has_feature(CPU_FTR_ARCH_300)) host_pcr_bit = PCR_ARCH_300; @@ -1922,14 +1925,22 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, r = EMULATE_FAIL; if (cpu_has_feature(CPU_FTR_ARCH_300)) { - if (cause == FSCR_MSGP_LG) + switch (cause) { + case FSCR_MSGP_LG: r = kvmppc_emulate_doorbell_instr(vcpu); - if (cause == FSCR_PM_LG) + break; + case FSCR_PM_LG: r = 
kvmppc_pmu_unavailable(vcpu); - if (cause == FSCR_EBB_LG) + break; + case FSCR_EBB_LG: r = kvmppc_ebb_unavailable(vcpu); - if (cause == FSCR_TM_LG) + break; + case FSCR_TM_LG: r = kvmppc_tm_unavailable(vcpu); + break; + default: + break; + } } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL | @@ -2052,36 +2063,9 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) fallthrough; /* go to facility unavailable handler */ #endif - case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { - u64 cause = vcpu->arch.hfscr >> 56; - - /* - * Only pass HFU interrupts to the L1 if the facility is - * permitted but disabled by the L1's HFSCR, otherwise - * the interrupt does not make sense to the L1 so turn - * it into a HEAI. - */ - if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || - (vcpu->arch.nested_hfscr & (1UL << cause))) { - ppc_inst_t pinst; - vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; - - /* - * If the fetch failed, return to guest and - * try executing it again. - */ - r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); - vcpu->arch.emul_inst = ppc_inst_val(pinst); - if (r != EMULATE_DONE) - r = RESUME_GUEST; - else - r = RESUME_HOST; - } else { - r = RESUME_HOST; - } - + case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: + r = RESUME_HOST; break; - } case BOOK3S_INTERRUPT_HV_RM_HARD: vcpu->arch.trap = 0; @@ -2305,7 +2289,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); break; case KVM_REG_PPC_SDAR: - *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); + *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu)); break; case KVM_REG_PPC_SIER: *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0)); @@ -2349,6 +2333,15 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_DAWRX1: *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu)); break; + case KVM_REG_PPC_DEXCR: + *val = get_reg_val(id, kvmppc_get_dexcr_hv(vcpu)); + break; + case KVM_REG_PPC_HASHKEYR: + *val = get_reg_val(id, kvmppc_get_hashkeyr_hv(vcpu)); + break; + case KVM_REG_PPC_HASHPKEYR: + *val = get_reg_val(id, kvmppc_get_hashpkeyr_hv(vcpu)); + break; case KVM_REG_PPC_CIABR: *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu)); break; @@ -2540,7 +2533,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.mmcrs = set_reg_val(id, *val); break; case KVM_REG_PPC_MMCR3: - *val = get_reg_val(id, vcpu->arch.mmcr[3]); + kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val)); break; case KVM_REG_PPC_PMC1 ... 
KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; @@ -2592,6 +2585,15 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_DAWRX1: kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); break; + case KVM_REG_PPC_DEXCR: + kvmppc_set_dexcr_hv(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_HASHKEYR: + kvmppc_set_hashkeyr_hv(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_HASHPKEYR: + kvmppc_set_hashpkeyr_hv(vcpu, set_reg_val(id, *val)); + break; case KVM_REG_PPC_CIABR: kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val)); /* Don't allow setting breakpoints in hypervisor code */ @@ -4031,7 +4033,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* Return to whole-core mode if we split the core earlier */ if (cmd_bit) { unsigned long hid0 = mfspr(SPRN_HID0); - unsigned long loops = 0; hid0 &= ~HID0_POWER8_DYNLPARDIS; stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; @@ -4043,7 +4044,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) if (!(hid0 & stat_bit)) break; cpu_relax(); - ++loops; } split_info.do_nap = 0; } @@ -4108,6 +4108,144 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu) } } +/* Helper functions for reading L2's stats from L1's VPA */ +#ifdef CONFIG_PPC_PSERIES +static DEFINE_PER_CPU(u64, l1_to_l2_cs); +static DEFINE_PER_CPU(u64, l2_to_l1_cs); +static DEFINE_PER_CPU(u64, l2_runtime_agg); + +int kvmhv_get_l2_counters_status(void) +{ + return firmware_has_feature(FW_FEATURE_LPAR) && + get_lppaca()->l2_counters_enable; +} + +void kvmhv_set_l2_counters_status(int cpu, bool status) +{ + if (!firmware_has_feature(FW_FEATURE_LPAR)) + return; + if (status) + lppaca_of(cpu).l2_counters_enable = 1; + else + lppaca_of(cpu).l2_counters_enable = 0; +} +EXPORT_SYMBOL(kvmhv_set_l2_counters_status); + +int kvmhv_counters_tracepoint_regfunc(void) +{ + int cpu; + + for_each_present_cpu(cpu) { + kvmhv_set_l2_counters_status(cpu, true); + } + return 0; +} + +void kvmhv_counters_tracepoint_unregfunc(void) +{ + int cpu; + + for_each_present_cpu(cpu) { + kvmhv_set_l2_counters_status(cpu, false); + } +} + +static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu) +{ + struct lppaca *lp = get_lppaca(); + u64 l1_to_l2_ns, l2_to_l1_ns, l2_runtime_ns; + u64 *l1_to_l2_cs_ptr = this_cpu_ptr(&l1_to_l2_cs); + u64 *l2_to_l1_cs_ptr = this_cpu_ptr(&l2_to_l1_cs); + u64 *l2_runtime_agg_ptr = this_cpu_ptr(&l2_runtime_agg); + + l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb)); + l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb)); + l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb)); + trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr, + l2_to_l1_ns - *l2_to_l1_cs_ptr, + l2_runtime_ns - *l2_runtime_agg_ptr); + *l1_to_l2_cs_ptr = l1_to_l2_ns; + *l2_to_l1_cs_ptr = l2_to_l1_ns; + *l2_runtime_agg_ptr = l2_runtime_ns; + vcpu->arch.l1_to_l2_cs = l1_to_l2_ns; + vcpu->arch.l2_to_l1_cs = l2_to_l1_ns; + vcpu->arch.l2_runtime_agg = l2_runtime_ns; +} + +u64 kvmhv_get_l1_to_l2_cs_time(void) +{ + return tb_to_ns(be64_to_cpu(get_lppaca()->l1_to_l2_cs_tb)); +} +EXPORT_SYMBOL(kvmhv_get_l1_to_l2_cs_time); + +u64 kvmhv_get_l2_to_l1_cs_time(void) +{ + return tb_to_ns(be64_to_cpu(get_lppaca()->l2_to_l1_cs_tb)); +} +EXPORT_SYMBOL(kvmhv_get_l2_to_l1_cs_time); + +u64 kvmhv_get_l2_runtime_agg(void) +{ + return tb_to_ns(be64_to_cpu(get_lppaca()->l2_runtime_tb)); +} +EXPORT_SYMBOL(kvmhv_get_l2_runtime_agg); + +u64 kvmhv_get_l1_to_l2_cs_time_vcpu(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vcpu_arch *arch; + + 
vcpu = local_paca->kvm_hstate.kvm_vcpu; + if (vcpu) { + arch = &vcpu->arch; + return arch->l1_to_l2_cs; + } else { + return 0; + } +} +EXPORT_SYMBOL(kvmhv_get_l1_to_l2_cs_time_vcpu); + +u64 kvmhv_get_l2_to_l1_cs_time_vcpu(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vcpu_arch *arch; + + vcpu = local_paca->kvm_hstate.kvm_vcpu; + if (vcpu) { + arch = &vcpu->arch; + return arch->l2_to_l1_cs; + } else { + return 0; + } +} +EXPORT_SYMBOL(kvmhv_get_l2_to_l1_cs_time_vcpu); + +u64 kvmhv_get_l2_runtime_agg_vcpu(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vcpu_arch *arch; + + vcpu = local_paca->kvm_hstate.kvm_vcpu; + if (vcpu) { + arch = &vcpu->arch; + return arch->l2_runtime_agg; + } else { + return 0; + } +} +EXPORT_SYMBOL(kvmhv_get_l2_runtime_agg_vcpu); + +#else +int kvmhv_get_l2_counters_status(void) +{ + return 0; +} + +static void do_trace_nested_cs_time(struct kvm_vcpu *vcpu) +{ +} +#endif + static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb) { @@ -4116,6 +4254,11 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, int trap; long rc; + if (vcpu->arch.doorbell_request) { + vcpu->arch.doorbell_request = 0; + kvmppc_set_dpdes(vcpu, 1); + } + io = &vcpu->arch.nestedv2_io; msr = mfmsr(); @@ -4156,6 +4299,10 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, timer_rearm_host_dec(*tb); + /* Record context switch and guest_run_time data */ + if (kvmhv_get_l2_counters_status()) + do_trace_nested_cs_time(vcpu); + return trap; } @@ -4206,6 +4353,15 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns hvregs.hdec_expiry = time_limit; /* + * hvregs has the doorbell status, so zero it here which + * enables us to receive doorbells when H_ENTER_NESTED is + * in progress for this vCPU + */ + + if (vcpu->arch.doorbell_request) + vcpu->arch.doorbell_request = 0; + + /* * When setting DEC, we must always deal with irq_work_raise * via NMI vs setting DEC. The problem occurs right as we * switch into guest mode if a NMI hits and sets pending work @@ -4794,9 +4950,20 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, BOOK3S_INTERRUPT_EXTERNAL, 0); else lpcr |= LPCR_MER; + } else { + /* + * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit + * unexpectedly set - for e.g. during NMI handling when all register + * states are synchronized from L0 to L1. L1 needs to inform L0 about + * MER=1 only when there are pending external interrupts. + * In the above if check, MER bit is set if there are pending + * external interrupts. Hence, explicitly mask off MER bit + * here as otherwise it may generate spurious interrupts in L2 KVM + * causing an endless loop, which results in L2 guest getting hung. 
+ */ + lpcr &= ~LPCR_MER; } } else if (vcpu->arch.pending_exceptions || - vcpu->arch.doorbell_request || xive_interrupt_pending(vcpu)) { vcpu->arch.ret = RESUME_HOST; goto out; @@ -4857,7 +5024,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, * entering a nested guest in which case the decrementer is now owned * by L2 and the L1 decrementer is provided in hdec_expires */ - if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) && + if (kvmppc_core_pending_dec(vcpu) && ((tb < kvmppc_dec_expires_host_tb(vcpu)) || (trap == BOOK3S_INTERRUPT_SYSCALL && kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) @@ -5874,7 +6041,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) * the underlying calls, which will EOI the interrupt in real * mode, need an HW IRQ number mapped in the XICS IRQ domain. */ - host_data = irq_domain_get_irq_data(irq_get_default_host(), host_irq); + host_data = irq_domain_get_irq_data(irq_get_default_domain(), host_irq); irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data); if (i == pimap->n_mapped) @@ -6364,7 +6531,6 @@ static struct kvmppc_ops kvm_ops_hv = { .unmap_gfn_range = kvm_unmap_gfn_range_hv, .age_gfn = kvm_age_gfn_hv, .test_age_gfn = kvm_test_age_gfn_hv, - .set_spte_gfn = kvm_set_spte_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, @@ -6375,10 +6541,6 @@ static struct kvmppc_ops kvm_ops_hv = { .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, .hcall_implemented = kvmppc_hcall_impl_hv, -#ifdef CONFIG_KVM_XICS - .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv, - .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv, -#endif .configure_mmu = kvmhv_configure_mmu, .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, @@ -6496,6 +6658,22 @@ static int kvmppc_book3s_init_hv(void) return r; } +#if defined(CONFIG_KVM_XICS) + /* + * IRQ bypass is supported only for interrupts whose EOI operations are + * handled via OPAL calls. Therefore, register IRQ bypass handlers + * exclusively for PowerNV KVM when booted with 'xive=off', indicating + * the use of the emulated XICS interrupt controller. 
+ */ + if (!kvmhv_on_pseries()) { + pr_info("KVM-HV: Enabling IRQ bypass\n"); + kvm_ops_hv.irq_bypass_add_producer = + kvmppc_irq_bypass_add_producer_hv; + kvm_ops_hv.irq_bypass_del_producer = + kvmppc_irq_bypass_del_producer_hv; + } +#endif + kvm_ops_hv.owner = THIS_MODULE; kvmppc_hv_ops = &kvm_ops_hv; @@ -6520,6 +6698,7 @@ static void kvmppc_book3s_exit_hv(void) module_init(kvmppc_book3s_init_hv); module_exit(kvmppc_book3s_exit_hv); +MODULE_DESCRIPTION("KVM on Book3S (POWER8 and later) in hypervisor mode"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(KVM_MINOR); MODULE_ALIAS("devname:kvm"); diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h index 47b2c815641e..a404c9b221c1 100644 --- a/arch/powerpc/kvm/book3s_hv.h +++ b/arch/powerpc/kvm/book3s_hv.h @@ -116,6 +116,9 @@ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0) KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1) KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0) KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dexcr, 64, KVMPPC_GSID_DEXCR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashkeyr, 64, KVMPPC_GSID_HASHKEYR) +KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashpkeyr, 64, KVMPPC_GSID_HASHPKEYR) KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR) KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT) KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR) diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 05f5220960c6..5f8c2321cfb5 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -32,7 +32,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) struct kvmppc_vcore *vc = vcpu->arch.vcore; hr->pcr = vc->pcr | PCR_MASK; - hr->dpdes = vc->dpdes; + hr->dpdes = vcpu->arch.doorbell_request; hr->hfscr = vcpu->arch.hfscr; hr->tb_offset = vc->tb_offset; hr->dawr0 = vcpu->arch.dawr0; @@ -105,7 +105,7 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, { struct kvmppc_vcore *vc = vcpu->arch.vcore; - hr->dpdes = vc->dpdes; + hr->dpdes = vcpu->arch.doorbell_request; hr->purr = vcpu->arch.purr; hr->spurr = vcpu->arch.spurr; hr->ic = vcpu->arch.ic; @@ -143,7 +143,7 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state * struct kvmppc_vcore *vc = vcpu->arch.vcore; vc->pcr = hr->pcr | PCR_MASK; - vc->dpdes = hr->dpdes; + vcpu->arch.doorbell_request = hr->dpdes; vcpu->arch.hfscr = hr->hfscr; vcpu->arch.dawr0 = hr->dawr0; vcpu->arch.dawrx0 = hr->dawrx0; @@ -170,7 +170,13 @@ void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, { struct kvmppc_vcore *vc = vcpu->arch.vcore; - vc->dpdes = hr->dpdes; + /* + * This L2 vCPU might have received a doorbell while H_ENTER_NESTED was being handled. 
+ * Make sure we preserve the doorbell if it was either: + * a) Sent after H_ENTER_NESTED was called on this vCPU (arch.doorbell_request would be 1) + * b) Doorbell was not handled and L2 exited for some other reason (hr->dpdes would be 1) + */ + vcpu->arch.doorbell_request = vcpu->arch.doorbell_request | hr->dpdes; vcpu->arch.hfscr = hr->hfscr; vcpu->arch.purr = hr->purr; vcpu->arch.spurr = hr->spurr; @@ -445,6 +451,8 @@ long kvmhv_nested_init(void) if (rc == H_SUCCESS) { unsigned long capabilities = 0; + if (cpu_has_feature(CPU_FTR_P11_PVR)) + capabilities |= H_GUEST_CAP_POWER11; if (cpu_has_feature(CPU_FTR_ARCH_31)) capabilities |= H_GUEST_CAP_POWER10; if (cpu_has_feature(CPU_FTR_ARCH_300)) @@ -1527,7 +1535,6 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, unsigned long n_gpa, gpa, gfn, perm = 0UL; unsigned int shift, l1_shift, level; bool writing = !!(dsisr & DSISR_ISSTORE); - bool kvm_ro = false; long int ret; if (!gp->l1_gr_to_hr) { @@ -1607,7 +1614,6 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, ea, DSISR_ISSTORE | DSISR_PROTFAULT); return RESUME_GUEST; } - kvm_ro = true; } /* 2. Find the host pte for this L1 guest real address */ @@ -1629,7 +1635,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) { /* No suitable pte found -> try to insert a mapping */ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, - writing, kvm_ro, &pte, &level); + writing, &pte, &level); if (ret == -EAGAIN) return RESUME_GUEST; else if (ret) diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c index 8e6f5355f08b..87691cf86cae 100644 --- a/arch/powerpc/kvm/book3s_hv_nestedv2.c +++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c @@ -71,8 +71,8 @@ gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb, } if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) { - kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT, - cfg->vcpu_run_output_cfg); + rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT, + cfg->vcpu_run_output_cfg); if (rc < 0) return rc; } @@ -123,6 +123,12 @@ static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm) case KVMPPC_GSID_PROCESS_TABLE: case KVMPPC_GSID_RUN_INPUT: case KVMPPC_GSID_RUN_OUTPUT: + /* Host wide counters */ + case KVMPPC_GSID_L0_GUEST_HEAP: + case KVMPPC_GSID_L0_GUEST_HEAP_MAX: + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE: + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX: + case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM: break; default: size += kvmppc_gse_total_size(kvmppc_gsid_size(iden)); @@ -193,6 +199,15 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb, case KVMPPC_GSID_DAWRX1: rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1); break; + case KVMPPC_GSID_DEXCR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr); + break; + case KVMPPC_GSID_HASHKEYR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr); + break; + case KVMPPC_GSID_HASHPKEYR: + rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr); + break; case KVMPPC_GSID_CIABR: rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr); break; @@ -311,6 +326,10 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb, rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.vcore->vtb); break; + case KVMPPC_GSID_DPDES: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.vcore->dpdes); + break; case KVMPPC_GSID_LPCR: rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.vcore->lpcr); @@ -357,7 +376,9 @@ static int 
gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb, * default to L1's PVR. */ if (!vcpu->arch.vcore->arch_compat) { - if (cpu_has_feature(CPU_FTR_ARCH_31)) + if (cpu_has_feature(CPU_FTR_P11_PVR)) + arch_compat = PVR_ARCH_31_P11; + else if (cpu_has_feature(CPU_FTR_ARCH_31)) arch_compat = PVR_ARCH_31; else if (cpu_has_feature(CPU_FTR_ARCH_300)) arch_compat = PVR_ARCH_300; @@ -441,6 +462,15 @@ static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm, case KVMPPC_GSID_DAWRX1: vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse); break; + case KVMPPC_GSID_DEXCR: + vcpu->arch.dexcr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_HASHKEYR: + vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse); + break; + case KVMPPC_GSID_HASHPKEYR: + vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse); + break; case KVMPPC_GSID_CIABR: vcpu->arch.ciabr = kvmppc_gse_get_u64(gse); break; @@ -543,6 +573,9 @@ static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm, case KVMPPC_GSID_VTB: vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse); break; + case KVMPPC_GSID_DPDES: + vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse); + break; case KVMPPC_GSID_LPCR: vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse); break; diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index e42984878503..f2636414d82a 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -837,7 +837,7 @@ static inline void this_cpu_inc_rm(unsigned int __percpu *addr) */ static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc) { - this_cpu_inc_rm(desc->kstat_irqs); + this_cpu_inc_rm(&desc->kstat_irqs->cnt); __this_cpu_inc(kstat.irqs_sum); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ea7ad200b330..83f7504349d2 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1524,14 +1524,12 @@ kvm_flush_link_stack: /* Flush the link stack. On Power8 it's up to 32 entries in size. */ .rept 32 - ANNOTATE_INTRA_FUNCTION_CALL bl .+4 .endr /* And on Power9 it's up to 64. 
*/ BEGIN_FTR_SECTION .rept 32 - ANNOTATE_INTRA_FUNCTION_CALL bl .+4 .endr END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 92f33115144b..3a6592a31a10 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -879,9 +879,8 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, { int ret = H_PARAMETER; - struct page *uvmem_page; + struct page *page, *uvmem_page; struct kvmppc_uvmem_page_pvt *pvt; - unsigned long pfn; unsigned long gfn = gpa >> page_shift; int srcu_idx; unsigned long uvmem_pfn; @@ -901,8 +900,8 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, retry: mutex_unlock(&kvm->arch.uvmem_lock); - pfn = gfn_to_pfn(kvm, gfn); - if (is_error_noslot_pfn(pfn)) + page = gfn_to_page(kvm, gfn); + if (!page) goto out; mutex_lock(&kvm->arch.uvmem_lock); @@ -911,16 +910,16 @@ retry: pvt = uvmem_page->zone_device_data; pvt->skip_page_out = true; pvt->remove_gfn = false; /* it continues to be a valid GFN */ - kvm_release_pfn_clean(pfn); + kvm_release_page_unused(page); goto retry; } - if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, + if (!uv_page_in(kvm->arch.lpid, page_to_pfn(page) << page_shift, gpa, 0, page_shift)) { kvmppc_gfn_shared(gfn, kvm); ret = H_SUCCESS; } - kvm_release_pfn_clean(pfn); + kvm_release_page_clean(page); mutex_unlock(&kvm->arch.uvmem_lock); out: srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -1083,21 +1082,21 @@ out: int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) { - unsigned long pfn; + struct page *page; int ret = U_SUCCESS; - pfn = gfn_to_pfn(kvm, gfn); - if (is_error_noslot_pfn(pfn)) + page = gfn_to_page(kvm, gfn); + if (!page) return -EFAULT; mutex_lock(&kvm->arch.uvmem_lock); if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) goto out; - ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, - 0, PAGE_SHIFT); + ret = uv_page_in(kvm->arch.lpid, page_to_pfn(page) << PAGE_SHIFT, + gfn << PAGE_SHIFT, 0, PAGE_SHIFT); out: - kvm_release_pfn_clean(pfn); + kvm_release_page_clean(page); mutex_unlock(&kvm->arch.uvmem_lock); return (ret == U_SUCCESS) ? 
RESUME_GUEST : -EFAULT; } diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index ce79ac33e8d3..d904e13e069b 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c @@ -92,12 +92,6 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) spin_unlock(&vcpu3s->mmu_lock); } -static void free_pte_rcu(struct rcu_head *head) -{ - struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head); - kmem_cache_free(hpte_cache, pte); -} - static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); @@ -126,7 +120,7 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) spin_unlock(&vcpu3s->mmu_lock); - call_rcu(&pte->rcu_head, free_pte_rcu); + kfree_rcu(pte, rcu_head); } static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 5b92619a05fd..83bcdc80ce51 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -461,12 +461,6 @@ static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* The page will get remapped properly on its next fault */ - return do_kvm_unmap_gfn(kvm, range); -} - /*****************************************/ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) @@ -645,29 +639,27 @@ static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) */ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) { - struct page *hpage; + struct kvm_host_map map; u64 hpage_offset; u32 *page; - int i; + int i, r; - hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); - if (is_error_page(hpage)) + r = kvm_vcpu_map(vcpu, pte->raddr >> PAGE_SHIFT, &map); + if (r) return; hpage_offset = pte->raddr & ~PAGE_MASK; hpage_offset &= ~0xFFFULL; hpage_offset /= 4; - get_page(hpage); - page = kmap_atomic(hpage); + page = map.hva; /* patch dcbz into reserved instruction, so we trap */ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ) page[i] &= cpu_to_be32(0xfffffff7); - kunmap_atomic(page); - put_page(hpage); + kvm_vcpu_unmap(vcpu, &map); } static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) @@ -2071,7 +2063,6 @@ static struct kvmppc_ops kvm_ops_pr = { .unmap_gfn_range = kvm_unmap_gfn_range_pr, .age_gfn = kvm_age_gfn_pr, .test_age_gfn = kvm_test_age_gfn_pr, - .set_spte_gfn = kvm_set_spte_gfn_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, @@ -2118,6 +2109,7 @@ void kvmppc_book3s_exit_pr(void) module_init(kvmppc_book3s_init_pr); module_exit(kvmppc_book3s_exit_pr); +MODULE_DESCRIPTION("KVM on Book3S without using hypervisor mode"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(KVM_MINOR); MODULE_ALIAS("devname:kvm"); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 29a382249770..1302b5ac5672 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -531,7 +531,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) xc->cppr = xive_prio_from_guest(new_cppr); /* - * IPIs are synthetized from MFRR and thus don't need + * IPIs are synthesized from MFRR and thus don't need * any special EOI handling. 
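The kvmppc_patch_dcbz() hunk above scans a guest page of big-endian instruction words, matches dcbz by its primary/extended opcode while ignoring the register fields, and clears one bit so the result becomes a reserved encoding that traps. A minimal standalone sketch of that scan, assuming the usual INS_DCBZ encoding of 0x7c0007ec and using htonl/ntohl in place of the kernel's cpu_to_be32/be32_to_cpu:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define INS_DCBZ 0x7c0007ecu	/* assumed encoding: opcode 31, xo 1014, RA/RB zeroed */

int main(void)
{
	/* fake guest page: one dcbz r4,r5 (0x7c042fec) among nops */
	uint32_t page[4] = { htonl(0x60000000), htonl(0x7c042fec),
			     htonl(0x60000000), htonl(0x60000000) };

	for (int i = 0; i < 4; i++) {
		/* compare opcode bits only, i.e. (be32_to_cpu(page[i]) & 0xff0007ff) */
		if ((ntohl(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= htonl(0xfffffff7);	/* now a reserved, trapping encoding */
	}

	for (int i = 0; i < 4; i++)
		printf("%08x\n", ntohl(page[i]));	/* the dcbz became 0x7c042fe4 */
	return 0;
}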
The underlying interrupt * used to signal MFRR changes is EOId when fetched from * the queue. @@ -1555,7 +1555,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, struct kvmppc_xive_src_block *sb; struct kvmppc_xive_irq_state *state; struct irq_data *host_data = - irq_domain_get_irq_data(irq_get_default_host(), host_irq); + irq_domain_get_irq_data(irq_get_default_domain(), host_irq); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data); u16 idx; u8 prio; diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 6e2ebbd8aaac..d9bf1bc3ff61 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -654,7 +654,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive, } page = gfn_to_page(kvm, gfn); - if (is_error_page(page)) { + if (!page) { srcu_read_unlock(&kvm->srcu, srcu_idx); pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr); return -EINVAL; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 6a5be025a8af..3401b96be475 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -572,7 +572,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, /* * Return the number of jiffies until the next timeout. If the timeout is - * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA + * longer than the TIMER_NEXT_MAX_DELTA, then return TIMER_NEXT_MAX_DELTA * because the larger value can break the timer APIs. */ static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu) @@ -598,7 +598,7 @@ static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu) if (do_div(nr_jiffies, tb_ticks_per_jiffy)) nr_jiffies++; - return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA); + return min_t(unsigned long long, nr_jiffies, TIMER_NEXT_MAX_DELTA); } static void arm_next_watchdog(struct kvm_vcpu *vcpu) @@ -616,19 +616,19 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu) spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); nr_jiffies = watchdog_next_timeout(vcpu); /* - * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA + * If the number of jiffies of watchdog timer >= TIMER_NEXT_MAX_DELTA * then do not run the watchdog timer as this can break timer APIs. 
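The watchdog hunks above only rename the clamp macro; the computation itself rounds a timebase delta up to whole jiffies and caps it at TIMER_NEXT_MAX_DELTA so mod_timer() never sees an out-of-range expiry. A standalone sketch of that round-up-and-clamp, with the tick rates and the clamp value chosen purely for illustration:

#include <stdio.h>

/* illustrative values only: a 500 MHz timebase and 250 Hz jiffies */
#define TB_TICKS_PER_JIFFY   (500000000ULL / 250)
#define TIMER_NEXT_MAX_DELTA ((1UL << 30) - 1)	/* assumed clamp value */

static unsigned long ticks_to_bounded_jiffies(unsigned long long ticks)
{
	unsigned long long jiffies = ticks / TB_TICKS_PER_JIFFY;

	if (ticks % TB_TICKS_PER_JIFFY)	/* same rounding as the do_div() remainder check */
		jiffies++;

	return jiffies < TIMER_NEXT_MAX_DELTA ? jiffies : TIMER_NEXT_MAX_DELTA;
}

int main(void)
{
	printf("%lu\n", ticks_to_bounded_jiffies(3000000));	/* 1.5 jiffies -> 2 */
	printf("%lu\n", ticks_to_bounded_jiffies(1ULL << 62));	/* clamped */
	return 0;
}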
*/ - if (nr_jiffies < NEXT_TIMER_MAX_DELTA) + if (nr_jiffies < TIMER_NEXT_MAX_DELTA) mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); else - del_timer(&vcpu->arch.wdt_timer); + timer_delete(&vcpu->arch.wdt_timer); spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); } static void kvmppc_watchdog_func(struct timer_list *t) { - struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer); + struct kvm_vcpu *vcpu = timer_container_of(vcpu, t, arch.wdt_timer); u32 tsr, new_tsr; int final; @@ -1441,7 +1441,7 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) { - del_timer_sync(&vcpu->arch.wdt_timer); + timer_delete_sync(&vcpu->arch.wdt_timer); } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 6d0d329cbb35..f9acf866c709 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -34,6 +34,8 @@ enum vcpu_ftr { #define E500_TLB_BITMAP (1 << 30) /* TLB1 entry is mapped by host TLB0 */ #define E500_TLB_TLB0 (1 << 29) +/* entry is writable on the host */ +#define E500_TLB_WRITABLE (1 << 28) /* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */ #define E500_TLB_MAS2_ATTR (0x7f) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index ccb8f16ffe41..06caf8bbbe2b 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -45,11 +45,14 @@ static inline unsigned int tlb1_max_shadow_size(void) return host_tlb_params[1].entries - tlbcam_index - 1; } -static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) +static inline u32 e500_shadow_mas3_attrib(u32 mas3, bool writable, int usermode) { /* Mask off reserved bits. */ mas3 &= MAS3_ATTRIB_MASK; + if (!writable) + mas3 &= ~(MAS3_UW|MAS3_SW); + #ifndef CONFIG_KVM_BOOKE_HV if (!usermode) { /* Guest is in supervisor mode, @@ -244,19 +247,16 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, - kvm_pfn_t pfn, unsigned int wimg) + kvm_pfn_t pfn, unsigned int wimg, + bool writable) { ref->pfn = pfn; ref->flags = E500_TLB_VALID; + if (writable) + ref->flags |= E500_TLB_WRITABLE; /* Use guest supplied MAS2_G and MAS2_E */ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; - - /* Mark the page accessed */ - kvm_set_pfn_accessed(pfn); - - if (tlbe_is_writable(gtlbe)) - kvm_set_pfn_dirty(pfn); } static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) @@ -309,6 +309,7 @@ static void kvmppc_e500_setup_stlbe( { kvm_pfn_t pfn = ref->pfn; u32 pr = vcpu->arch.shared->msr & MSR_PR; + bool writable = !!(ref->flags & E500_TLB_WRITABLE); BUG_ON(!(ref->flags & E500_TLB_VALID)); @@ -316,7 +317,7 @@ static void kvmppc_e500_setup_stlbe( stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | - e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); + e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr); } static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, @@ -325,18 +326,19 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, struct tlbe_ref *ref) { struct kvm_memory_slot *slot; - unsigned long pfn = 0; /* silence GCC warning */ + unsigned int psize; + unsigned long pfn; + struct page *page = NULL; unsigned long hva; - int pfnmap = 0; 
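The new writable argument above threads host write permission down to e500_shadow_mas3_attrib(), which strips both the supervisor and user write bits when the host page is read-only, so a shadow TLB entry can never grant the guest more access than the backing mapping. A sketch of just that masking step, with MAS3 permission bit values assumed here (the real definitions live in mmu-book3e.h):

#include <stdio.h>
#include <stdbool.h>

/* assumed Book3E MAS3 permission bits */
#define MAS3_SR 0x001	/* supervisor read */
#define MAS3_UR 0x002	/* user read */
#define MAS3_SW 0x004	/* supervisor write */
#define MAS3_UW 0x008	/* user write */

static unsigned int shadow_mas3(unsigned int guest_mas3, bool host_writable)
{
	/* never grant the guest more access than the host page allows */
	if (!host_writable)
		guest_mas3 &= ~(unsigned int)(MAS3_UW | MAS3_SW);
	return guest_mas3;
}

int main(void)
{
	/* guest asks for read+write, host page is read-only: writes stripped, prints 0x3 */
	printf("%#x\n", shadow_mas3(MAS3_SR | MAS3_SW | MAS3_UR | MAS3_UW, false));
	return 0;
}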
int tsize = BOOK3E_PAGESZ_4K; int ret = 0; unsigned long mmu_seq; struct kvm *kvm = vcpu_e500->vcpu.kvm; - unsigned long tsize_pages = 0; pte_t *ptep; unsigned int wimg = 0; pgd_t *pgdir; unsigned long flags; + bool writable = false; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_invalidate_seq; @@ -353,110 +355,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); hva = gfn_to_hva_memslot(slot, gfn); - if (tlbsel == 1) { - struct vm_area_struct *vma; - mmap_read_lock(kvm->mm); - - vma = find_vma(kvm->mm, hva); - if (vma && hva >= vma->vm_start && - (vma->vm_flags & VM_PFNMAP)) { - /* - * This VMA is a physically contiguous region (e.g. - * /dev/mem) that bypasses normal Linux page - * management. Find the overlap between the - * vma and the memslot. - */ - - unsigned long start, end; - unsigned long slot_start, slot_end; - - pfnmap = 1; - - start = vma->vm_pgoff; - end = start + - vma_pages(vma); - - pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); - - slot_start = pfn - (gfn - slot->base_gfn); - slot_end = slot_start + slot->npages; - - if (start < slot_start) - start = slot_start; - if (end > slot_end) - end = slot_end; - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - - /* - * Now find the largest tsize (up to what the guest - * requested) that will cover gfn, stay within the - * range, and for which gfn and pfn are mutually - * aligned. - */ - - for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { - unsigned long gfn_start, gfn_end; - tsize_pages = 1UL << (tsize - 2); - - gfn_start = gfn & ~(tsize_pages - 1); - gfn_end = gfn_start + tsize_pages; - - if (gfn_start + pfn - gfn < start) - continue; - if (gfn_end + pfn - gfn > end) - continue; - if ((gfn & (tsize_pages - 1)) != - (pfn & (tsize_pages - 1))) - continue; - - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); - pfn &= ~(tsize_pages - 1); - break; - } - } else if (vma && hva >= vma->vm_start && - is_vm_hugetlb_page(vma)) { - unsigned long psize = vma_kernel_pagesize(vma); - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * Take the largest page size that satisfies both host - * and guest mapping - */ - tsize = min(__ilog2(psize) - 10, tsize); - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - } - - mmap_read_unlock(kvm->mm); - } - - if (likely(!pfnmap)) { - tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); - pfn = gfn_to_pfn_memslot(slot, gfn); - if (is_error_noslot_pfn(pfn)) { - if (printk_ratelimit()) - pr_err("%s: real page not found for gfn %lx\n", - __func__, (long)gfn); - return -EINVAL; - } - - /* Align guest and physical address to page map boundaries */ - pfn &= ~(tsize_pages - 1); - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page); + if (is_error_noslot_pfn(pfn)) { + if (printk_ratelimit()) + pr_err("%s: real page not found for gfn %lx\n", + __func__, (long)gfn); + return -EINVAL; } spin_lock(&kvm->mmu_lock); @@ -474,14 +378,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, * can't run hence pfn won't change. 
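The VMA-walking block removed above computed the largest usable TLB1 page size; the same tsize search reappears below, keyed off the host PTE's page size instead. The invariant it enforces is unchanged: gfn and pfn must agree in their low bits, and the resulting window must stay inside the memslot's host pfn range. A worked standalone example of that search, with made-up gfn/pfn/slot numbers:

#include <stdio.h>

int main(void)
{
	/* made-up numbers: the guest asked for a 1MB (tsize 10) TLB1 mapping */
	unsigned long gfn = 0x01234, pfn = 0x55634;
	unsigned long start = 0x55000, end = 0x56000;	/* host pfns backing the slot */
	int tsize = 10;					/* page size is 1KB << tsize */

	for (; tsize > 2; tsize -= 2) {			/* 2 == BOOK3E_PAGESZ_4K (assumed) */
		unsigned long tsize_pages = 1UL << (tsize - 2);
		unsigned long gfn_start = gfn & ~(tsize_pages - 1);
		unsigned long gfn_end = gfn_start + tsize_pages;

		if (gfn_start + pfn - gfn < start)
			continue;
		if (gfn_end + pfn - gfn > end)
			continue;
		if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1)))
			continue;
		break;
	}

	/* gfn and pfn share their low 8 bits (0x34) and the 256-page window
	 * [0x55600, 0x55700) fits inside the slot, so tsize 10 (1MB) survives */
	printf("chosen tsize = %d (%lu KB)\n", tsize, 1UL << tsize);
	return 0;
}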
*/ local_irq_save(flags); - ptep = find_linux_pte(pgdir, hva, NULL, NULL); + ptep = find_linux_pte(pgdir, hva, NULL, &psize); if (ptep) { pte_t pte = READ_ONCE(*ptep); if (pte_present(pte)) { wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; - local_irq_restore(flags); } else { local_irq_restore(flags); pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n", @@ -490,20 +393,79 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, goto out; } } - kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); + local_irq_restore(flags); + + if (psize && tlbsel == 1) { + unsigned long psize_pages, tsize_pages; + unsigned long start, end; + unsigned long slot_start, slot_end; + + psize_pages = 1UL << (psize - PAGE_SHIFT); + start = pfn & ~(psize_pages - 1); + end = start + psize_pages; + + slot_start = pfn - (gfn - slot->base_gfn); + slot_end = slot_start + slot->npages; + + if (start < slot_start) + start = slot_start; + if (end > slot_end) + end = slot_end; + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * Any page size that doesn't satisfy the host mapping + * will fail the start and end tests. + */ + tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize); + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. + */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); + + /* + * Now find the largest tsize (up to what the guest + * requested) that will cover gfn, stay within the + * range, and for which gfn and pfn are mutually + * aligned. + */ + + for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { + unsigned long gfn_start, gfn_end; + tsize_pages = 1UL << (tsize - 2); + + gfn_start = gfn & ~(tsize_pages - 1); + gfn_end = gfn_start + tsize_pages; + + if (gfn_start + pfn - gfn < start) + continue; + if (gfn_end + pfn - gfn > end) + continue; + if ((gfn & (tsize_pages - 1)) != + (pfn & (tsize_pages - 1))) + continue; + + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + pfn &= ~(tsize_pages - 1); + break; + } + } + kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); + writable = tlbe_is_writable(stlbe); /* Clear i-cache for new pages */ kvmppc_mmu_flush_icache(pfn); out: + kvm_release_faultin_page(kvm, page, !!ret, writable); spin_unlock(&kvm->mmu_lock); - - /* Drop refcount on page, so that mmu notifiers can clear it */ - kvm_release_pfn_clean(pfn); - return ret; } @@ -747,12 +709,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* The page will get remapped properly on its next fault */ - return kvm_e500_mmu_unmap_gfn(kvm, range); -} - /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c index b80dbc58621f..871cf60ddeb6 100644 --- a/arch/powerpc/kvm/guest-state-buffer.c +++ b/arch/powerpc/kvm/guest-state-buffer.c @@ -92,6 +92,10 @@ static int kvmppc_gsid_class(u16 iden) (iden <= KVMPPC_GSE_GUESTWIDE_END)) return KVMPPC_GS_CLASS_GUESTWIDE; + if ((iden >= KVMPPC_GSE_HOSTWIDE_START) && + (iden <= KVMPPC_GSE_HOSTWIDE_END)) + return KVMPPC_GS_CLASS_HOSTWIDE; + if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END)) return KVMPPC_GS_CLASS_META; @@ -118,6 +122,21 @@ static int kvmppc_gsid_type(u16 iden) int type = -1; switch (kvmppc_gsid_class(iden)) { + case KVMPPC_GS_CLASS_HOSTWIDE: + switch 
(iden) { + case KVMPPC_GSID_L0_GUEST_HEAP: + fallthrough; + case KVMPPC_GSID_L0_GUEST_HEAP_MAX: + fallthrough; + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE: + fallthrough; + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX: + fallthrough; + case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM: + type = KVMPPC_GSE_BE64; + break; + } + break; case KVMPPC_GS_CLASS_GUESTWIDE: switch (iden) { case KVMPPC_GSID_HOST_STATE_SIZE: @@ -187,6 +206,9 @@ unsigned long kvmppc_gsid_flags(u16 iden) case KVMPPC_GS_CLASS_GUESTWIDE: flags = KVMPPC_GS_FLAGS_WIDE; break; + case KVMPPC_GS_CLASS_HOSTWIDE: + flags = KVMPPC_GS_FLAGS_HOST_WIDE; + break; case KVMPPC_GS_CLASS_META: case KVMPPC_GS_CLASS_DWORD_REG: case KVMPPC_GS_CLASS_WORD_REG: @@ -310,6 +332,13 @@ static inline int kvmppc_gse_flatten_iden(u16 iden) bit += KVMPPC_GSE_GUESTWIDE_COUNT; + if (class == KVMPPC_GS_CLASS_HOSTWIDE) { + bit += iden - KVMPPC_GSE_HOSTWIDE_START; + return bit; + } + + bit += KVMPPC_GSE_HOSTWIDE_COUNT; + if (class == KVMPPC_GS_CLASS_META) { bit += iden - KVMPPC_GSE_META_START; return bit; @@ -356,6 +385,12 @@ static inline u16 kvmppc_gse_unflatten_iden(int bit) } bit -= KVMPPC_GSE_GUESTWIDE_COUNT; + if (bit < KVMPPC_GSE_HOSTWIDE_COUNT) { + iden = KVMPPC_GSE_HOSTWIDE_START + bit; + return iden; + } + bit -= KVMPPC_GSE_HOSTWIDE_COUNT; + if (bit < KVMPPC_GSE_META_COUNT) { iden = KVMPPC_GSE_META_START + bit; return iden; @@ -588,6 +623,8 @@ int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags) if (flags & KVMPPC_GS_FLAGS_WIDE) hflags |= H_GUEST_FLAGS_WIDE; + if (flags & KVMPPC_GS_FLAGS_HOST_WIDE) + hflags |= H_GUEST_FLAGS_HOST_WIDE; rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id, __pa(gsb->hdr), gsb->capacity, &i); @@ -613,6 +650,8 @@ int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags) if (flags & KVMPPC_GS_FLAGS_WIDE) hflags |= H_GUEST_FLAGS_WIDE; + if (flags & KVMPPC_GS_FLAGS_HOST_WIDE) + hflags |= H_GUEST_FLAGS_HOST_WIDE; rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id, __pa(gsb->hdr), gsb->capacity, &i); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index d32abe7fe6ab..153587741864 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -550,12 +550,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: + fallthrough; case KVM_CAP_SPAPR_TCE_64: - r = 1; - break; case KVM_CAP_SPAPR_TCE_VFIO: - r = !!cpu_has_feature(CPU_FTR_HVMODE); - break; case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: case KVM_CAP_PPC_ENABLE_HCALL: @@ -612,9 +609,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 8 | 4 | 2 | 1; } break; - case KVM_CAP_PPC_RMA: - r = 0; - break; case KVM_CAP_PPC_HWRNG: r = kvmppc_hwrng_present(); break; @@ -769,8 +763,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { int err; - hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); - vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; + hrtimer_setup(&vcpu->arch.dec_timer, kvmppc_decrementer_wakeup, CLOCK_REALTIME, + HRTIMER_MODE_ABS); #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); @@ -1852,7 +1846,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_sigset_activate(vcpu); - if (run->immediate_exit) + if (!vcpu->wants_to_run) r = -EINTR; else r = kvmppc_vcpu_run(vcpu); @@ -1933,54 +1927,48 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, #endif #ifdef CONFIG_KVM_MPIC case KVM_CAP_IRQ_MPIC: { - struct fd f; + CLASS(fd, f)(cap->args[0]); 
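The CLASS(fd, f)(...) declaration above comes from <linux/file.h>: it replaces the fdget()/fdput() pairing with a scope-bound guard, so every early break out of the case block drops the file reference automatically, and fd_empty()/fd_file() replace the open-coded f.file tests. The guard is built on compiler cleanup attributes; a userspace analogue of the same idiom, assuming GCC or Clang:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/* userspace analogue of the kernel's CLASS(fd, ...) scope guard */
static void fd_release(int *fd)
{
	if (*fd >= 0)
		close(*fd);
}
#define SCOPED_FD(name, init) \
	int name __attribute__((cleanup(fd_release))) = (init)

int main(void)
{
	SCOPED_FD(f, open("/dev/null", O_RDONLY));

	if (f < 0)	/* plays the role of fd_empty() */
		return 1;
	/* any return from here on still closes f automatically */
	printf("fd %d is open\n", f);
	return 0;
}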
struct kvm_device *dev; r = -EBADF; - f = fdget(cap->args[0]); - if (!f.file) + if (fd_empty(f)) break; r = -EPERM; - dev = kvm_device_from_filp(f.file); + dev = kvm_device_from_filp(fd_file(f)); if (dev) r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); - fdput(f); break; } #endif #ifdef CONFIG_KVM_XICS case KVM_CAP_IRQ_XICS: { - struct fd f; + CLASS(fd, f)(cap->args[0]); struct kvm_device *dev; r = -EBADF; - f = fdget(cap->args[0]); - if (!f.file) + if (fd_empty(f)) break; r = -EPERM; - dev = kvm_device_from_filp(f.file); + dev = kvm_device_from_filp(fd_file(f)); if (dev) { if (xics_on_xive()) r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); else r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); } - - fdput(f); break; } #endif /* CONFIG_KVM_XICS */ #ifdef CONFIG_KVM_XIVE case KVM_CAP_PPC_IRQ_XIVE: { - struct fd f; + CLASS(fd, f)(cap->args[0]); struct kvm_device *dev; r = -EBADF; - f = fdget(cap->args[0]); - if (!f.file) + if (fd_empty(f)) break; r = -ENXIO; @@ -1988,12 +1976,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, break; r = -EPERM; - dev = kvm_device_from_filp(f.file); + dev = kvm_device_from_filp(fd_file(f)); if (dev) r = kvmppc_xive_native_connect_vcpu(dev, vcpu, cap->args[1]); - - fdput(f); break; } #endif /* CONFIG_KVM_XIVE */ diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c index 4720b8dc8837..5ccca306997a 100644 --- a/arch/powerpc/kvm/test-guest-state-buffer.c +++ b/arch/powerpc/kvm/test-guest-state-buffer.c @@ -5,6 +5,7 @@ #include <kunit/test.h> #include <asm/guest-state-buffer.h> +#include <asm/kvm_ppc.h> static void test_creating_buffer(struct kunit *test) { @@ -141,6 +142,16 @@ static void test_gs_bitmap(struct kunit *test) i++; } + for (u16 iden = KVMPPC_GSID_L0_GUEST_HEAP; + iden <= KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM; iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA; iden++) { kvmppc_gsbm_set(&gsbm, iden); @@ -151,7 +162,7 @@ static void test_gs_bitmap(struct kunit *test) i++; } - for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) { + for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSE_DW_REGS_END; iden++) { kvmppc_gsbm_set(&gsbm, iden); kvmppc_gsbm_set(&gsbm1, iden); KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); @@ -309,12 +320,215 @@ static void test_gs_msg(struct kunit *test) kvmppc_gsm_free(gsm); } +/* Test data struct for hostwide/L0 counters */ +struct kvmppc_gs_msg_test_hostwide_data { + u64 guest_heap; + u64 guest_heap_max; + u64 guest_pgtable_size; + u64 guest_pgtable_size_max; + u64 guest_pgtable_reclaim; +}; + +static size_t test_hostwide_get_size(struct kvmppc_gs_msg *gsm) + +{ + size_t size = 0; + u16 ids[] = { + KVMPPC_GSID_L0_GUEST_HEAP, + KVMPPC_GSID_L0_GUEST_HEAP_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM + }; + + for (int i = 0; i < ARRAY_SIZE(ids); i++) + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); + return size; +} + +static int test_hostwide_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data; + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP, + 
data->guest_heap); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP_MAX, + data->guest_heap_max); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + data->guest_pgtable_size); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + data->guest_pgtable_size_max); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM, + data->guest_pgtable_reclaim); + + return 0; +} + +static int test_hostwide_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data; + struct kvmppc_gs_elem *gse; + int rc; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) + return rc; + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); + if (gse) + data->guest_heap = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + if (gse) + data->guest_heap_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + if (gse) + data->guest_pgtable_size = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + if (gse) + data->guest_pgtable_size_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + if (gse) + data->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse); + + return 0; +} + +static struct kvmppc_gs_msg_ops gs_msg_test_hostwide_ops = { + .get_size = test_hostwide_get_size, + .fill_info = test_hostwide_fill_info, + .refresh_info = test_hostwide_refresh_info, +}; + +static void test_gs_hostwide_msg(struct kunit *test) +{ + struct kvmppc_gs_msg_test_hostwide_data test_data = { + .guest_heap = 0xdeadbeef, + .guest_heap_max = ~0ULL, + .guest_pgtable_size = 0xff, + .guest_pgtable_size_max = 0xffffff, + .guest_pgtable_reclaim = 0xdeadbeef, + }; + struct kvmppc_gs_msg *gsm; + struct kvmppc_gs_buff *gsb; + + gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND, + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm); + + gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + + kvmppc_gsm_fill_info(gsm, gsb); + + memset(&test_data, 0, sizeof(test_data)); + + kvmppc_gsm_refresh_info(gsm, gsb); + KUNIT_EXPECT_EQ(test, test_data.guest_heap, 0xdeadbeef); + KUNIT_EXPECT_EQ(test, test_data.guest_heap_max, ~0ULL); + KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size, 0xff); + KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size_max, 0xffffff); + KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_reclaim, 0xdeadbeef); + + kvmppc_gsm_free(gsm); +} + +/* Test if the H_GUEST_GET_STATE for hostwide counters works */ +static void test_gs_hostwide_counters(struct kunit *test) +{ + struct kvmppc_gs_msg_test_hostwide_data test_data; + struct kvmppc_gs_parser gsp = { 0 }; + + struct kvmppc_gs_msg *gsm; + struct kvmppc_gs_buff *gsb; + struct kvmppc_gs_elem *gse; + int rc; + + if 
(!kvmhv_on_pseries()) + kunit_skip(test, "This test needs a kvm-hv guest"); + + gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND, + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm); + + gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + + kvmppc_gsm_fill_info(gsm, gsb); + + /* With the HOST_WIDE flag, guestid and vcpuid are ignored */ + rc = kvmppc_gsb_recv(gsb, KVMPPC_GS_FLAGS_HOST_WIDE); + KUNIT_ASSERT_EQ(test, rc, 0); + + /* Check that parsing the guest state buffer succeeds */ + rc = kvmppc_gse_parse(&gsp, gsb); + KUNIT_ASSERT_EQ(test, rc, 0); + + /* Look up the counters in the parsed GSB */ + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter missing"); + kunit_info(test, "Guest Heap Size=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter max missing"); + kunit_info(test, "Guest Heap Size Max=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size missing"); + kunit_info(test, "Guest Page-table Size=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size-max missing"); + kunit_info(test, "Guest Page-table Size Max=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table reclaim size missing"); + kunit_info(test, "Guest Page-table Reclaim Size=%llu bytes", + kvmppc_gse_get_u64(gse)); + + kvmppc_gsm_free(gsm); + kvmppc_gsb_free(gsb); +} + static struct kunit_case guest_state_buffer_testcases[] = { KUNIT_CASE(test_creating_buffer), KUNIT_CASE(test_adding_element), KUNIT_CASE(test_gs_bitmap), KUNIT_CASE(test_gs_parsing), KUNIT_CASE(test_gs_msg), + KUNIT_CASE(test_gs_hostwide_msg), + KUNIT_CASE(test_gs_hostwide_counters), {} }; @@ -325,4 +539,5 @@ static struct kunit_suite guest_state_buffer_test_suite = { kunit_test_suites(&guest_state_buffer_test_suite); +MODULE_DESCRIPTION("KUnit tests for Guest State Buffer APIs"); MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h index 45817ab82bb4..14b0e23f601f 100644 --- a/arch/powerpc/kvm/timing.h +++ b/arch/powerpc/kvm/timing.h @@ -38,11 +38,7 @@ static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {} static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) { /* type has to be known at build time for optimization */ - - /* The BUILD_BUG_ON below breaks in funny ways, commented out * for now ...
-BenH BUILD_BUG_ON(!__builtin_constant_p(type)); - */ switch (type) { case EXT_INTR_EXITS: vcpu->stat.ext_intr_exits++; diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index 8d57c8428531..35fccaa575cc 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h @@ -512,6 +512,35 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, __entry->vcpu_id, __entry->exit, __entry->ret) ); +#ifdef CONFIG_PPC_PSERIES + +TRACE_EVENT_FN_COND(kvmppc_vcpu_stats, + TP_PROTO(struct kvm_vcpu *vcpu, u64 l1_to_l2_cs, u64 l2_to_l1_cs, u64 l2_runtime), + + TP_ARGS(vcpu, l1_to_l2_cs, l2_to_l1_cs, l2_runtime), + + TP_CONDITION(l1_to_l2_cs || l2_to_l1_cs || l2_runtime), + + TP_STRUCT__entry( + __field(int, vcpu_id) + __field(u64, l1_to_l2_cs) + __field(u64, l2_to_l1_cs) + __field(u64, l2_runtime) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->l1_to_l2_cs = l1_to_l2_cs; + __entry->l2_to_l1_cs = l2_to_l1_cs; + __entry->l2_runtime = l2_runtime; + ), + + TP_printk("VCPU %d: l1_to_l2_cs_time=%llu ns l2_to_l1_cs_time=%llu ns l2_runtime=%llu ns", + __entry->vcpu_id, __entry->l1_to_l2_cs, + __entry->l2_to_l1_cs, __entry->l2_runtime), + kvmhv_counters_tracepoint_regfunc, kvmhv_counters_tracepoint_unregfunc +); +#endif #endif /* _TRACE_KVM_HV_H */ /* This part must be outside protection */ diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 0ab65eeb93ee..481f968e42c7 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -3,7 +3,7 @@ # Makefile for ppc-specific library files.. # -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) +obj-y += crypto/ CFLAGS_code-patching.o += -fno-stack-protector CFLAGS_feature-fixups.o += -fno-stack-protector @@ -80,4 +80,10 @@ CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec) # Enable <altivec.h> CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) +obj-$(CONFIG_CRC32_ARCH) += crc32-powerpc.o +crc32-powerpc-y := crc32.o crc32c-vpmsum_asm.o + +obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-powerpc.o +crc-t10dif-powerpc-y := crc-t10dif.o crct10dif-vpmsum_asm.o + obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index c6ab46156cda..f84e0337cc02 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -17,18 +17,17 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/page.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/inst.h> -static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr) +static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword) { - if (!ppc_inst_prefixed(instr)) { - u32 val = ppc_inst_val(instr); + if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) { + /* For big endian correctness: plain address would use the wrong half */ + u32 val32 = val; - __put_kernel_nofault(patch_addr, &val, u32, failed); + __put_kernel_nofault(patch_addr, &val32, u32, failed); } else { - u64 val = ppc_inst_as_ulong(instr); - __put_kernel_nofault(patch_addr, &val, u64, failed); } @@ -44,7 +43,10 @@ failed: int raw_patch_instruction(u32 *addr, ppc_inst_t instr) { - return __patch_instruction(addr, instr, addr); + if (ppc_inst_prefixed(instr)) + return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true); + else + return __patch_mem(addr, ppc_inst_val(instr), addr, false); } struct patch_context { @@ -106,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu) unsigned long addr; int err; - 
area = get_vm_area(PAGE_SIZE, VM_ALLOC); + area = get_vm_area(PAGE_SIZE, 0); if (!area) { WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu); @@ -225,7 +227,7 @@ void __init poking_init(void) static unsigned long get_patch_pfn(void *addr) { - if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) + if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr)) return vmalloc_to_pfn(addr); else return __pa_symbol(addr) >> PAGE_SHIFT; @@ -276,7 +278,7 @@ static void unmap_patch_area(unsigned long addr) flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } -static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) +static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword) { int err; u32 *patch_addr; @@ -305,7 +307,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) orig_mm = start_using_temp_mm(patching_mm); - err = __patch_instruction(addr, instr, patch_addr); + err = __patch_mem(addr, val, patch_addr, is_dword); /* context synchronisation performed by __patch_instruction (isync or exception) */ stop_using_temp_mm(patching_mm, orig_mm); @@ -322,7 +324,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr) return err; } -static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) +static int __do_patch_mem(void *addr, unsigned long val, bool is_dword) { int err; u32 *patch_addr; @@ -339,7 +341,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) if (radix_enabled()) asm volatile("ptesync": : :"memory"); - err = __patch_instruction(addr, instr, patch_addr); + err = __patch_mem(addr, val, patch_addr, is_dword); pte_clear(&init_mm, text_poke_addr, pte); flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE); @@ -347,7 +349,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) return err; } -int patch_instruction(u32 *addr, ppc_inst_t instr) +static int patch_mem(void *addr, unsigned long val, bool is_dword) { int err; unsigned long flags; @@ -359,22 +361,83 @@ int patch_instruction(u32 *addr, ppc_inst_t instr) */ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) || !static_branch_likely(&poking_init_done)) - return raw_patch_instruction(addr, instr); + return __patch_mem(addr, val, addr, is_dword); local_irq_save(flags); if (mm_patch_enabled()) - err = __do_patch_instruction_mm(addr, instr); + err = __do_patch_mem_mm(addr, val, is_dword); else - err = __do_patch_instruction(addr, instr); + err = __do_patch_mem(addr, val, is_dword); local_irq_restore(flags); return err; } + +#ifdef CONFIG_PPC64 + +int patch_instruction(u32 *addr, ppc_inst_t instr) +{ + if (ppc_inst_prefixed(instr)) + return patch_mem(addr, ppc_inst_as_ulong(instr), true); + else + return patch_mem(addr, ppc_inst_val(instr), false); +} NOKPROBE_SYMBOL(patch_instruction); +int patch_uint(void *addr, unsigned int val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int))) + return -EINVAL; + + return patch_mem(addr, val, false); +} +NOKPROBE_SYMBOL(patch_uint); + +int patch_ulong(void *addr, unsigned long val) +{ + if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long))) + return -EINVAL; + + return patch_mem(addr, val, true); +} +NOKPROBE_SYMBOL(patch_ulong); + +#else + +int patch_instruction(u32 *addr, ppc_inst_t instr) +{ + return patch_mem(addr, ppc_inst_val(instr), false); +} +NOKPROBE_SYMBOL(patch_instruction) + +#endif + +static int patch_memset64(u64 *addr, u64 val, size_t count) +{ + for (u64 *end = addr + count; addr < end; addr++) + __put_kernel_nofault(addr, &val, u64, failed); + + return 0; 
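The __patch_mem() hunk above keeps an explicit u32 temporary, with the comment about big-endian correctness: if a 32-bit instruction were stored through a u64 pointer on a 64-bit big-endian kernel, the instruction slot would receive the (zero) high half of the doubleword. A small standalone demonstration of exactly that hazard:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t val = 0x60000000;	/* a 32-bit instruction held in a u64 */
	uint32_t word;
	unsigned char buf[8];

	memcpy(buf, &val, 8);		/* "patch" 8 bytes, as a u64 store would */
	memcpy(&word, buf, 4);		/* the instruction slot only reads 4 */

	/* On big endian this prints 0 (the high half); on little endian,
	 * 0x60000000. Hence __patch_mem narrows to a u32 before storing. */
	printf("%#x\n", word);
	return 0;
}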
+ +failed: + return -EPERM; +} + +static int patch_memset32(u32 *addr, u32 val, size_t count) +{ + for (u32 *end = addr + count; addr < end; addr++) + __put_kernel_nofault(addr, &val, u32, failed); + + return 0; + +failed: + return -EPERM; +} + static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr) { unsigned long start = (unsigned long)patch_addr; + int err; /* Repeat instruction */ if (repeat_instr) { @@ -383,19 +446,19 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep if (ppc_inst_prefixed(instr)) { u64 val = ppc_inst_as_ulong(instr); - memset64((u64 *)patch_addr, val, len / 8); + err = patch_memset64((u64 *)patch_addr, val, len / 8); } else { u32 val = ppc_inst_val(instr); - memset32(patch_addr, val, len / 4); + err = patch_memset32(patch_addr, val, len / 4); } } else { - memcpy(patch_addr, code, len); + err = copy_to_kernel_nofault(patch_addr, code, len); } smp_wmb(); /* smp write barrier */ flush_icache_range(start, start + len); - return 0; + return err; } /* @@ -430,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep orig_mm = start_using_temp_mm(patching_mm); + kasan_disable_current(); err = __patch_instructions(patch_addr, code, len, repeat_instr); + kasan_enable_current(); /* context synchronisation performed by __patch_instructions */ stop_using_temp_mm(patching_mm, orig_mm); diff --git a/arch/powerpc/lib/crc-t10dif.c b/arch/powerpc/lib/crc-t10dif.c new file mode 100644 index 000000000000..be23ded3a9df --- /dev/null +++ b/arch/powerpc/lib/crc-t10dif.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Calculate a CRC T10-DIF with vpmsum acceleration + * + * Copyright 2017, Daniel Axtens, IBM Corporation. + * [based on crc32c-vpmsum_glue.c] + */ + +#include <asm/switch_to.h> +#include <crypto/internal/simd.h> +#include <linux/cpufeature.h> +#include <linux/crc-t10dif.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <linux/uaccess.h> + +#define VMX_ALIGN 16 +#define VMX_ALIGN_MASK (VMX_ALIGN-1) + +#define VECTOR_BREAKPOINT 64 + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto); + +u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len); + +u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len) +{ + unsigned int prealign; + unsigned int tail; + u32 crc = crci; + + if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || + !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable()) + return crc_t10dif_generic(crc, p, len); + + if ((unsigned long)p & VMX_ALIGN_MASK) { + prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); + crc = crc_t10dif_generic(crc, p, prealign); + len -= prealign; + p += prealign; + } + + if (len & ~VMX_ALIGN_MASK) { + crc <<= 16; + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + crc = __crct10dif_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + disable_kernel_altivec(); + pagefault_enable(); + preempt_enable(); + crc >>= 16; + } + + tail = len & VMX_ALIGN_MASK; + if (tail) { + p += len & ~VMX_ALIGN_MASK; + crc = crc_t10dif_generic(crc, p, tail); + } + + return crc & 0xffff; +} +EXPORT_SYMBOL(crc_t10dif_arch); + +static int __init crc_t10dif_powerpc_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + static_branch_enable(&have_vec_crypto); + return 0; +} +subsys_initcall(crc_t10dif_powerpc_init); + +static void __exit 
crc_t10dif_powerpc_exit(void) +{ +} +module_exit(crc_t10dif_powerpc_exit); + +MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>"); +MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/lib/crc-vpmsum-template.S index b0f87f595b26..b0f87f595b26 100644 --- a/arch/powerpc/crypto/crc32-vpmsum_core.S +++ b/arch/powerpc/lib/crc-vpmsum-template.S diff --git a/arch/powerpc/lib/crc32.c b/arch/powerpc/lib/crc32.c new file mode 100644 index 000000000000..0d9befb6e7b8 --- /dev/null +++ b/arch/powerpc/lib/crc32.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <asm/switch_to.h> +#include <crypto/internal/simd.h> +#include <linux/cpufeature.h> +#include <linux/crc32.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <linux/uaccess.h> + +#define VMX_ALIGN 16 +#define VMX_ALIGN_MASK (VMX_ALIGN-1) + +#define VECTOR_BREAKPOINT 512 + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto); + +u32 __crc32c_vpmsum(u32 crc, const u8 *p, size_t len); + +u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) +{ + return crc32_le_base(crc, p, len); +} +EXPORT_SYMBOL(crc32_le_arch); + +u32 crc32c_arch(u32 crc, const u8 *p, size_t len) +{ + unsigned int prealign; + unsigned int tail; + + if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || + !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable()) + return crc32c_base(crc, p, len); + + if ((unsigned long)p & VMX_ALIGN_MASK) { + prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); + crc = crc32c_base(crc, p, prealign); + len -= prealign; + p += prealign; + } + + if (len & ~VMX_ALIGN_MASK) { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + disable_kernel_altivec(); + pagefault_enable(); + preempt_enable(); + } + + tail = len & VMX_ALIGN_MASK; + if (tail) { + p += len & ~VMX_ALIGN_MASK; + crc = crc32c_base(crc, p, tail); + } + + return crc; +} +EXPORT_SYMBOL(crc32c_arch); + +u32 crc32_be_arch(u32 crc, const u8 *p, size_t len) +{ + return crc32_be_base(crc, p, len); +} +EXPORT_SYMBOL(crc32_be_arch); + +static int __init crc32_powerpc_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + static_branch_enable(&have_vec_crypto); + return 0; +} +subsys_initcall(crc32_powerpc_init); + +static void __exit crc32_powerpc_exit(void) +{ +} +module_exit(crc32_powerpc_exit); + +u32 crc32_optimizations(void) +{ + if (static_key_enabled(&have_vec_crypto)) + return CRC32C_OPTIMIZATION; + return 0; +} +EXPORT_SYMBOL(crc32_optimizations); + +MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); +MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/lib/crc32c-vpmsum_asm.S index bf442004ea1f..1b35c55cce0a 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_asm.S +++ b/arch/powerpc/lib/crc32c-vpmsum_asm.S @@ -839,4 +839,4 @@ #define CRC_FUNCTION_NAME __crc32c_vpmsum #define REFLECT -#include "crc32-vpmsum_core.S" +#include "crc-vpmsum-template.S" diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S b/arch/powerpc/lib/crct10dif-vpmsum_asm.S index f0b93a0fe168..47a6266d89a8 100644 --- a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S +++ b/arch/powerpc/lib/crct10dif-vpmsum_asm.S @@ -842,4 +842,4 @@ .octa 
0x0000000000000000000000018bb70000 #define CRC_FUNCTION_NAME __crct10dif_vpmsum -#include "crc32-vpmsum_core.S" +#include "crc-vpmsum-template.S" diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S index 7e5e1c28e56a..8967903c15e9 100644 --- a/arch/powerpc/lib/crtsavres.S +++ b/arch/powerpc/lib/crtsavres.S @@ -46,7 +46,7 @@ .section ".text" -#ifndef CONFIG_PPC64 +#ifndef __powerpc64__ /* Routines for saving integer registers, called by the compiler. */ /* Called with r11 pointing to the stack header word of the caller of the */ diff --git a/arch/powerpc/lib/crypto/Kconfig b/arch/powerpc/lib/crypto/Kconfig new file mode 100644 index 000000000000..3f9e1bbd9905 --- /dev/null +++ b/arch/powerpc/lib/crypto/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA20_P10 + tristate + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX + default CRYPTO_LIB_CHACHA + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_P10 + tristate + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX + depends on BROKEN # Needs to be fixed to work in softirq context + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 + select CRYPTO_LIB_POLY1305_GENERIC + +config CRYPTO_SHA256_PPC_SPE + tristate + depends on SPE + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 diff --git a/arch/powerpc/lib/crypto/Makefile b/arch/powerpc/lib/crypto/Makefile new file mode 100644 index 000000000000..27f231f8e334 --- /dev/null +++ b/arch/powerpc/lib/crypto/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o +chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o + +obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o +poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o + +obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o +sha256-ppc-spe-y := sha256.o sha256-spe-asm.o diff --git a/arch/powerpc/lib/crypto/chacha-p10-glue.c b/arch/powerpc/lib/crypto/chacha-p10-glue.c new file mode 100644 index 000000000000..fcd23c6f1590 --- /dev/null +++ b/arch/powerpc/lib/crypto/chacha-p10-glue.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ChaCha stream cipher (P10 accelerated) + * + * Copyright 2023- IBM Corp. All rights reserved. 
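Both vpmsum wrappers above (crc-t10dif.c and crc32.c) use the same decomposition: CRC the unaligned head with the generic code, run the vector routine over the 16-byte-aligned middle, and finish the tail generically. A worked example of the split for an arbitrary pointer and length, reusing the VMX_ALIGN definitions shown above:

#include <stdio.h>

#define VMX_ALIGN      16u
#define VMX_ALIGN_MASK (VMX_ALIGN - 1)

int main(void)
{
	unsigned long p = 0x100b;	/* buffer address, 11 bytes past alignment */
	unsigned long len = 1000;

	unsigned long prealign = (p & VMX_ALIGN_MASK) ?
				 VMX_ALIGN - (p & VMX_ALIGN_MASK) : 0;
	unsigned long body = (len - prealign) & ~(unsigned long)VMX_ALIGN_MASK;
	unsigned long tail = (len - prealign) & VMX_ALIGN_MASK;

	/* prints head=5 body=992 tail=3: 5 generic bytes to reach alignment,
	 * 992 vectorized bytes, 3 generic tail bytes */
	printf("head=%lu body=%lu tail=%lu\n", prealign, body, tail);
	return 0;
}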
+ */ + +#include <crypto/chacha.h> +#include <crypto/internal/simd.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/cpufeature.h> +#include <linux/sizes.h> +#include <asm/simd.h> +#include <asm/switch_to.h> + +asmlinkage void chacha_p10le_8x(const struct chacha_state *state, u8 *dst, + const u8 *src, unsigned int len, int nrounds); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); + +static void vsx_begin(void) +{ + preempt_disable(); + enable_kernel_vsx(); +} + +static void vsx_end(void) +{ + disable_kernel_vsx(); + preempt_enable(); +} + +static void chacha_p10_do_8x(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + unsigned int l = bytes & ~0x0FF; + + if (l > 0) { + chacha_p10le_8x(state, dst, src, l, nrounds); + bytes -= l; + src += l; + dst += l; + state->x[12] += l / CHACHA_BLOCK_SIZE; + } + + if (bytes > 0) + chacha_crypt_generic(state, dst, src, bytes, nrounds); +} + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + hchacha_block_generic(state, out, nrounds); +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE || + !crypto_simd_usable()) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + + do { + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); + + vsx_begin(); + chacha_p10_do_8x(state, dst, src, todo, nrounds); + vsx_end(); + + bytes -= todo; + src += todo; + dst += todo; + } while (bytes); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + return static_key_enabled(&have_p10); +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +static int __init chacha_p10_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + static_branch_enable(&have_p10); + return 0; +} +subsys_initcall(chacha_p10_init); + +static void __exit chacha_p10_exit(void) +{ +} +module_exit(chacha_p10_exit); + +MODULE_DESCRIPTION("ChaCha stream cipher (P10 accelerated)"); +MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/lib/crypto/chacha-p10le-8x.S index 17bedb66b822..b29562bd5d40 100644 --- a/arch/powerpc/crypto/chacha-p10le-8x.S +++ b/arch/powerpc/lib/crypto/chacha-p10le-8x.S @@ -7,9 +7,6 @@ #=================================================================================== # Written by Danny Tsen <dtsen@us.ibm.com> # -# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, -# size_t len, int nrounds); -# # do rounds, 8 quarter rounds # 1. a += b; d ^= a; d <<<= 16; # 2. c += d; b ^= c; b <<<= 12; @@ -575,7 +572,8 @@ .endm # -# chacha20_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds); +# void chacha_p10le_8x(const struct chacha_state *state, u8 *dst, const u8 *src, +# unsigned int len, int nrounds); # SYM_FUNC_START(chacha_p10le_8x) .align 5 diff --git a/arch/powerpc/lib/crypto/poly1305-p10-glue.c b/arch/powerpc/lib/crypto/poly1305-p10-glue.c new file mode 100644 index 000000000000..3f1664a724b6 --- /dev/null +++ b/arch/powerpc/lib/crypto/poly1305-p10-glue.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Poly1305 authenticator algorithm, RFC7539. + * + * Copyright 2023- IBM Corp. All rights reserved. 
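chacha_p10_do_8x() above feeds the 8-way vector routine only whole multiples of 256 bytes (bytes & ~0x0FF), advances the block counter in state->x[12] by one per 64-byte block consumed, and leaves the remainder to the generic code; chacha_crypt_arch() additionally caps each enable/disable-VSX window at 4KB. A worked example of the split:

#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64u

int main(void)
{
	unsigned int bytes = 1000;
	unsigned int l = bytes & ~0x0FFu;	/* whole 256-byte multiples for the 8x code */

	/* prints 768 bytes vectorized, counter advanced by 12 blocks, 232 left generic */
	printf("vector path: %u bytes, counter advances by %u blocks\n",
	       l, l / CHACHA_BLOCK_SIZE);
	printf("generic fallback: %u bytes\n", bytes - l);
	return 0;
}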
+ */ +#include <asm/switch_to.h> +#include <crypto/internal/poly1305.h> +#include <linux/cpufeature.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> + +asmlinkage void poly1305_p10le_4blocks(struct poly1305_block_state *state, const u8 *m, u32 mlen); +asmlinkage void poly1305_64s(struct poly1305_block_state *state, const u8 *m, u32 mlen, int highbit); +asmlinkage void poly1305_emit_64(const struct poly1305_state *state, const u32 nonce[4], u8 digest[POLY1305_DIGEST_SIZE]); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); + +static void vsx_begin(void) +{ + preempt_disable(); + enable_kernel_vsx(); +} + +static void vsx_end(void) +{ + disable_kernel_vsx(); + preempt_enable(); +} + +void poly1305_block_init_arch(struct poly1305_block_state *dctx, + const u8 raw_key[POLY1305_BLOCK_SIZE]) +{ + if (!static_key_enabled(&have_p10)) + return poly1305_block_init_generic(dctx, raw_key); + + dctx->h = (struct poly1305_state){}; + dctx->core_r.key.r64[0] = get_unaligned_le64(raw_key + 0); + dctx->core_r.key.r64[1] = get_unaligned_le64(raw_key + 8); +} +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); + +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src, + unsigned int len, u32 padbit) +{ + if (!static_key_enabled(&have_p10)) + return poly1305_blocks_generic(state, src, len, padbit); + vsx_begin(); + if (len >= POLY1305_BLOCK_SIZE * 4) { + poly1305_p10le_4blocks(state, src, len); + src += len - (len % (POLY1305_BLOCK_SIZE * 4)); + len %= POLY1305_BLOCK_SIZE * 4; + } + while (len >= POLY1305_BLOCK_SIZE) { + poly1305_64s(state, src, POLY1305_BLOCK_SIZE, padbit); + len -= POLY1305_BLOCK_SIZE; + src += POLY1305_BLOCK_SIZE; + } + vsx_end(); +} +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); + +void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]) +{ + if (!static_key_enabled(&have_p10)) + return poly1305_emit_generic(state, digest, nonce); + poly1305_emit_64(state, nonce, digest); +} +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +bool poly1305_is_arch_optimized(void) +{ + return static_key_enabled(&have_p10); +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +static int __init poly1305_p10_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + static_branch_enable(&have_p10); + return 0; +} +subsys_initcall(poly1305_p10_init); + +static void __exit poly1305_p10_exit(void) +{ +} +module_exit(poly1305_p10_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); +MODULE_DESCRIPTION("Optimized Poly1305 for P10"); diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/lib/crypto/poly1305-p10le_64.S index a3c1987f1ecd..a3c1987f1ecd 100644 --- a/arch/powerpc/crypto/poly1305-p10le_64.S +++ b/arch/powerpc/lib/crypto/poly1305-p10le_64.S diff --git a/arch/powerpc/crypto/sha256-spe-asm.S b/arch/powerpc/lib/crypto/sha256-spe-asm.S index cd99d71dae34..cd99d71dae34 100644 --- a/arch/powerpc/crypto/sha256-spe-asm.S +++ b/arch/powerpc/lib/crypto/sha256-spe-asm.S diff --git a/arch/powerpc/lib/crypto/sha256.c b/arch/powerpc/lib/crypto/sha256.c new file mode 100644 index 000000000000..6b0f079587eb --- /dev/null +++ b/arch/powerpc/lib/crypto/sha256.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 Secure Hash Algorithm, SPE optimized + * + * Based on generic implementation. The assembler module takes care + * about the SPE registers so it can run from interrupt context. 
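poly1305_blocks_arch() above drains as much as possible through the 4-block (64-byte) vector routine and then walks any remaining whole 16-byte blocks one at a time; callers are expected to pass whole blocks, so the sub-block leftover below exists only to make the arithmetic visible. A worked example of how a 100-byte run splits:

#include <stdio.h>

#define POLY1305_BLOCK_SIZE 16u

int main(void)
{
	unsigned int len = 100;
	unsigned int bulk = 0;

	if (len >= POLY1305_BLOCK_SIZE * 4) {
		bulk = len - (len % (POLY1305_BLOCK_SIZE * 4));	/* 4-block vector routine */
		len %= POLY1305_BLOCK_SIZE * 4;
	}
	/* prints vector: 64 bytes, single blocks: 2, leftover: 4 */
	printf("vector: %u bytes, single blocks: %u, leftover: %u\n",
	       bulk, len / POLY1305_BLOCK_SIZE, len % POLY1305_BLOCK_SIZE);
	return 0;
}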
+ * + * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> + */ + +#include <asm/switch_to.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> + +/* + * MAX_BYTES defines the number of bytes that are allowed to be processed + * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000 + * operations per 64 bytes. e500 cores can issue two arithmetic instructions + * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2). + * Thus 1KB of input data will need an estimated maximum of 18,000 cycles. + * Headroom for cache misses included. Even with the low end model clocked + * at 667 MHz this amounts to a critical time window of less than 27us. + * + */ +#define MAX_BYTES 1024 + +extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks); + +static void spe_begin(void) +{ + /* We just start SPE operations and will save SPE registers later. */ + preempt_disable(); + enable_kernel_spe(); +} + +static void spe_end(void) +{ + disable_kernel_spe(); + /* reenable preemption */ + preempt_enable(); +} + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + do { + /* cut input data into smaller blocks */ + u32 unit = min_t(size_t, nblocks, + MAX_BYTES / SHA256_BLOCK_SIZE); + + spe_begin(); + ppc_spe_sha256_transform(state, data, unit); + spe_end(); + + data += unit * SHA256_BLOCK_SIZE; + nblocks -= unit; + } while (nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return true; +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm, SPE optimized"); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 4f82581ca203..587c8cf1230f 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -16,7 +16,7 @@ #include <linux/sched/mm.h> #include <linux/stop_machine.h> #include <asm/cputable.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/interrupt.h> #include <asm/page.h> #include <asm/sections.h> @@ -25,6 +25,13 @@ #include <asm/firmware.h> #include <asm/inst.h> +/* + * Used to generate warnings if mmu or cpu feature check functions that + * use static keys are called before they are initialized. + */ +bool static_key_feature_checks_initialized __read_mostly; +EXPORT_SYMBOL_GPL(static_key_feature_checks_initialized); + struct fixup_entry { unsigned long mask; unsigned long value; @@ -679,6 +686,7 @@ void __init setup_feature_keys(void) jump_label_init(); cpu_feature_keys_init(); mmu_feature_keys_init(); + static_key_feature_checks_initialized = true; } static int __init check_features(void) diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c index 5de4dd549f6e..bcc7e4dff8c3 100644 --- a/arch/powerpc/lib/qspinlock.c +++ b/arch/powerpc/lib/qspinlock.c @@ -697,7 +697,15 @@ again: } release: - qnodesp->count--; /* release the node */ + /* + * Clear the lock before releasing the node, as another CPU might see stale + * values if an interrupt occurs after we increment qnodesp->count + * but before node->lock is initialized. The barrier ensures that + * there are no further stores to the node after it has been released.
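Spelled out, the arithmetic in the MAX_BYTES comment above runs: 1024 bytes is 16 blocks of 64; 16 blocks at ~2,000 operations each is ~32,000 operations; dual issue halves that to ~16,000 cycles; cache-miss headroom pads it to ~18,000; and 18,000 cycles at 667 MHz is about 27us. A trivial check of the same chain (the 18,000 and 667 figures are taken from the comment, not derived here):

#include <stdio.h>

int main(void)
{
	/* figures from the comment: ~2000 ops per 64-byte block, dual-issue
	 * e500, 667 MHz clock, padding to 18,000 cycles for cache misses */
	unsigned long blocks = 1024 / 64;
	unsigned long ops = blocks * 2000;
	unsigned long cycles = ops / 2;		/* two instructions per cycle */
	double us = 18000.0 / 667.0;		/* padded cycle count / MHz */

	/* prints 16 blocks, 32000 ops, 16000 cycles, ~27 us */
	printf("%lu blocks, %lu ops, %lu cycles, ~%.0f us\n", blocks, ops, cycles, us);
	return 0;
}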
+ */ + node->lock = NULL; + barrier(); + qnodesp->count--; } void queued_spin_lock_slowpath(struct qspinlock *lock) diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index e65f3fb68d06..ac3ee19531d8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -780,8 +780,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, #endif /* __powerpc64 */ #ifdef CONFIG_VSX -void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, - const void *mem, bool rev) +static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, + const void *mem, bool rev) { int size, read_size; int i, j; @@ -863,11 +863,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, break; } } -EXPORT_SYMBOL_GPL(emulate_vsx_load); -NOKPROBE_SYMBOL(emulate_vsx_load); -void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, - void *mem, bool rev) +static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, + void *mem, bool rev) { int size, write_size; int i, j; @@ -955,8 +953,6 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, break; } } -EXPORT_SYMBOL_GPL(emulate_vsx_store); -NOKPROBE_SYMBOL(emulate_vsx_store); static nokprobe_inline int do_vsx_load(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c index c44823292f73..1440d99630b3 100644 --- a/arch/powerpc/lib/test-code-patching.c +++ b/arch/powerpc/lib/test-code-patching.c @@ -6,7 +6,7 @@ #include <linux/vmalloc.h> #include <linux/init.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> static int __init instr_is_branch_to_addr(const u32 *instr, unsigned long addr) { @@ -347,6 +347,137 @@ static void __init test_prefixed_patching(void) check(!memcmp(iptr, expected, sizeof(expected))); } +static void __init test_multi_instruction_patching(void) +{ + u32 code[32]; + void *buf; + u32 *addr32; + u64 *addr64; + ppc_inst_t inst64 = ppc_inst_prefix(OP_PREFIX << 26 | 3UL << 24, PPC_RAW_TRAP()); + u32 inst32 = PPC_RAW_NOP(); + + buf = vzalloc(PAGE_SIZE * 8); + check(buf); + if (!buf) + return; + + /* Test single page 32-bit repeated instruction */ + addr32 = buf + PAGE_SIZE; + check(!patch_instructions(addr32 + 1, &inst32, 12, true)); + + check(addr32[0] == 0); + check(addr32[1] == inst32); + check(addr32[2] == inst32); + check(addr32[3] == inst32); + check(addr32[4] == 0); + + /* Test single page 64-bit repeated instruction */ + if (IS_ENABLED(CONFIG_PPC64)) { + check(ppc_inst_prefixed(inst64)); + + addr64 = buf + PAGE_SIZE * 2; + ppc_inst_write(code, inst64); + check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true)); + + check(addr64[0] == 0); + check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64)); + check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64)); + check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64)); + check(addr64[4] == 0); + } + + /* Test single page memcpy */ + addr32 = buf + PAGE_SIZE * 3; + + for (int i = 0; i < ARRAY_SIZE(code); i++) + code[i] = i + 1; + + check(!patch_instructions(addr32 + 1, code, sizeof(code), false)); + + check(addr32[0] == 0); + check(!memcmp(&addr32[1], code, sizeof(code))); + check(addr32[ARRAY_SIZE(code) + 1] == 0); + + /* Test multipage 32-bit repeated instruction */ + addr32 = buf + PAGE_SIZE * 4 - 8; + check(!patch_instructions(addr32 + 1, &inst32, 12, true)); + + check(addr32[0] 
== 0); + check(addr32[1] == inst32); + check(addr32[2] == inst32); + check(addr32[3] == inst32); + check(addr32[4] == 0); + + /* Test multipage 64-bit repeated instruction */ + if (IS_ENABLED(CONFIG_PPC64)) { + check(ppc_inst_prefixed(inst64)); + + addr64 = buf + PAGE_SIZE * 5 - 8; + ppc_inst_write(code, inst64); + check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true)); + + check(addr64[0] == 0); + check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64)); + check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64)); + check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64)); + check(addr64[4] == 0); + } + + /* Test multipage memcpy */ + addr32 = buf + PAGE_SIZE * 6 - 12; + + for (int i = 0; i < ARRAY_SIZE(code); i++) + code[i] = i + 1; + + check(!patch_instructions(addr32 + 1, code, sizeof(code), false)); + + check(addr32[0] == 0); + check(!memcmp(&addr32[1], code, sizeof(code))); + check(addr32[ARRAY_SIZE(code) + 1] == 0); + + vfree(buf); +} + +static void __init test_data_patching(void) +{ + void *buf; + u32 *addr32; + + buf = vzalloc(PAGE_SIZE); + check(buf); + if (!buf) + return; + + addr32 = buf + 128; + + addr32[1] = 0xA0A1A2A3; + addr32[2] = 0xB0B1B2B3; + + check(!patch_uint(&addr32[1], 0xC0C1C2C3)); + + check(addr32[0] == 0); + check(addr32[1] == 0xC0C1C2C3); + check(addr32[2] == 0xB0B1B2B3); + check(addr32[3] == 0); + + /* Unaligned patch_ulong() should fail */ + if (IS_ENABLED(CONFIG_PPC64)) + check(patch_ulong(&addr32[1], 0xD0D1D2D3) == -EINVAL); + + check(!patch_ulong(&addr32[2], 0xD0D1D2D3)); + + check(addr32[0] == 0); + check(addr32[1] == 0xC0C1C2C3); + check(*(unsigned long *)(&addr32[2]) == 0xD0D1D2D3); + + if (!IS_ENABLED(CONFIG_PPC64)) + check(addr32[3] == 0); + + check(addr32[4] == 0); + + vfree(buf); +} + static int __init test_code_patching(void) { pr_info("Running code patching self-tests ...\n"); @@ -356,6 +487,8 @@ static int __init test_code_patching(void) test_create_function_call(); test_translate_branch(); test_prefixed_patching(); + test_multi_instruction_patching(); + test_data_patching(); return 0; } diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 23c7805fb7b3..66b5b4fa1686 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -11,7 +11,7 @@ #include <asm/cpu_has_feature.h> #include <asm/sstep.h> #include <asm/ppc-opcode.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/inst.h> #define MAX_SUBTESTS 16 diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c index d491da8d1838..54340912398f 100644 --- a/arch/powerpc/lib/vmx-helper.c +++ b/arch/powerpc/lib/vmx-helper.c @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void) * set and we are preemptible. The hack here is to schedule a * decrementer to fire here and reschedule for us if necessary. */ - if (IS_ENABLED(CONFIG_PREEMPT) && need_resched()) + if (need_irq_preemption() && need_resched()) set_dec(1); return 0; } diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 503a6e249940..8c1582b2987d 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -3,8 +3,6 @@ # Makefile for the linux ppc-specific parts of the memory manager. 
# -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) - obj-y := fault.o mem.o pgtable.o maccess.o pageattr.o \ init_$(BITS).o pgtable_$(BITS).o \ pgtable-frag.o ioremap.o ioremap_$(BITS).o \ @@ -17,5 +15,5 @@ obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump/ +obj-$(CONFIG_PTDUMP) += ptdump/ obj-$(CONFIG_KASAN) += kasan/ diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 5445587bfe84..be9c4106e22f 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -25,7 +25,7 @@ #include <asm/mmu.h> #include <asm/machdep.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/sections.h> #include <mm/mmu_decl.h> @@ -184,7 +184,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) static bool is_module_segment(unsigned long addr) { - if (!IS_ENABLED(CONFIG_MODULES)) + if (!IS_ENABLED(CONFIG_EXECMEM)) return false; if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M)) return false; @@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr) return true; } -void mmu_mark_initmem_nx(void) +int mmu_mark_initmem_nx(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; @@ -223,6 +223,8 @@ void mmu_mark_initmem_nx(void) update_bats(); + BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE); + for (i = TASK_SIZE >> 28; i < 16; i++) { /* Do not set NX on VM space for modules */ if (is_module_segment(i << 28)) @@ -230,9 +232,10 @@ void mmu_mark_initmem_nx(void) mtsr(mfsr(i << 28) | 0x10000000, i << 28); } + return 0; } -void mmu_mark_rodata_ro(void) +int mmu_mark_rodata_ro(void) { int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int i; @@ -245,6 +248,8 @@ void mmu_mark_rodata_ro(void) } update_bats(); + + return 0; } /* @@ -372,10 +377,7 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. */ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = memblock_alloc(Hash_size, Hash_size); - if (!Hash) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, Hash_size, Hash_size); + Hash = memblock_alloc_or_panic(Hash_size, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; pr_info("Total memory = %lldMB; using %ldkB for hash table\n", diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile index cad2abc1730f..33af5795856a 100644 --- a/arch/powerpc/mm/book3s64/Makefile +++ b/arch/powerpc/mm/book3s64/Makefile @@ -1,7 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := $(NO_MINIMAL_TOC) - obj-y += mmu_context.o pgtable.o trace.o ifdef CONFIG_PPC_64S_HASH_MMU CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE) diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c index 430d1d935a7c..e9e2dd70c060 100644 --- a/arch/powerpc/mm/book3s64/hash_native.c +++ b/arch/powerpc/mm/book3s64/hash_native.c @@ -27,8 +27,6 @@ #include <asm/ppc-opcode.h> #include <asm/feature-fixups.h> -#include <misc/cxl-base.h> - #ifdef DEBUG_LOW #define DBG_LOW(fmt...) 
udbg_printf(fmt) #else @@ -217,11 +215,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) static inline void tlbie(unsigned long vpn, int psize, int apsize, int ssize, int local) { - unsigned int use_local; + unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); - use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use(); - if (use_local) use_local = mmu_psize_defs[psize].tlbiel; if (lock_tlbie && !use_local) @@ -789,10 +785,6 @@ static void native_flush_hash_range(unsigned long number, int local) unsigned long psize = batch->psize; int ssize = batch->ssize; int i; - unsigned int use_local; - - use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && - mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); local_irq_save(flags); @@ -827,7 +819,8 @@ static void native_flush_hash_range(unsigned long number, int local) } pte_iterate_hashed_end(); } - if (use_local) { + if (mmu_has_feature(MMU_FTR_TLBIEL) && + mmu_psize_defs[psize].tlbiel && local) { asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { vpn = batch->vpn[i]; diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 01c3b4b65241..5158aefe4873 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -40,6 +40,7 @@ #include <linux/random.h> #include <linux/elf-randomize.h> #include <linux/of_fdt.h> +#include <linux/kfence.h> #include <asm/interrupt.h> #include <asm/processor.h> @@ -55,9 +56,9 @@ #include <asm/cacheflush.h> #include <asm/cputable.h> #include <asm/sections.h> -#include <asm/copro.h> +#include <asm/spu.h> #include <asm/udbg.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/fadump.h> #include <asm/firmware.h> #include <asm/tm.h> @@ -66,6 +67,7 @@ #include <asm/pte-walk.h> #include <asm/asm-prototypes.h> #include <asm/ultravisor.h> +#include <asm/kfence.h> #include <mm/mmu_decl.h> @@ -123,9 +125,7 @@ EXPORT_SYMBOL_GPL(mmu_slb_size); #ifdef CONFIG_PPC_64K_PAGES int mmu_ci_restrictions; #endif -static u8 *linear_map_hash_slots; -static unsigned long linear_map_hash_count; -struct mmu_hash_ops mmu_hash_ops; +struct mmu_hash_ops mmu_hash_ops __ro_after_init; EXPORT_SYMBOL(mmu_hash_ops); /* @@ -273,6 +273,270 @@ void hash__tlbiel_all(unsigned int action) WARN(1, "%s called on pre-POWER7 CPU\n", __func__); } +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) +static void kernel_map_linear_page(unsigned long vaddr, unsigned long idx, + u8 *slots, raw_spinlock_t *lock) +{ + unsigned long hash; + unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); + unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize); + unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY); + long ret; + + hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); + + /* Don't create HPTE entries for bad address */ + if (!vsid) + return; + + if (slots[idx] & 0x80) + return; + + ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, + HPTE_V_BOLTED, + mmu_linear_psize, mmu_kernel_ssize); + + BUG_ON (ret < 0); + raw_spin_lock(lock); + BUG_ON(slots[idx] & 0x80); + slots[idx] = ret | 0x80; + raw_spin_unlock(lock); +} + +static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx, + u8 *slots, raw_spinlock_t *lock) +{ + unsigned long hash, hslot, slot; + unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); + unsigned long vpn = hpt_vpn(vaddr, 
vsid, mmu_kernel_ssize); + + hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); + raw_spin_lock(lock); + if (!(slots[idx] & 0x80)) { + raw_spin_unlock(lock); + return; + } + hslot = slots[idx] & 0x7f; + slots[idx] = 0; + raw_spin_unlock(lock); + if (hslot & _PTEIDX_SECONDARY) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hslot & _PTEIDX_GROUP_IX; + mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize, + mmu_linear_psize, + mmu_kernel_ssize, 0); +} +#endif + +static inline bool hash_supports_debug_pagealloc(void) +{ + unsigned long max_hash_count = ppc64_rma_size / 4; + unsigned long linear_map_count = memblock_end_of_DRAM() >> PAGE_SHIFT; + + if (!debug_pagealloc_enabled() || linear_map_count > max_hash_count) + return false; + return true; +} + +#ifdef CONFIG_DEBUG_PAGEALLOC +static u8 *linear_map_hash_slots; +static unsigned long linear_map_hash_count; +static DEFINE_RAW_SPINLOCK(linear_map_hash_lock); +static void hash_debug_pagealloc_alloc_slots(void) +{ + if (!hash_supports_debug_pagealloc()) + return; + + linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; + linear_map_hash_slots = memblock_alloc_try_nid( + linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, + ppc64_rma_size, NUMA_NO_NODE); + if (!linear_map_hash_slots) + panic("%s: Failed to allocate %lu bytes max_addr=%pa\n", + __func__, linear_map_hash_count, &ppc64_rma_size); +} + +static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, + int slot) +{ + if (!debug_pagealloc_enabled() || !linear_map_hash_count) + return; + if ((paddr >> PAGE_SHIFT) < linear_map_hash_count) + linear_map_hash_slots[paddr >> PAGE_SHIFT] = slot | 0x80; +} + +static int hash_debug_pagealloc_map_pages(struct page *page, int numpages, + int enable) +{ + unsigned long flags, vaddr, lmi; + int i; + + if (!debug_pagealloc_enabled() || !linear_map_hash_count) + return 0; + + local_irq_save(flags); + for (i = 0; i < numpages; i++, page++) { + vaddr = (unsigned long)page_address(page); + lmi = __pa(vaddr) >> PAGE_SHIFT; + if (lmi >= linear_map_hash_count) + continue; + if (enable) + kernel_map_linear_page(vaddr, lmi, + linear_map_hash_slots, &linear_map_hash_lock); + else + kernel_unmap_linear_page(vaddr, lmi, + linear_map_hash_slots, &linear_map_hash_lock); + } + local_irq_restore(flags); + return 0; +} + +#else /* CONFIG_DEBUG_PAGEALLOC */ +static inline void hash_debug_pagealloc_alloc_slots(void) {} +static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot) {} +static int __maybe_unused +hash_debug_pagealloc_map_pages(struct page *page, int numpages, int enable) +{ + return 0; +} +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +#ifdef CONFIG_KFENCE +static u8 *linear_map_kf_hash_slots; +static unsigned long linear_map_kf_hash_count; +static DEFINE_RAW_SPINLOCK(linear_map_kf_hash_lock); + +static phys_addr_t kfence_pool; + +static inline void hash_kfence_alloc_pool(void) +{ + if (!kfence_early_init_enabled()) + goto err; + + /* allocate linear map for kfence within RMA region */ + linear_map_kf_hash_count = KFENCE_POOL_SIZE >> PAGE_SHIFT; + linear_map_kf_hash_slots = memblock_alloc_try_nid( + linear_map_kf_hash_count, 1, + MEMBLOCK_LOW_LIMIT, ppc64_rma_size, + NUMA_NO_NODE); + if (!linear_map_kf_hash_slots) { + pr_err("%s: memblock for linear map (%lu) failed\n", __func__, + linear_map_kf_hash_count); + goto err; + } + + /* allocate kfence pool early */ + kfence_pool = memblock_phys_alloc_range(KFENCE_POOL_SIZE, PAGE_SIZE, + MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ANYWHERE); + if (!kfence_pool) { + 
pr_err("%s: memblock for kfence pool (%lu) failed\n", __func__, + KFENCE_POOL_SIZE); + memblock_free(linear_map_kf_hash_slots, + linear_map_kf_hash_count); + linear_map_kf_hash_count = 0; + goto err; + } + memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); + + return; +err: + pr_info("Disabling kfence\n"); + disable_kfence(); +} + +static inline void hash_kfence_map_pool(void) +{ + unsigned long kfence_pool_start, kfence_pool_end; + unsigned long prot = pgprot_val(PAGE_KERNEL); + + if (!kfence_pool) + return; + + kfence_pool_start = (unsigned long) __va(kfence_pool); + kfence_pool_end = kfence_pool_start + KFENCE_POOL_SIZE; + __kfence_pool = (char *) kfence_pool_start; + BUG_ON(htab_bolt_mapping(kfence_pool_start, kfence_pool_end, + kfence_pool, prot, mmu_linear_psize, + mmu_kernel_ssize)); + memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); +} + +static inline void hash_kfence_add_slot(phys_addr_t paddr, int slot) +{ + unsigned long vaddr = (unsigned long) __va(paddr); + unsigned long lmi = (vaddr - (unsigned long)__kfence_pool) + >> PAGE_SHIFT; + + if (!kfence_pool) + return; + BUG_ON(!is_kfence_address((void *)vaddr)); + BUG_ON(lmi >= linear_map_kf_hash_count); + linear_map_kf_hash_slots[lmi] = slot | 0x80; +} + +static int hash_kfence_map_pages(struct page *page, int numpages, int enable) +{ + unsigned long flags, vaddr, lmi; + int i; + + WARN_ON_ONCE(!linear_map_kf_hash_count); + local_irq_save(flags); + for (i = 0; i < numpages; i++, page++) { + vaddr = (unsigned long)page_address(page); + lmi = (vaddr - (unsigned long)__kfence_pool) >> PAGE_SHIFT; + + /* Ideally this should never happen */ + if (lmi >= linear_map_kf_hash_count) { + WARN_ON_ONCE(1); + continue; + } + + if (enable) + kernel_map_linear_page(vaddr, lmi, + linear_map_kf_hash_slots, + &linear_map_kf_hash_lock); + else + kernel_unmap_linear_page(vaddr, lmi, + linear_map_kf_hash_slots, + &linear_map_kf_hash_lock); + } + local_irq_restore(flags); + return 0; +} +#else +static inline void hash_kfence_alloc_pool(void) {} +static inline void hash_kfence_map_pool(void) {} +static inline void hash_kfence_add_slot(phys_addr_t paddr, int slot) {} +static int __maybe_unused +hash_kfence_map_pages(struct page *page, int numpages, int enable) +{ + return 0; +} +#endif + +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) +int hash__kernel_map_pages(struct page *page, int numpages, int enable) +{ + void *vaddr = page_address(page); + + if (is_kfence_address(vaddr)) + return hash_kfence_map_pages(page, numpages, enable); + else + return hash_debug_pagealloc_map_pages(page, numpages, enable); +} + +static void hash_linear_map_add_slot(phys_addr_t paddr, int slot) +{ + if (is_kfence_address(__va(paddr))) + hash_kfence_add_slot(paddr, slot); + else + hash_debug_pagealloc_add_slot(paddr, slot); +} +#else +static void hash_linear_map_add_slot(phys_addr_t paddr, int slot) {} +#endif + /* * 'R' and 'C' update notes: * - Under pHyp or KVM, the updatepp path will not set C, thus it *will* @@ -431,9 +695,8 @@ repeat: break; cond_resched(); - if (debug_pagealloc_enabled_or_kfence() && - (paddr >> PAGE_SHIFT) < linear_map_hash_count) - linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; + /* add slot info in debug_pagealloc / kfence linear map */ + hash_linear_map_add_slot(paddr, ret); } return ret < 0 ? 
ret : 0; } @@ -814,7 +1077,7 @@ static void __init htab_init_page_sizes(void) bool aligned = true; init_hpte_page_sizes(); - if (!debug_pagealloc_enabled_or_kfence()) { + if (!hash_supports_debug_pagealloc() && !kfence_early_init_enabled()) { /* * Pick a size for the linear mapping. Currently, we only * support 16M, 1M and 4K which is the default @@ -1095,18 +1358,6 @@ static void __init htab_initialize(void) } else { unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE; -#ifdef CONFIG_PPC_CELL - /* - * Cell may require the hash table down low when using the - * Axon IOMMU in order to fit the dynamic region over it, see - * comments in cell/iommu.c - */ - if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) { - limit = 0x80000000; - pr_info("Hash table forced below 2G for Axon IOMMU\n"); - } -#endif /* CONFIG_PPC_CELL */ - table = memblock_phys_alloc_range(htab_size_bytes, htab_size_bytes, 0, limit); @@ -1134,16 +1385,8 @@ static void __init htab_initialize(void) prot = pgprot_val(PAGE_KERNEL); - if (debug_pagealloc_enabled_or_kfence()) { - linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; - linear_map_hash_slots = memblock_alloc_try_nid( - linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, - ppc64_rma_size, NUMA_NO_NODE); - if (!linear_map_hash_slots) - panic("%s: Failed to allocate %lu bytes max_addr=%pa\n", - __func__, linear_map_hash_count, &ppc64_rma_size); - } - + hash_debug_pagealloc_alloc_slots(); + hash_kfence_alloc_pool(); /* create bolted the linear mapping in the hash table */ for_each_mem_range(i, &base, &end) { size = end - base; @@ -1160,6 +1403,7 @@ static void __init htab_initialize(void) BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), prot, mmu_linear_psize, mmu_kernel_ssize)); } + hash_kfence_map_pool(); memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); /* @@ -1233,10 +1477,6 @@ void __init hash__early_init_mmu(void) __pmd_table_size = H_PMD_TABLE_SIZE; __pud_table_size = H_PUD_TABLE_SIZE; __pgd_table_size = H_PGD_TABLE_SIZE; - /* - * 4k use hugepd format, so for hash set then to - * zero - */ __pmd_val_bits = HASH_PMD_VAL_BITS; __pud_val_bits = HASH_PUD_VAL_BITS; __pgd_val_bits = HASH_PGD_VAL_BITS; @@ -1360,7 +1600,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr) if (get_slice_psize(mm, addr) == MMU_PAGE_4K) return; slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); - copro_flush_all_slbs(mm); +#ifdef CONFIG_SPU_BASE + spu_flush_all_slbs(mm); +#endif if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { copy_mm_to_paca(mm); @@ -1546,6 +1788,13 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, goto bail; } + if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !radix_enabled()) { + if (hugeshift == PMD_SHIFT && psize == MMU_PAGE_16M) + hugeshift = mmu_psize_defs[MMU_PAGE_16M].shift; + if (hugeshift == PUD_SHIFT && psize == MMU_PAGE_16G) + hugeshift = mmu_psize_defs[MMU_PAGE_16G].shift; + } + /* * Add _PAGE_PRESENT to the required access perm. If there are parallel * updates to the pte that can possibly clear _PAGE_PTE, catch that too. 
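A note for review on the debug_pagealloc/kfence bookkeeping introduced in hash_utils.c above: each byte in the slot arrays tracks one linear-map page, with bit 7 marking that a bolted HPTE was inserted and the low seven bits holding the slot cookie returned by hpte_insert_repeating() (the "| 0x80" / "& 0x7f" pairing in kernel_map_linear_page() and kernel_unmap_linear_page()). A minimal standalone sketch of that encoding (illustrative only, not kernel code; the helper names are invented):

#include <assert.h>
#include <stdint.h>

#define SLOT_PRESENT	0x80u	/* bit 7: a bolted HPTE is recorded */
#define SLOT_MASK	0x7fu	/* bits 0-6: slot cookie (group index plus secondary-hash bit) */

static uint8_t slot_pack(unsigned int hslot)
{
	assert(hslot <= SLOT_MASK);	/* must fit in seven bits */
	return (uint8_t)(hslot | SLOT_PRESENT);
}

/* Returns 0 and fills *hslot if a mapping was recorded, -1 otherwise. */
static int slot_unpack(uint8_t entry, unsigned int *hslot)
{
	if (!(entry & SLOT_PRESENT))
		return -1;
	*hslot = entry & SLOT_MASK;
	return 0;
}

The raw spinlock in the real code serializes concurrent updates of these bytes; the sketch above only captures the encoding itself.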
@@ -1622,7 +1871,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 				       "to 4kB pages because of "
 				       "non-cacheable mapping\n");
 			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
-			copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+			spu_flush_all_slbs(mm);
+#endif
 		}
 	}
 
@@ -2117,82 +2368,6 @@ void hpt_do_stress(unsigned long ea, unsigned long hpte_group)
 	}
 }
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
-
-static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
-{
-	unsigned long hash;
-	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
-	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
-	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
-	long ret;
-
-	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-
-	/* Don't create HPTE entries for bad address */
-	if (!vsid)
-		return;
-
-	if (linear_map_hash_slots[lmi] & 0x80)
-		return;
-
-	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
-				    HPTE_V_BOLTED,
-				    mmu_linear_psize, mmu_kernel_ssize);
-
-	BUG_ON (ret < 0);
-	raw_spin_lock(&linear_map_hash_lock);
-	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
-	linear_map_hash_slots[lmi] = ret | 0x80;
-	raw_spin_unlock(&linear_map_hash_lock);
-}
-
-static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
-{
-	unsigned long hash, hidx, slot;
-	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
-	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
-
-	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-	raw_spin_lock(&linear_map_hash_lock);
-	if (!(linear_map_hash_slots[lmi] & 0x80)) {
-		raw_spin_unlock(&linear_map_hash_lock);
-		return;
-	}
-	hidx = linear_map_hash_slots[lmi] & 0x7f;
-	linear_map_hash_slots[lmi] = 0;
-	raw_spin_unlock(&linear_map_hash_lock);
-	if (hidx & _PTEIDX_SECONDARY)
-		hash = ~hash;
-	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-	slot += hidx & _PTEIDX_GROUP_IX;
-	mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
-				     mmu_linear_psize,
-				     mmu_kernel_ssize, 0);
-}
-
-int hash__kernel_map_pages(struct page *page, int numpages, int enable)
-{
-	unsigned long flags, vaddr, lmi;
-	int i;
-
-	local_irq_save(flags);
-	for (i = 0; i < numpages; i++, page++) {
-		vaddr = (unsigned long)page_address(page);
-		lmi = __pa(vaddr) >> PAGE_SHIFT;
-		if (lmi >= linear_map_hash_count)
-			continue;
-		if (enable)
-			kernel_map_linear_page(vaddr, lmi);
-		else
-			kernel_unmap_linear_page(vaddr, lmi);
-	}
-	local_irq_restore(flags);
-	return 0;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
-
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				      phys_addr_t first_memblock_size)
 {
diff --git a/arch/powerpc/mm/book3s64/hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index 5a2e512e96db..83c3361b358b 100644
--- a/arch/powerpc/mm/book3s64/hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -53,6 +53,16 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	/* If PTE permissions don't match, take page fault */
 	if (unlikely(!check_pte_access(access, old_pte)))
 		return 1;
+	/*
+	 * If hash-4k, hugepages use several contiguous PxD entries
+	 * so bail out and let mm make the page young or dirty
+	 */
+	if (IS_ENABLED(CONFIG_PPC_4K_PAGES)) {
+		if (!(old_pte & _PAGE_ACCESSED))
+			return 1;
+		if ((access & _PAGE_WRITE) && !(old_pte & _PAGE_DIRTY))
+			return 1;
+	}
 
 	/*
 	 * Try to lock the PTE, add ACCESSED and DIRTY if it was
diff --git
a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 1715b07c630c..4e1e45420bd4 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -253,7 +253,7 @@ static void pmd_frag_destroy(void *pmd_frag) count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 83823db3488b..0db01e10a3f8 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -10,7 +10,6 @@ #include <linux/pkeys.h> #include <linux/debugfs.h> #include <linux/proc_fs.h> -#include <misc/cxl-base.h> #include <asm/pgalloc.h> #include <asm/tlb.h> @@ -37,6 +36,19 @@ EXPORT_SYMBOL(__pmd_frag_nr); unsigned long __pmd_frag_size_shift; EXPORT_SYMBOL(__pmd_frag_size_shift); +#ifdef CONFIG_KFENCE +extern bool kfence_early_init; +static int __init parse_kfence_early_init(char *arg) +{ + int val; + + if (get_option(&arg, &val)) + kfence_early_init = !!val; + return 0; +} +early_param("kfence.sample_interval", parse_kfence_early_init); +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is called when relaxing access to a hugepage. It's also called in the page @@ -170,11 +182,23 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, { unsigned long old_pmd; + VM_WARN_ON_ONCE(!pmd_present(*pmdp)); old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return __pmd(old_pmd); } +pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, + pud_t *pudp) +{ + unsigned long old_pud; + + VM_WARN_ON_ONCE(!pud_present(*pudp)); + old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID); + flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); + return __pud(old_pud); +} + pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, int full) { @@ -245,11 +269,6 @@ pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot) return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot)); } -pmd_t mk_pmd(struct page *page, pgprot_t pgprot) -{ - return pfn_pmd(page_to_pfn(page), pgprot); -} - pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { unsigned long pmdv; @@ -258,6 +277,15 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmdv &= _HPAGE_CHG_MASK; return pmd_set_protbits(__pmd(pmdv), newprot); } + +pud_t pud_modify(pud_t pud, pgprot_t newprot) +{ + unsigned long pudv; + + pudv = pud_val(pud); + pudv &= _HPAGE_CHG_MASK; + return pud_set_protbits(__pud(pudv), newprot); +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* For use by kexec, called with MMU off */ @@ -296,11 +324,7 @@ void __init mmu_partition_table_init(void) unsigned long ptcr; /* Initialize the Partition Table with no entries */ - partition_tb = memblock_alloc(patb_size, patb_size); - if (!partition_tb) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, patb_size, patb_size); - + partition_tb = memblock_alloc_or_panic(patb_size, patb_size); ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); set_ptcr_when_no_uv(ptcr); powernv_set_nmmu_ptcr(ptcr); @@ -393,7 +417,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm) ptdesc = pagetable_alloc(gfp, 0); if (!ptdesc) 
 		return NULL;
-	if (!pagetable_pmd_ctor(ptdesc)) {
+	if (!pagetable_pmd_ctor(mm, ptdesc)) {
 		pagetable_free(ptdesc);
 		return NULL;
 	}
@@ -443,7 +467,7 @@ void pmd_fragment_free(unsigned long *pmd)
 
 	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
 	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
-		pagetable_pmd_dtor(ptdesc);
+		pagetable_dtor(ptdesc);
 		pagetable_free(ptdesc);
 	}
 }
@@ -460,18 +484,6 @@ static inline void pgtable_free(void *table, int index)
 	case PUD_INDEX:
 		__pud_free(table);
 		break;
-#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
-	/* 16M hugepd directory at pud level */
-	case HTLB_16M_INDEX:
-		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
-		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
-		break;
-	/* 16G hugepd directory at the pgd level */
-	case HTLB_16G_INDEX:
-		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
-		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
-		break;
-#endif
 		/* We don't free pgd table via RCU callback */
 	default:
 		BUG();
@@ -569,7 +581,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
 /*
  * Does the CPU support tlbie?
  */
-bool tlbie_capable __read_mostly = true;
+bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
 EXPORT_SYMBOL(tlbie_capable);
 
 /*
@@ -577,7 +589,7 @@ EXPORT_SYMBOL(tlbie_capable);
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
-bool tlbie_enabled __read_mostly = true;
+bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
 
 static int __init setup_disable_tlbie(char *str)
 {
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 15e88f1439ec..9f764bc42b8c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -17,6 +17,7 @@
 #include <linux/hugetlb.h>
 #include <linux/string_helpers.h>
 #include <linux/memory.h>
+#include <linux/kfence.h>
 
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -31,6 +32,7 @@
 #include <asm/uaccess.h>
 #include <asm/ultravisor.h>
 #include <asm/set_memory.h>
+#include <asm/kfence.h>
 
 #include <trace/events/thp.h>
 
@@ -293,7 +295,8 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
 
 static int __meminit create_physical_mapping(unsigned long start,
 					     unsigned long end,
-					     int nid, pgprot_t _prot)
+					     int nid, pgprot_t _prot,
+					     unsigned long mapping_sz_limit)
 {
 	unsigned long vaddr, addr, mapping_size = 0;
 	bool prev_exec, exec = false;
@@ -301,7 +304,10 @@ static int __meminit create_physical_mapping(unsigned long start,
 	int psize;
 	unsigned long max_mapping_size = memory_block_size;
 
-	if (debug_pagealloc_enabled_or_kfence())
+	if (mapping_sz_limit < max_mapping_size)
+		max_mapping_size = mapping_sz_limit;
+
+	if (debug_pagealloc_enabled())
 		max_mapping_size = PAGE_SIZE;
 
 	start = ALIGN(start, PAGE_SIZE);
@@ -356,8 +362,62 @@ static int __meminit create_physical_mapping(unsigned long start,
 	return 0;
 }
 
+#ifdef CONFIG_KFENCE
+static inline phys_addr_t alloc_kfence_pool(void)
+{
+	phys_addr_t kfence_pool;
+
+	/*
+	 * TODO: Supporting KFENCE after bootup depends on the ability to
+	 * split page table mappings. As such support is not currently
+	 * implemented for radix pagetables, KFENCE can only be enabled
+	 * at system startup for now.
+	 *
+	 * After support for splitting mappings is available on radix,
+	 * alloc_kfence_pool() & map_kfence_pool() can be dropped and
+	 * mapping for __kfence_pool memory can be
+	 * split during arch_kfence_init_pool().
+	 */
+	if (!kfence_early_init)
+		goto no_kfence;
+
+	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	if (!kfence_pool)
+		goto no_kfence;
+
+	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	return kfence_pool;
+
+no_kfence:
+	disable_kfence();
+	return 0;
+}
+
+static inline void map_kfence_pool(phys_addr_t kfence_pool)
+{
+	if (!kfence_pool)
+		return;
+
+	if (create_physical_mapping(kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+				    -1, PAGE_KERNEL, PAGE_SIZE))
+		goto err;
+
+	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	__kfence_pool = __va(kfence_pool);
+	return;
+
+err:
+	memblock_phys_free(kfence_pool, KFENCE_POOL_SIZE);
+	disable_kfence();
+}
+#else
+static inline phys_addr_t alloc_kfence_pool(void) { return 0; }
+static inline void map_kfence_pool(phys_addr_t kfence_pool) { }
+#endif
+
 static void __init radix_init_pgtable(void)
 {
+	phys_addr_t kfence_pool;
 	unsigned long rts_field;
 	phys_addr_t start, end;
 	u64 i;
@@ -365,6 +425,8 @@ static void __init radix_init_pgtable(void)
 	/* We don't support slb for radix */
 	slb_set_size(0);
 
+	kfence_pool = alloc_kfence_pool();
+
 	/*
 	 * Create the linear mapping
 	 */
@@ -381,9 +443,11 @@
 		}
 
 		WARN_ON(create_physical_mapping(start, end,
-						-1, PAGE_KERNEL));
+						-1, PAGE_KERNEL, ~0UL));
 	}
 
+	map_kfence_pool(kfence_pool);
+
 	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
 			cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
 		/*
@@ -875,7 +939,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
 	}
 
 	return create_physical_mapping(__pa(start), __pa(end),
-				       nid, prot);
+				       nid, prot, ~0UL);
 }
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
@@ -912,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 	return 0;
 }
 
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 {
 	if (radix_enabled())
@@ -920,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 
 	return false;
 }
+#endif
 
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
@@ -1056,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pmd_t *pmd;
 	pte_t *pte;
 
+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in altmap boundary check to decide whether
+	 * we should use altmap or RAM based backing memory allocation. Also
+	 * the address needs to be aligned for set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a pmd mapping. We don't want to be too aggressive here because
+	 * that will cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+	 */
+
+	start = ALIGN_DOWN(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
 
@@ -1081,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 		 * in altmap block allocation failures, in which case
 		 * we fallback to RAM for vmemmap allocation.
 		 */
-		if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
-			       altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+		if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+		    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 			/*
 			 * make sure we don't create altmap mappings
 			 * covering things outside the device.
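For review, the mapping-size policy added to radix__vmemmap_populate() above can be reduced to a small model: a PMD-sized vmemmap backing is attempted only when the address is PMD aligned and, when a device altmap is in use, the allocation would not cross the altmap boundary; otherwise it falls back to PAGE_SIZE PTEs backed by RAM. A simplified sketch of that decision (not kernel code; the constant and the names here are illustrative):

#include <stdbool.h>

/* Illustrative leaf size only; the real PMD_SIZE depends on the page size. */
#define SKETCH_PMD_SIZE	(1UL << 21)

static bool try_pmd_backing(unsigned long addr, bool have_altmap,
			    bool crosses_altmap_boundary)
{
	if (addr & (SKETCH_PMD_SIZE - 1))
		return false;	/* unaligned: map with PAGE_SIZE PTEs */
	if (have_altmap && crosses_altmap_boundary)
		return false;	/* avoid mapping beyond the device */
	return true;
}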
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index f2708c8629a5..6b783552403c 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -24,7 +24,7 @@ #include <linux/pgtable.h> #include <asm/udbg.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include "internal.h" diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c index c0b58afb9a47..28bec5bc7879 100644 --- a/arch/powerpc/mm/book3s64/slice.c +++ b/arch/powerpc/mm/book3s64/slice.c @@ -22,7 +22,7 @@ #include <linux/security.h> #include <asm/mman.h> #include <asm/mmu.h> -#include <asm/copro.h> +#include <asm/spu.h> #include <asm/hugetlb.h> #include <asm/mmu_context.h> @@ -248,7 +248,9 @@ static void slice_convert(struct mm_struct *mm, spin_unlock_irqrestore(&slice_convert_lock, flags); - copro_flush_all_slbs(mm); +#ifdef CONFIG_SPU_BASE + spu_flush_all_slbs(mm); +#endif } /* @@ -282,12 +284,10 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm, { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long found, next_end; - struct vm_unmapped_area_info info; - - info.flags = 0; - info.length = len; - info.align_mask = PAGE_MASK & ((1ul << pshift) - 1); - info.align_offset = 0; + struct vm_unmapped_area_info info = { + .length = len, + .align_mask = PAGE_MASK & ((1ul << pshift) - 1), + }; /* * Check till the allow max value for this mmap request */ @@ -326,13 +326,13 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, { int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long found, prev; - struct vm_unmapped_area_info info; + struct vm_unmapped_area_info info = { + .flags = VM_UNMAPPED_AREA_TOPDOWN, + .length = len, + .align_mask = PAGE_MASK & ((1ul << pshift) - 1), + }; unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr); - info.flags = VM_UNMAPPED_AREA_TOPDOWN; - info.length = len; - info.align_mask = PAGE_MASK & ((1ul << pshift) - 1); - info.align_offset = 0; /* * If we are trying to allocate above DEFAULT_MAP_WINDOW * Add the different to the mmap_base. 
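A side note on the vm_unmapped_area_info conversions in slice.c above: a C99 designated initializer zero-initializes every member that is not explicitly named, which is why the explicit .flags = 0 and .align_offset = 0 assignments can be dropped without changing behavior. A compilable sketch of that guarantee (illustrative struct, not the kernel's):

#include <assert.h>

struct info_sketch {
	unsigned long flags;
	unsigned long length;
	unsigned long align_mask;
	unsigned long align_offset;
};

int main(void)
{
	/* Only two members are named; the others are implicitly zeroed. */
	struct info_sketch info = {
		.length     = 4096,
		.align_mask = 0xfffful,
	};

	assert(info.flags == 0);
	assert(info.align_offset == 0);
	return 0;
}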
@@ -635,30 +635,58 @@ return_addr: } EXPORT_SYMBOL_GPL(slice_get_unmapped_area); +#ifdef CONFIG_HUGETLB_PAGE +static int file_to_psize(struct file *file) +{ + struct hstate *hstate = hstate_file(file); + + return shift_to_mmu_psize(huge_page_shift(hstate)); +} +#else +static int file_to_psize(struct file *file) +{ + return 0; +} +#endif + unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, + vm_flags_t vm_flags) { + unsigned int psize; + if (radix_enabled()) - return generic_get_unmapped_area(filp, addr, len, pgoff, flags); + return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); - return slice_get_unmapped_area(addr, len, flags, - mm_ctx_user_psize(¤t->mm->context), 0); + if (filp && is_file_hugepages(filp)) + psize = file_to_psize(filp); + else + psize = mm_ctx_user_psize(¤t->mm->context); + + return slice_get_unmapped_area(addr, len, flags, psize, 0); } unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, - const unsigned long flags) + const unsigned long flags, + vm_flags_t vm_flags) { + unsigned int psize; + if (radix_enabled()) - return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags); + return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags, vm_flags); + + if (filp && is_file_hugepages(filp)) + psize = file_to_psize(filp); + else + psize = mm_ctx_user_psize(¤t->mm->context); - return slice_get_unmapped_area(addr0, len, flags, - mm_ctx_user_psize(¤t->mm->context), 1); + return slice_get_unmapped_area(addr0, len, flags, psize, 1); } unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr) @@ -788,20 +816,4 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start)); } - -static int file_to_psize(struct file *file) -{ - struct hstate *hstate = hstate_file(file); - return shift_to_mmu_psize(huge_page_shift(hstate)); -} - -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags) -{ - if (radix_enabled()) - return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); - - return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1); -} #endif diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c index 15189592da09..7186516eca52 100644 --- a/arch/powerpc/mm/cacheflush.c +++ b/arch/powerpc/mm/cacheflush.c @@ -78,7 +78,7 @@ EXPORT_SYMBOL(flush_icache_range); #ifdef CONFIG_HIGHMEM /** - * flush_dcache_icache_phys() - Flush a page by it's physical address + * flush_dcache_icache_phys() - Flush a page by its physical address * @physaddr: the physical address of the page */ static void flush_dcache_icache_phys(unsigned long physaddr) diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index f49fd873df8d..f5f8692e2c69 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -12,8 +12,6 @@ #include <linux/export.h> #include <asm/reg.h> #include <asm/copro.h> -#include <asm/spu.h> -#include <misc/cxl-base.h> /* * This ought to be kept in sync with the powerpc specific do_page_fault @@ -135,13 +133,4 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) return 0; } EXPORT_SYMBOL_GPL(copro_calculate_slb); - -void copro_flush_all_slbs(struct mm_struct 
*mm) -{ -#ifdef CONFIG_SPU_BASE - spu_flush_all_slbs(mm); -#endif - cxl_slbia(mm); -} -EXPORT_SYMBOL_GPL(copro_flush_all_slbs); #endif diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index c110ab8fa8a3..8dd7b340d51f 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c @@ -491,10 +491,8 @@ static int __init drmem_init(void) const __be32 *prop; dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); - if (!dn) { - pr_info("No dynamic reconfiguration memory found\n"); + if (!dn) return 0; - } if (init_drmem_lmb_size(dn)) { of_node_put(dn); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 53335ae21a40..806c74e0d5ab 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/ptrace.h> @@ -71,23 +72,26 @@ static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long add return __bad_area_nosemaphore(regs, address, SEGV_MAPERR); } -static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code, + struct mm_struct *mm, struct vm_area_struct *vma) { - struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); return __bad_area_nosemaphore(regs, address, si_code); } static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, + struct mm_struct *mm, struct vm_area_struct *vma) { - struct mm_struct *mm = current->mm; int pkey; /* @@ -109,7 +113,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, */ pkey = vma_pkey(vma); - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); /* * If we are in kernel mode, bail out with a SEGV, this will @@ -124,9 +131,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, return 0; } -static noinline int bad_access(struct pt_regs *regs, unsigned long address) +static noinline int bad_access(struct pt_regs *regs, unsigned long address, + struct mm_struct *mm, struct vm_area_struct *vma) { - return __bad_area(regs, address, SEGV_ACCERR); + return __bad_area(regs, address, SEGV_ACCERR, mm, vma); } static int do_sigbus(struct pt_regs *regs, unsigned long address, @@ -211,7 +219,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, // Read/write fault blocked by KUAP is bad, it can never succeed. if (bad_kuap_fault(regs, address, is_write)) { pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n", - is_write ? "write" : "read", address, + str_write_read(is_write), address, from_kuid(&init_user_ns, current_uid())); // Fault on user outside of certain regions (eg. 
copy_tofrom_user()) is bad
@@ -361,13 +369,13 @@ static void sanity_check_fault(bool is_write, bool is_user,
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
-#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+#ifdef CONFIG_BOOKE
 #define page_fault_is_write(__err)	((__err) & ESR_DST)
 #else
 #define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
 #endif
 
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_BOOKE
 #define page_fault_is_bad(__err)	(0)
 #elif defined(CONFIG_PPC_8xx)
 #define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
@@ -432,10 +440,16 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	/*
 	 * The kernel should never take an execute fault nor should it
 	 * take a page fault to a kernel address or a page fault to a user
-	 * address outside of dedicated places
+	 * address outside of dedicated places.
+	 *
+	 * Rather than have kfence report false negatives, check whether the
+	 * NIP belongs to the fixup table, since the fault could come from
+	 * functions like copy_from_kernel_nofault().
 	 */
 	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
-		if (kfence_handle_page_fault(address, is_write, regs))
+		if (is_kfence_address((void *)address) &&
+		    !search_exception_tables(instruction_pointer(regs)) &&
+		    kfence_handle_page_fault(address, is_write, regs))
 			return 0;
 
 		return SIGSEGV;
@@ -479,13 +493,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	if (unlikely(access_pkey_error(is_write, is_exec,
 				       (error_code & DSISR_KEYFAULT), vma))) {
-		vma_end_read(vma);
-		goto lock_mmap;
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		return bad_access_pkey(regs, address, NULL, vma);
 	}
 
 	if (unlikely(access_error(is_write, is_exec, vma))) {
-		vma_end_read(vma);
-		goto lock_mmap;
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		return bad_access(regs, address, NULL, vma);
 	}
 
 	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
@@ -521,10 +535,10 @@ retry:
 
 	if (unlikely(access_pkey_error(is_write, is_exec,
 				       (error_code & DSISR_KEYFAULT), vma)))
-		return bad_access_pkey(regs, address, vma);
+		return bad_access_pkey(regs, address, mm, vma);
 
 	if (unlikely(access_error(is_write, is_exec, vma)))
-		return bad_access(regs, address);
+		return bad_access(regs, address, mm, vma);
 
 	/*
 	 * If for any reason at all we couldn't handle the fault,
@@ -612,7 +626,7 @@ static void __bad_page_fault(struct pt_regs *regs, int sig)
 	case INTERRUPT_DATA_STORAGE:
 	case INTERRUPT_H_DATA_STORAGE:
 		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
-			 is_write ?
"write" : "read", regs->dar); + str_write_read(is_write), regs->dar); break; case INTERRUPT_DATA_SEGMENT: pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 594a4b7b2ca2..d3c1b749dcfc 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -28,8 +28,6 @@ bool hugetlb_disabled = false; -#define hugepd_none(hpd) (hpd_val(hpd) == 0) - #define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \ __builtin_ffs(sizeof(void *))) @@ -42,156 +40,43 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long s return __find_linux_pte(mm->pgd, addr, NULL, NULL); } -static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, - unsigned long address, unsigned int pdshift, - unsigned int pshift, spinlock_t *ptl) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { - struct kmem_cache *cachep; - pte_t *new; - int i; - int num_hugepd; - - if (pshift >= pdshift) { - cachep = PGT_CACHE(PTE_T_ORDER); - num_hugepd = 1 << (pshift - pdshift); - } else { - cachep = PGT_CACHE(pdshift - pshift); - num_hugepd = 1; - } - - if (!cachep) { - WARN_ONCE(1, "No page table cache created for hugetlb tables"); - return -ENOMEM; - } - - new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL)); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; - BUG_ON(pshift > HUGEPD_SHIFT_MASK); - BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); + addr &= ~(sz - 1); - if (!new) - return -ENOMEM; + p4d = p4d_offset(pgd_offset(mm, addr), addr); + if (!mm_pud_folded(mm) && sz >= P4D_SIZE) + return (pte_t *)p4d; - /* - * Make sure other cpus find the hugepd set only after a - * properly initialized page table is visible to them. - * For more details look for comment in __pte_alloc(). - */ - smp_wmb(); + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return NULL; + if (!mm_pmd_folded(mm) && sz >= PUD_SIZE) + return (pte_t *)pud; - spin_lock(ptl); - /* - * We have multiple higher-level entries that point to the same - * actual pte location. Fill in each as we go and backtrack on error. - * We need all of these so the DTLB pgtable walk code can find the - * right higher-level entry without knowing if it's a hugepage or not. - */ - for (i = 0; i < num_hugepd; i++, hpdp++) { - if (unlikely(!hugepd_none(*hpdp))) - break; - hugepd_populate(hpdp, new, pshift); - } - /* If we bailed from the for loop early, an error occurred, clean up */ - if (i < num_hugepd) { - for (i = i - 1 ; i >= 0; i--, hpdp--) - *hpdp = __hugepd(0); - kmem_cache_free(cachep, new); - } else { - kmemleak_ignore(new); - } - spin_unlock(ptl); - return 0; -} + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; -/* - * At this point we do the placement change only for BOOK3S 64. This would - * possibly work on other subarchs. 
- */ -pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, unsigned long sz) -{ - pgd_t *pg; - p4d_t *p4; - pud_t *pu; - pmd_t *pm; - hugepd_t *hpdp = NULL; - unsigned pshift = __ffs(sz); - unsigned pdshift = PGDIR_SHIFT; - spinlock_t *ptl; - - addr &= ~(sz-1); - pg = pgd_offset(mm, addr); - p4 = p4d_offset(pg, addr); + if (sz >= PMD_SIZE) { + /* On 8xx, all hugepages are handled as contiguous PTEs */ + if (IS_ENABLED(CONFIG_PPC_8xx)) { + int i; -#ifdef CONFIG_PPC_BOOK3S_64 - if (pshift == PGDIR_SHIFT) - /* 16GB huge page */ - return (pte_t *) p4; - else if (pshift > PUD_SHIFT) { - /* - * We need to use hugepd table - */ - ptl = &mm->page_table_lock; - hpdp = (hugepd_t *)p4; - } else { - pdshift = PUD_SHIFT; - pu = pud_alloc(mm, p4, addr); - if (!pu) - return NULL; - if (pshift == PUD_SHIFT) - return (pte_t *)pu; - else if (pshift > PMD_SHIFT) { - ptl = pud_lockptr(mm, pu); - hpdp = (hugepd_t *)pu; - } else { - pdshift = PMD_SHIFT; - pm = pmd_alloc(mm, pu, addr); - if (!pm) - return NULL; - if (pshift == PMD_SHIFT) - /* 16MB hugepage */ - return (pte_t *)pm; - else { - ptl = pmd_lockptr(mm, pm); - hpdp = (hugepd_t *)pm; + for (i = 0; i < sz / PMD_SIZE; i++) { + if (!pte_alloc_huge(mm, pmd + i, addr)) + return NULL; } } + return (pte_t *)pmd; } -#else - if (pshift >= PGDIR_SHIFT) { - ptl = &mm->page_table_lock; - hpdp = (hugepd_t *)p4; - } else { - pdshift = PUD_SHIFT; - pu = pud_alloc(mm, p4, addr); - if (!pu) - return NULL; - if (pshift >= PUD_SHIFT) { - ptl = pud_lockptr(mm, pu); - hpdp = (hugepd_t *)pu; - } else { - pdshift = PMD_SHIFT; - pm = pmd_alloc(mm, pu, addr); - if (!pm) - return NULL; - ptl = pmd_lockptr(mm, pm); - hpdp = (hugepd_t *)pm; - } - } -#endif - if (!hpdp) - return NULL; - - if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT) - return pte_alloc_huge(mm, (pmd_t *)hpdp, addr); - - BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); - if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, - pdshift, pshift, ptl)) - return NULL; - - return hugepte_offset(*hpdp, addr, pdshift); + return pte_alloc_huge(mm, pmd, addr); } #ifdef CONFIG_PPC_BOOK3S_64 @@ -228,6 +113,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) gpage_freearray[nr_gpages] = 0; list_add(&m->list, &huge_boot_pages[0]); m->hstate = hstate; + m->flags = 0; return 1; } @@ -248,264 +134,6 @@ int __init alloc_bootmem_huge_page(struct hstate *h, int nid) return __alloc_bootmem_huge_page(h, nid); } -#ifndef CONFIG_PPC_BOOK3S_64 -#define HUGEPD_FREELIST_SIZE \ - ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) - -struct hugepd_freelist { - struct rcu_head rcu; - unsigned int index; - void *ptes[]; -}; - -static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur); - -static void hugepd_free_rcu_callback(struct rcu_head *head) -{ - struct hugepd_freelist *batch = - container_of(head, struct hugepd_freelist, rcu); - unsigned int i; - - for (i = 0; i < batch->index; i++) - kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]); - - free_page((unsigned long)batch); -} - -static void hugepd_free(struct mmu_gather *tlb, void *hugepte) -{ - struct hugepd_freelist **batchp; - - batchp = &get_cpu_var(hugepd_freelist_cur); - - if (atomic_read(&tlb->mm->mm_users) < 2 || - mm_is_thread_local(tlb->mm)) { - kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte); - put_cpu_var(hugepd_freelist_cur); - return; - } - - if (*batchp == NULL) { - *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC); - (*batchp)->index = 0; - } - - 
(*batchp)->ptes[(*batchp)->index++] = hugepte; - if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { - call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback); - *batchp = NULL; - } - put_cpu_var(hugepd_freelist_cur); -} -#else -static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {} -#endif - -/* Return true when the entry to be freed maps more than the area being freed */ -static bool range_is_outside_limits(unsigned long start, unsigned long end, - unsigned long floor, unsigned long ceiling, - unsigned long mask) -{ - if ((start & mask) < floor) - return true; - if (ceiling) { - ceiling &= mask; - if (!ceiling) - return true; - } - return end - 1 > ceiling - 1; -} - -static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, - unsigned long start, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pte_t *hugepte = hugepd_page(*hpdp); - int i; - - unsigned long pdmask = ~((1UL << pdshift) - 1); - unsigned int num_hugepd = 1; - unsigned int shift = hugepd_shift(*hpdp); - - /* Note: On fsl the hpdp may be the first of several */ - if (shift > pdshift) - num_hugepd = 1 << (shift - pdshift); - - if (range_is_outside_limits(start, end, floor, ceiling, pdmask)) - return; - - for (i = 0; i < num_hugepd; i++, hpdp++) - *hpdp = __hugepd(0); - - if (shift >= pdshift) - hugepd_free(tlb, hugepte); - else - pgtable_free_tlb(tlb, hugepte, - get_hugepd_cache_index(pdshift - shift)); -} - -static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pgtable_t token = pmd_pgtable(*pmd); - - if (range_is_outside_limits(addr, end, floor, ceiling, PMD_MASK)) - return; - - pmd_clear(pmd); - pte_free_tlb(tlb, token, addr); - mm_dec_nr_ptes(tlb->mm); -} - -static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pmd_t *pmd; - unsigned long next; - unsigned long start; - - start = addr; - do { - unsigned long more; - - pmd = pmd_offset(pud, addr); - next = pmd_addr_end(addr, end); - if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { - if (pmd_none_or_clear_bad(pmd)) - continue; - - /* - * if it is not hugepd pointer, we should already find - * it cleared. - */ - WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx)); - - hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling); - - continue; - } - /* - * Increment next by the size of the huge mapping since - * there may be more than one entry at this level for a - * single hugepage, but all of them point to - * the same kmem cache that holds the hugepte. 
- */ - more = addr + (1UL << hugepd_shift(*(hugepd_t *)pmd)); - if (more > next) - next = more; - - free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, - addr, next, floor, ceiling); - } while (addr = next, addr != end); - - if (range_is_outside_limits(start, end, floor, ceiling, PUD_MASK)) - return; - - pmd = pmd_offset(pud, start & PUD_MASK); - pud_clear(pud); - pmd_free_tlb(tlb, pmd, start & PUD_MASK); - mm_dec_nr_pmds(tlb->mm); -} - -static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pud_t *pud; - unsigned long next; - unsigned long start; - - start = addr; - do { - pud = pud_offset(p4d, addr); - next = pud_addr_end(addr, end); - if (!is_hugepd(__hugepd(pud_val(*pud)))) { - if (pud_none_or_clear_bad(pud)) - continue; - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); - } else { - unsigned long more; - /* - * Increment next by the size of the huge mapping since - * there may be more than one entry at this level for a - * single hugepage, but all of them point to - * the same kmem cache that holds the hugepte. - */ - more = addr + (1UL << hugepd_shift(*(hugepd_t *)pud)); - if (more > next) - next = more; - - free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, - addr, next, floor, ceiling); - } - } while (addr = next, addr != end); - - if (range_is_outside_limits(start, end, floor, ceiling, PGDIR_MASK)) - return; - - pud = pud_offset(p4d, start & PGDIR_MASK); - p4d_clear(p4d); - pud_free_tlb(tlb, pud, start & PGDIR_MASK); - mm_dec_nr_puds(tlb->mm); -} - -/* - * This function frees user-level page tables of a process. - */ -void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - pgd_t *pgd; - p4d_t *p4d; - unsigned long next; - - /* - * Because there are a number of different possible pagetable - * layouts for hugepage ranges, we limit knowledge of how - * things should be laid out to the allocation path - * (huge_pte_alloc(), above). Everything else works out the - * structure as it goes from information in the hugepd - * pointers. That means that we can't here use the - * optimization used in the normal page free_pgd_range(), of - * checking whether we're actually covering a large enough - * range to have to do anything at the top level of the walk - * instead of at the bottom. - * - * To make sense of this, you should probably go read the big - * block comment at the top of the normal free_pgd_range(), - * too. - */ - - do { - next = pgd_addr_end(addr, end); - pgd = pgd_offset(tlb->mm, addr); - p4d = p4d_offset(pgd, addr); - if (!is_hugepd(__hugepd(pgd_val(*pgd)))) { - if (p4d_none_or_clear_bad(p4d)) - continue; - hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling); - } else { - unsigned long more; - /* - * Increment next by the size of the huge mapping since - * there may be more than one entry at the pgd level - * for a single hugepage, but all of them point to the - * same kmem cache that holds the hugepte. 
- */ - more = addr + (1UL << hugepd_shift(*(hugepd_t *)pgd)); - if (more > next) - next = more; - - free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT, - addr, next, floor, ceiling); - } - } while (addr = next, addr != end); -} - bool __init arch_hugetlb_valid_size(unsigned long size) { int shift = __ffs(size); @@ -552,44 +180,14 @@ static int __init hugetlbpage_init(void) for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { unsigned shift; - unsigned pdshift; if (!mmu_psize_defs[psize].shift) continue; shift = mmu_psize_to_shift(psize); -#ifdef CONFIG_PPC_BOOK3S_64 - if (shift > PGDIR_SHIFT) - continue; - else if (shift > PUD_SHIFT) - pdshift = PGDIR_SHIFT; - else if (shift > PMD_SHIFT) - pdshift = PUD_SHIFT; - else - pdshift = PMD_SHIFT; -#else - if (shift < PUD_SHIFT) - pdshift = PMD_SHIFT; - else if (shift < PGDIR_SHIFT) - pdshift = PUD_SHIFT; - else - pdshift = PGDIR_SHIFT; -#endif - if (add_huge_page_size(1ULL << shift) < 0) continue; - /* - * if we have pdshift and shift value same, we don't - * use pgt cache for hugepd. - */ - if (pdshift > shift) { - if (!IS_ENABLED(CONFIG_PPC_8xx)) - pgtable_cache_add(pdshift - shift); - } else if (IS_ENABLED(CONFIG_PPC_E500) || - IS_ENABLED(CONFIG_PPC_8xx)) { - pgtable_cache_add(PTE_T_ORDER); - } configured = true; } diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c index d3a7726ecf51..745097554bea 100644 --- a/arch/powerpc/mm/init-common.c +++ b/arch/powerpc/mm/init-common.c @@ -31,6 +31,10 @@ EXPORT_SYMBOL_GPL(kernstart_virt_addr); bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP); bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP); +#ifdef CONFIG_KFENCE +bool __ro_after_init kfence_disabled; +bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; +#endif static int __init parse_nosmep(char *p) { @@ -70,7 +74,7 @@ void setup_kup(void) #define CTOR(shift) static void ctor_##shift(void *addr) \ { \ - memset(addr, 0, sizeof(void *) << (shift)); \ + memset(addr, 0, sizeof(pgd_t) << (shift)); \ } CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7); @@ -114,18 +118,14 @@ EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */ void pgtable_cache_add(unsigned int shift) { char *name; - unsigned long table_size = sizeof(void *) << shift; + unsigned long table_size = sizeof(pgd_t) << shift; unsigned long align = table_size; /* When batching pgtable pointers for RCU freeing, we store * the index size in the low bits. Table alignment must be * big enough to fit it. - * - * Likewise, hugeapge pagetable pointers contain a (different) - * shift value in the low bits. All tables must be aligned so - * as to leave enough 0 bits in the address to contain it. 
*/ - unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, - HUGEPD_SHIFT_MASK + 1); + */ + unsigned long minalign = MAX_PGTABLE_INDEX_SIZE + 1; struct kmem_cache *new = NULL; /* It would be nice if this was a BUILD_BUG_ON(), but at the diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d96bbc001e73..b6f3ae03ca9e 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -41,6 +41,7 @@ #include <linux/libfdt.h> #include <linux/memremap.h> #include <linux/memory.h> +#include <linux/bootmem_info.h> #include <asm/pgalloc.h> #include <asm/page.h> @@ -386,10 +387,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, } #endif + +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) { } +#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */ #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index 7b0afcabd89f..4b4feba9873b 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -4,7 +4,6 @@ #include <linux/slab.h> #include <linux/mmzone.h> #include <linux/vmalloc.h> -#include <asm/io-workarounds.h> unsigned long ioremap_bot; EXPORT_SYMBOL(ioremap_bot); @@ -14,8 +13,6 @@ void __iomem *ioremap(phys_addr_t addr, unsigned long size) pgprot_t prot = pgprot_noncached(PAGE_KERNEL); void *caller = __builtin_return_address(0); - if (iowa_is_active()) - return iowa_ioremap(addr, size, prot, caller); return __ioremap_caller(addr, size, prot, caller); } EXPORT_SYMBOL(ioremap); @@ -25,8 +22,6 @@ void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size) pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); void *caller = __builtin_return_address(0); - if (iowa_is_active()) - return iowa_ioremap(addr, size, prot, caller); return __ioremap_caller(addr, size, prot, caller); } EXPORT_SYMBOL(ioremap_wc); @@ -36,22 +31,18 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) pgprot_t prot = pgprot_cached(PAGE_KERNEL); void *caller = __builtin_return_address(0); - if (iowa_is_active()) - return iowa_ioremap(addr, size, prot, caller); return __ioremap_caller(addr, size, prot, caller); } -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags) +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot) { - pte_t pte = __pte(flags); + pte_t pte = __pte(pgprot_val(prot)); void *caller = __builtin_return_address(0); /* writeable implies dirty for kernel addresses */ if (pte_write(pte)) pte = pte_mkdirty(pte); - if (iowa_is_active()) - return iowa_ioremap(addr, size, pte_pgprot(pte), caller); return __ioremap_caller(addr, size, pte_pgprot(pte), caller); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c index d24e5f166723..fb8b55bd2cd5 100644 --- a/arch/powerpc/mm/ioremap_64.c +++ b/arch/powerpc/mm/ioremap_64.c @@ -52,6 +52,6 @@ void iounmap(volatile void __iomem *token) if (!slab_is_available()) return; - generic_iounmap(PCI_FIX_ADDR(token)); + generic_iounmap(token); } EXPORT_SYMBOL(iounmap); diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c index 2784224054f8..989d6cdf4141 100644 --- a/arch/powerpc/mm/kasan/8xx.c +++ b/arch/powerpc/mm/kasan/8xx.c @@ -6,28 +6,33 @@ #include <linux/memblock.h> #include <linux/hugetlb.h> +#include <asm/pgalloc.h> + static int __init kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block) { pmd_t *pmd = pmd_off_k(k_start); unsigned long 
k_cur, k_next; - for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) { - pte_basic_t *new; + for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++, block += SZ_4M) { + pte_t *ptep; + int i; k_next = pgd_addr_end(k_cur, k_end); - k_next = pgd_addr_end(k_next, k_end); if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte) continue; - new = memblock_alloc(sizeof(pte_basic_t), SZ_4K); - if (!new) + ptep = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); + if (!ptep) return -ENOMEM; - *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL))); + for (i = 0; i < PTRS_PER_PTE; i++) { + pte_t pte = pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block + i * PAGE_SIZE)), PAGE_KERNEL)); - hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M); - hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M); + __set_pte_at(&init_mm, k_cur, ptep + i, pte, 1); + } + pmd_populate_kernel(&init_mm, pmd, ptep); + *pmd = __pmd(pmd_val(*pmd) | _PMD_PAGE_8M); } return 0; } diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c index aa9aa11927b2..03666d790a53 100644 --- a/arch/powerpc/mm/kasan/init_32.c +++ b/arch/powerpc/mm/kasan/init_32.c @@ -7,7 +7,7 @@ #include <linux/memblock.h> #include <linux/sched/task.h> #include <asm/pgalloc.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <mm/mmu_decl.h> static pgprot_t __init kasan_prot_ro(void) diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c index 11519e88dc6b..60c78aac0f63 100644 --- a/arch/powerpc/mm/kasan/init_book3e_64.c +++ b/arch/powerpc/mm/kasan/init_book3e_64.c @@ -40,19 +40,19 @@ static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgpr pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (kasan_pud_table(*p4dp)) { - pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); + pudp = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE); memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (kasan_pmd_table(*pudp)) { - pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); + pmdp = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE); memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (kasan_pte_table(*pmdp)) { - ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); + ptep = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE); memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } @@ -74,7 +74,7 @@ static void __init kasan_init_phys_region(void *start, void *end) k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); - va = memblock_alloc(k_end - k_start, PAGE_SIZE); + va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } @@ -112,7 +112,7 @@ void __init kasan_init(void) pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO); for_each_mem_range(i, &start, &end) - kasan_init_phys_region((void *)start, (void *)end); + kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end)); if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE); diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c 
b/arch/powerpc/mm/kasan/init_book3s_64.c index 9300d641cf9a..7d959544c077 100644 --- a/arch/powerpc/mm/kasan/init_book3s_64.c +++ b/arch/powerpc/mm/kasan/init_book3s_64.c @@ -32,7 +32,7 @@ static void __init kasan_init_phys_region(void *start, void *end) k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); - va = memblock_alloc(k_end - k_start, PAGE_SIZE); + va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } @@ -62,7 +62,7 @@ void __init kasan_init(void) } for_each_mem_range(i, &start, &end) - kasan_init_phys_region((void *)start, (void *)end); + kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end)); for (i = 0; i < PTRS_PER_PTE; i++) __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page, diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 3a440004b97d..3ddbfdbfa941 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -16,6 +16,8 @@ #include <linux/highmem.h> #include <linux/suspend.h> #include <linux/dma-direct.h> +#include <linux/execmem.h> +#include <linux/vmalloc.h> #include <asm/swiotlb.h> #include <asm/machdep.h> @@ -24,13 +26,13 @@ #include <asm/svm.h> #include <asm/mmzone.h> #include <asm/ftrace.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/setup.h> #include <asm/fixmap.h> #include <mm/mmu_decl.h> -unsigned long long memory_limit; +unsigned long long memory_limit __initdata; unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); @@ -214,7 +216,7 @@ static int __init mark_nonram_nosave(void) * everything else. GFP_DMA32 page allocations automatically fall back to * ZONE_DMA. * - * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the + * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by * ZONE_DMA. 
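[Note, not part of the patch: the paging_init() hunk below demotes zone_dma_bits to a local and publishes the limit through the generic zone_dma_limit instead. The arithmetic is worth spelling out; a minimal user-space sketch follows, where main() and the printf are hypothetical scaffolding while DMA_BIT_MASK mirrors the macro from include/linux/dma-mapping.h.]

    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as the kernel's DMA_BIT_MASK() in <linux/dma-mapping.h>. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        int zone_dma_bits = 31;   /* the 31-bit case from the hunk below */
        uint64_t zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);

        /* Prints 0x7fffffff: the highest bus address ZONE_DMA must cover. */
        printf("zone_dma_limit = %#llx\n", (unsigned long long)zone_dma_limit);
        return 0;
    }
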
@@ -228,6 +230,7 @@ void __init paging_init(void) { unsigned long long total_ram = memblock_phys_mem_size(); phys_addr_t top_of_ram = memblock_end_of_DRAM(); + int zone_dma_bits; #ifdef CONFIG_HIGHMEM unsigned long v = __fix_to_virt(FIX_KMAP_END); @@ -254,6 +257,8 @@ void __init paging_init(void) else zone_dma_bits = 31; + zone_dma_limit = DMA_BIT_MASK(zone_dma_bits); + #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 1UL << (zone_dma_bits - PAGE_SHIFT)); @@ -268,7 +273,7 @@ void __init paging_init(void) mark_nonram_nosave(); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* * book3s is limited to 16 page sizes due to encoding this in @@ -288,26 +293,8 @@ void __init mem_init(void) swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags); #endif - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - kasan_late_init(); - memblock_free_all(); - -#ifdef CONFIG_HIGHMEM - { - unsigned long pfn, highmem_mapnr; - - highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; - for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { - phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; - struct page *page = pfn_to_page(pfn); - if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr)) - free_highmem_page(page); - } - } -#endif /* CONFIG_HIGHMEM */ - #if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up @@ -316,28 +303,6 @@ void __init mem_init(void) per_cpu(next_tlbcam_idx, smp_processor_id()) = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif - -#ifdef CONFIG_PPC32 - pr_info("Kernel virtual memory layout:\n"); -#ifdef CONFIG_KASAN - pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n", - KASAN_SHADOW_START, KASAN_SHADOW_END); -#endif - pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); -#ifdef CONFIG_HIGHMEM - pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", - PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); -#endif /* CONFIG_HIGHMEM */ - if (ioremap_bot != IOREMAP_TOP) - pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", - ioremap_bot, IOREMAP_TOP); - pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", - VMALLOC_START, VMALLOC_END); -#ifdef MODULES_VADDR - pr_info(" * 0x%08lx..0x%08lx : modules\n", - MODULES_VADDR, MODULES_END); -#endif -#endif /* CONFIG_PPC32 */ } void free_initmem(void) @@ -373,7 +338,7 @@ static int __init add_system_ram_resources(void) */ res->end = end - 1; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - WARN_ON(request_resource(&iomem_resource, res) < 0); + WARN_ON(insert_resource(&iomem_resource, res) < 0); } } @@ -406,3 +371,80 @@ int devmem_is_allowed(unsigned long pfn) * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed. */ EXPORT_SYMBOL_GPL(walk_system_ram_range); + +#ifdef CONFIG_EXECMEM +static struct execmem_info execmem_info __ro_after_init; + +#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603) +static void prealloc_execmem_pgtable(void) +{ + unsigned long va; + + for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE) + pte_alloc_kernel(pmd_off_k(va), va); +} +#else +static void prealloc_execmem_pgtable(void) { } +#endif + +struct execmem_info __init *execmem_arch_setup(void) +{ + pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + pgprot_t prot = strict_module_rwx_enabled() ? 
PAGE_KERNEL : PAGE_KERNEL_EXEC; + unsigned long fallback_start = 0, fallback_end = 0; + unsigned long start, end; + + /* + * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and + * allow allocating data in the entire vmalloc space + */ +#ifdef MODULES_VADDR + unsigned long limit = (unsigned long)_etext - SZ_32M; + + BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); + + /* First try within 32M limit from _etext to avoid branch trampolines */ + if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) { + start = limit; + fallback_start = MODULES_VADDR; + fallback_end = MODULES_END; + } else { + start = MODULES_VADDR; + } + + end = MODULES_END; +#else + start = VMALLOC_START; + end = VMALLOC_END; +#endif + + prealloc_execmem_pgtable(); + + execmem_info = (struct execmem_info){ + .ranges = { + [EXECMEM_DEFAULT] = { + .start = start, + .end = end, + .pgprot = prot, + .alignment = 1, + .fallback_start = fallback_start, + .fallback_end = fallback_end, + }, + [EXECMEM_KPROBES] = { + .start = VMALLOC_START, + .end = VMALLOC_END, + .pgprot = kprobes_prot, + .alignment = 1, + }, + [EXECMEM_MODULE_DATA] = { + .start = VMALLOC_START, + .end = VMALLOC_END, + .pgprot = PAGE_KERNEL, + .alignment = 1, + }, + }, + }; + + return &execmem_info; +} +#endif /* CONFIG_EXECMEM */ diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c index b24c19078eb1..3e3af29b4523 100644 --- a/arch/powerpc/mm/mmu_context.c +++ b/arch/powerpc/mm/mmu_context.c @@ -21,7 +21,7 @@ static inline void switch_mm_pgdir(struct task_struct *tsk, #ifdef CONFIG_PPC_BOOK3S_32 tsk->thread.sr0 = mm->context.sr0; #endif -#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) +#if defined(CONFIG_BOOKE) && defined(CONFIG_PPC_KUAP) tsk->thread.pid = mm->context.id; #endif } diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 8e84bc214d13..b2d1eea09761 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -20,9 +20,9 @@ #include <asm/trace.h> /* - * On 40x and 8xx, we directly inline tlbia and tlbivax + * On 8xx, we directly inline tlbia */ -#if defined(CONFIG_40x) || defined(CONFIG_PPC_8xx) +#ifdef CONFIG_PPC_8xx static inline void _tlbil_all(void) { asm volatile ("sync; tlbia; isync" : : : "memory"); @@ -35,7 +35,7 @@ static inline void _tlbil_pid(unsigned int pid) } #define _tlbil_pid_noind(pid) _tlbil_pid(pid) -#else /* CONFIG_40x || CONFIG_PPC_8xx */ +#else /* CONFIG_PPC_8xx */ extern void _tlbil_all(void); extern void _tlbil_pid(unsigned int pid); #ifdef CONFIG_PPC_BOOK3E_64 @@ -43,7 +43,7 @@ extern void _tlbil_pid_noind(unsigned int pid); #else #define _tlbil_pid_noind(pid) _tlbil_pid(pid) #endif -#endif /* !(CONFIG_40x || CONFIG_PPC_8xx) */ +#endif /* !CONFIG_PPC_8xx */ /* * On 8xx, we directly inline tlbie, on others, it's extern @@ -160,11 +160,11 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500) -void mmu_mark_initmem_nx(void); -void mmu_mark_rodata_ro(void); +int mmu_mark_initmem_nx(void); +int mmu_mark_rodata_ro(void); #else -static inline void mmu_mark_initmem_nx(void) { } -static inline void mmu_mark_rodata_ro(void) { } +static inline int mmu_mark_initmem_nx(void) { return 0; } +static inline int mmu_mark_rodata_ro(void) { return 0; } #endif #ifdef CONFIG_PPC_8xx diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c deleted file mode 100644 index e835e80c09db..000000000000 --- a/arch/powerpc/mm/nohash/40x.c +++ /dev/null @@ 
-1,161 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * This file contains the routines for initializing the MMU - * on the 4xx series of chips. - * -- paulus - * - * Derived from arch/ppc/mm/init.c: - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) - * and Cort Dougan (PReP) (cort@cs.nmt.edu) - * Copyright (C) 1996 Paul Mackerras - * - * Derived from "arch/i386/mm/init.c" - * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds - */ - -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/swap.h> -#include <linux/stddef.h> -#include <linux/vmalloc.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/highmem.h> -#include <linux/memblock.h> - -#include <asm/io.h> -#include <asm/mmu_context.h> -#include <asm/mmu.h> -#include <linux/uaccess.h> -#include <asm/smp.h> -#include <asm/bootx.h> -#include <asm/machdep.h> -#include <asm/setup.h> - -#include <mm/mmu_decl.h> - -/* - * MMU_init_hw does the chip-specific initialization of the MMU hardware. - */ -void __init MMU_init_hw(void) -{ - int i; - unsigned long zpr; - - /* - * The Zone Protection Register (ZPR) defines how protection will - * be applied to every page which is a member of a given zone. - * The zone index bits (of ZSEL) in the PTE are used for software - * indicators. We use the 4 upper bits of virtual address to select - * the zone. We set all zones above TASK_SIZE to zero, allowing - * only kernel access as indicated in the PTE. For zones below - * TASK_SIZE, we set a 01 binary (a value of 10 will not work) - * to allow user access as indicated in the PTE. This also allows - * kernel access as indicated in the PTE. - */ - - for (i = 0, zpr = 0; i < TASK_SIZE >> 28; i++) - zpr |= 1 << (30 - i * 2); - - mtspr(SPRN_ZPR, zpr); - - flush_instruction_cache(); - - /* - * Set up the real-mode cache parameters for the exception vector - * handlers (which are run in real-mode). - */ - - mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */ - - /* - * Cache instruction and data space where the exception - * vectors and the kernel live in real-mode. - */ - - mtspr(SPRN_DCCR, 0xFFFF0000); /* 2GByte of data space at 0x0. */ - mtspr(SPRN_ICCR, 0xFFFF0000); /* 2GByte of instr. space at 0x0. 
*/ -} - -#define LARGE_PAGE_SIZE_16M (1<<24) -#define LARGE_PAGE_SIZE_4M (1<<22) - -unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) -{ - unsigned long v, s, mapped; - phys_addr_t p; - - v = KERNELBASE; - p = 0; - s = total_lowmem; - - if (IS_ENABLED(CONFIG_KFENCE)) - return 0; - - if (debug_pagealloc_enabled()) - return 0; - - if (strict_kernel_rwx_enabled()) - return 0; - - while (s >= LARGE_PAGE_SIZE_16M) { - pmd_t *pmdp; - unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW; - - pmdp = pmd_off_k(v); - *pmdp++ = __pmd(val); - *pmdp++ = __pmd(val); - *pmdp++ = __pmd(val); - *pmdp++ = __pmd(val); - - v += LARGE_PAGE_SIZE_16M; - p += LARGE_PAGE_SIZE_16M; - s -= LARGE_PAGE_SIZE_16M; - } - - while (s >= LARGE_PAGE_SIZE_4M) { - pmd_t *pmdp; - unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW; - - pmdp = pmd_off_k(v); - *pmdp = __pmd(val); - - v += LARGE_PAGE_SIZE_4M; - p += LARGE_PAGE_SIZE_4M; - s -= LARGE_PAGE_SIZE_4M; - } - - mapped = total_lowmem - s; - - /* If the size of RAM is not an exact power of two, we may not - * have covered RAM in its entirety with 16 and 4 MiB - * pages. Consequently, restrict the top end of RAM currently - * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail" - * coverage with normal-sized pages (or other reasons) do not - * attempt to allocate outside the allowed range. - */ - memblock_set_current_limit(mapped); - - return mapped; -} - -void setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - /* We don't currently support the first MEMBLOCK not mapping 0 - * physical on those processors - */ - BUG_ON(first_memblock_base != 0); - - /* 40x can only access 16MB at the moment (see head_40x.S) */ - memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); -} diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c index 1beae802bb1c..6d10c6d8be71 100644 --- a/arch/powerpc/mm/nohash/44x.c +++ b/arch/powerpc/mm/nohash/44x.c @@ -24,7 +24,7 @@ #include <asm/mmu.h> #include <asm/page.h> #include <asm/cacheflush.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/smp.h> #include <mm/mmu_decl.h> diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 6be6421086ed..ab1505cf42bf 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -11,6 +11,7 @@ #include <linux/hugetlb.h> #include <asm/fixmap.h> +#include <asm/pgalloc.h> #include <mm/mmu_decl.h> @@ -48,53 +49,47 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; } -static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va) -{ - if (hpd_val(*pmdp) == 0) { - pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K); - - if (!ptep) - return NULL; - - hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M); - hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M); - } - return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT); -} - static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa, pgprot_t prot, int psize, bool new) { pmd_t *pmdp = pmd_off_k(va); pte_t *ptep; - - if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M)) - return -EINVAL; + unsigned int shift = mmu_psize_to_shift(psize); if (new) { if (WARN_ON(slab_is_available())) return -EINVAL; - if (psize == MMU_PAGE_512K) + if (psize == MMU_PAGE_8M) { + if (WARN_ON(!pmd_none(*pmdp) || !pmd_none(*(pmdp + 1)))) + return -EINVAL; + + ptep = early_alloc_pgtable(PTE_FRAG_SIZE); + 
pmd_populate_kernel(&init_mm, pmdp, ptep); + + ptep = early_alloc_pgtable(PTE_FRAG_SIZE); + pmd_populate_kernel(&init_mm, pmdp + 1, ptep); + + ptep = (pte_t *)pmdp; + } else { ptep = early_pte_alloc_kernel(pmdp, va); - else - ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va); + /* The PTE should never be already present */ + if (WARN_ON(pte_present(*ptep) && pgprot_val(prot))) + return -EINVAL; + } } else { - if (psize == MMU_PAGE_512K) - ptep = pte_offset_kernel(pmdp, va); + if (psize == MMU_PAGE_8M) + ptep = (pte_t *)pmdp; else - ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT); + ptep = pte_offset_kernel(pmdp, va); } if (WARN_ON(!ptep)) return -ENOMEM; - /* The PTE should never be already present */ - if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot))) - return -EINVAL; - set_huge_pte_at(&init_mm, va, ptep, - pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize); + arch_make_huge_pte(pfn_pte(pa >> PAGE_SHIFT, prot), shift, 0), + 1UL << shift); return 0; } @@ -119,23 +114,30 @@ void __init mmu_mapin_immr(void) PAGE_KERNEL_NCG, MMU_PAGE_512K, true); } -static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, - pgprot_t prot, bool new) +static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, + pgprot_t prot, bool new) { unsigned long v = PAGE_OFFSET + offset; unsigned long p = offset; + int err = 0; - WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K)); + WARN_ON(!IS_ALIGNED(offset, SZ_16K) || !IS_ALIGNED(top, SZ_16K)); - for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K) - __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); - for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M) - __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); - for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K) - __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN(p, SZ_512K) && p < top && !err; p += SZ_16K, v += SZ_16K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new); + for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); + for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN_DOWN(top, SZ_16K) && p < top && !err; p += SZ_16K, v += SZ_16K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new); if (!new) flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top); + + return err; } unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) @@ -150,11 +152,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) mmu_mapin_immr(); - mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true); if (debug_pagealloc_enabled_or_kfence()) { top = boundary; } else { - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true); mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); } @@ -166,27 +168,34 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return top; } -void mmu_mark_initmem_nx(void) +int mmu_mark_initmem_nx(void) { unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned 
long sinittext = __pa(_sinittext); unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8; unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); + int err = 0; if (!debug_pagealloc_enabled_or_kfence()) - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); + err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); + + if (IS_ENABLED(CONFIG_PIN_TLB_TEXT)) + mmu_pin_tlb(block_mapped_ram, false); - mmu_pin_tlb(block_mapped_ram, false); + return err; } #ifdef CONFIG_STRICT_KERNEL_RWX -void mmu_mark_rodata_ro(void) +int mmu_mark_rodata_ro(void) { unsigned long sinittext = __pa(_sinittext); + int err; - mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); + err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false); if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) mmu_pin_tlb(block_mapped_ram, true); + + return err; } #endif @@ -200,6 +209,8 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, /* 8xx can only access 32MB at the moment */ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M)); + + BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE) < TASK_SIZE); } int pud_clear_huge(pud_t *pud) diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile index f3894e79d5f7..cf60c776c883 100644 --- a/arch/powerpc/mm/nohash/Makefile +++ b/arch/powerpc/mm/nohash/Makefile @@ -1,10 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) - obj-y += mmu_context.o tlb.o tlb_low.o kup.o -obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o -obj-$(CONFIG_40x) += 40x.o +obj-$(CONFIG_PPC_BOOK3E_64) += tlb_64e.o tlb_low_64e.o book3e_pgtable.o obj-$(CONFIG_44x) += 44x.o obj-$(CONFIG_PPC_8xx) += 8xx.o obj-$(CONFIG_PPC_E500) += e500.o diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c index 1c5e4ecbebeb..062e8785c1bb 100644 --- a/arch/powerpc/mm/nohash/book3e_pgtable.c +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c @@ -10,7 +10,7 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/dma.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <mm/mmu_decl.h> @@ -29,10 +29,10 @@ int __meminit vmemmap_create_mapping(unsigned long start, _PAGE_KERNEL_RW; /* PTEs only contain page size encodings up to 32M */ - BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf); + BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].shift - 10 > 0xf); /* Encode the size in the PTE */ - flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8; + flags |= (mmu_psize_defs[mmu_vmemmap_psize].shift - 10) << 8; /* For each PTE for that area, map things. 
Note that we don't * increment phys because all PTEs are of the large size and diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c index 921c3521ec11..266fb22131fc 100644 --- a/arch/powerpc/mm/nohash/e500.c +++ b/arch/powerpc/mm/nohash/e500.c @@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void) } #ifdef CONFIG_STRICT_KERNEL_RWX -void mmu_mark_rodata_ro(void) +int mmu_mark_rodata_ro(void) { unsigned long remapped; remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); - WARN_ON(__max_low_memory != remapped); + if (WARN_ON(__max_low_memory != remapped)) + return -EINVAL; + + return 0; } #endif -void mmu_mark_initmem_nx(void) +int mmu_mark_initmem_nx(void) { /* Everything is done in mmu_mark_rodata_ro() */ + return 0; } void setup_initial_memory_limit(phys_addr_t first_memblock_base, diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index cdff129abb14..5c8d1bb98b3e 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -376,7 +376,7 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size) create_kaslr_tlb_entry(1, tlb_virt, tlb_phys); } - /* Copy the kernel to it's new location and run */ + /* Copy the kernel to its new location and run */ memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz); flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz); diff --git a/arch/powerpc/mm/nohash/kup.c b/arch/powerpc/mm/nohash/kup.c index e1f7de2e54ec..c20c4f357fbf 100644 --- a/arch/powerpc/mm/nohash/kup.c +++ b/arch/powerpc/mm/nohash/kup.c @@ -15,8 +15,6 @@ void setup_kuap(bool disabled) { if (disabled) { - if (IS_ENABLED(CONFIG_40x)) - disable_kuep = true; if (smp_processor_id() == boot_cpuid) cur_cpu_spec->mmu_features &= ~MMU_FTR_KUAP; return; diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c index ccd5819b1bd9..a1a4e697251a 100644 --- a/arch/powerpc/mm/nohash/mmu_context.c +++ b/arch/powerpc/mm/nohash/mmu_context.c @@ -219,9 +219,6 @@ static void set_context(unsigned long id, pgd_t *pgd) /* sync */ mb(); } else if (kuap_is_disabled()) { - if (IS_ENABLED(CONFIG_40x)) - mb(); /* sync */ - mtspr(SPRN_PID, id); isync(); } @@ -306,7 +303,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, if (IS_ENABLED(CONFIG_BDI_SWITCH)) abatron_pteptrs[1] = next->pgd; set_context(id, next->pgd); -#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) +#if defined(CONFIG_BOOKE) && defined(CONFIG_PPC_KUAP) tsk->thread.pid = id; #endif raw_spin_unlock(&context_lock); @@ -388,21 +385,11 @@ void __init mmu_context_init(void) /* * Allocate the maps used by context management */ - context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); - if (!context_map) - panic("%s: Failed to allocate %zu bytes\n", __func__, - CTX_MAP_SIZE); - context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), + context_map = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES); + context_mm = memblock_alloc_or_panic(sizeof(void *) * (LAST_CONTEXT + 1), SMP_CACHE_BYTES); - if (!context_mm) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(void *) * (LAST_CONTEXT + 1)); if (IS_ENABLED(CONFIG_SMP)) { - stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); - if (!stale_map[boot_cpuid]) - panic("%s: Failed to allocate %zu bytes\n", __func__, - CTX_MAP_SIZE); - + stale_map[boot_cpuid] = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES); 
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead); diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 5ffa0af4328a..0a650742f3a0 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -37,7 +37,7 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/cputhreads.h> #include <asm/hugetlb.h> #include <asm/paca.h> @@ -53,37 +53,30 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, - .enc = BOOK3E_PAGESZ_4K, }, [MMU_PAGE_2M] = { .shift = 21, - .enc = BOOK3E_PAGESZ_2M, }, [MMU_PAGE_4M] = { .shift = 22, - .enc = BOOK3E_PAGESZ_4M, }, [MMU_PAGE_16M] = { .shift = 24, - .enc = BOOK3E_PAGESZ_16M, }, [MMU_PAGE_64M] = { .shift = 26, - .enc = BOOK3E_PAGESZ_64M, }, [MMU_PAGE_256M] = { .shift = 28, - .enc = BOOK3E_PAGESZ_256M, }, [MMU_PAGE_1G] = { .shift = 30, - .enc = BOOK3E_PAGESZ_1GB, }, }; static inline int mmu_get_tsize(int psize) { - return mmu_psize_defs[psize].enc; + return mmu_psize_defs[psize].shift - 10; } #else static inline int mmu_get_tsize(int psize) @@ -110,28 +103,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { }; #endif -/* The variables below are currently only used on 64-bit Book3E - * though this will probably be made common with other nohash - * implementations at some point - */ -#ifdef CONFIG_PPC64 - -int mmu_pte_psize; /* Page size used for PTE pages */ -int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ -int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ -unsigned long linear_map_top; /* Top of linear mapping */ - - -/* - * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug - * exceptions. This is used for bolted and e6500 TLB miss handlers which - * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, - * this is set to zero. 
- */ -int extlb_level_exc; - -#endif /* CONFIG_PPC64 */ - #ifdef CONFIG_PPC_E500 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); @@ -358,381 +329,7 @@ void tlb_flush(struct mmu_gather *tlb) flush_tlb_mm(tlb->mm); } -/* - * Below are functions specific to the 64-bit variant of Book3E though that - * may change in the future - */ - -#ifdef CONFIG_PPC64 - -/* - * Handling of virtual linear page tables or indirect TLB entries - * flushing when PTE pages are freed - */ -void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) -{ - int tsize = mmu_psize_defs[mmu_pte_psize].enc; - - if (book3e_htw_mode != PPC_HTW_NONE) { - unsigned long start = address & PMD_MASK; - unsigned long end = address + PMD_SIZE; - unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; - - /* This isn't the most optimal, ideally we would factor out the - * while preempt & CPU mask mucking around, or even the IPI but - * it will do for now - */ - while (start < end) { - __flush_tlb_page(tlb->mm, start, tsize, 1); - start += size; - } - } else { - unsigned long rmask = 0xf000000000000000ul; - unsigned long rid = (address & rmask) | 0x1000000000000000ul; - unsigned long vpte = address & ~rmask; - - vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; - vpte |= rid; - __flush_tlb_page(tlb->mm, vpte, tsize, 0); - } -} - -static void __init setup_page_sizes(void) -{ - unsigned int tlb0cfg; - unsigned int tlb0ps; - unsigned int eptcfg; - int i, psize; - -#ifdef CONFIG_PPC_E500 - unsigned int mmucfg = mfspr(SPRN_MMUCFG); - int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); - - if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { - unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); - unsigned int min_pg, max_pg; - - min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; - max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; - - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def; - unsigned int shift; - - def = &mmu_psize_defs[psize]; - shift = def->shift; - - if (shift == 0 || shift & 1) - continue; - - /* adjust to be in terms of 4^shift Kb */ - shift = (shift - 10) >> 1; - - if ((shift >= min_pg) && (shift <= max_pg)) - def->flags |= MMU_PAGE_SIZE_DIRECT; - } - - goto out; - } - - if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { - u32 tlb1cfg, tlb1ps; - - tlb0cfg = mfspr(SPRN_TLB0CFG); - tlb1cfg = mfspr(SPRN_TLB1CFG); - tlb1ps = mfspr(SPRN_TLB1PS); - eptcfg = mfspr(SPRN_EPTCFG); - - if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) - book3e_htw_mode = PPC_HTW_E6500; - - /* - * We expect 4K subpage size and unrestricted indirect size. - * The lack of a restriction on indirect size is a Freescale - * extension, indicated by PSn = 0 but SPSn != 0. 
- */ - if (eptcfg != 2) - book3e_htw_mode = PPC_HTW_NONE; - - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - - if (!def->shift) - continue; - - if (tlb1ps & (1U << (def->shift - 10))) { - def->flags |= MMU_PAGE_SIZE_DIRECT; - - if (book3e_htw_mode && psize == MMU_PAGE_2M) - def->flags |= MMU_PAGE_SIZE_INDIRECT; - } - } - - goto out; - } -#endif - - tlb0cfg = mfspr(SPRN_TLB0CFG); - tlb0ps = mfspr(SPRN_TLB0PS); - eptcfg = mfspr(SPRN_EPTCFG); - - /* Look for supported direct sizes */ - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - - if (tlb0ps & (1U << (def->shift - 10))) - def->flags |= MMU_PAGE_SIZE_DIRECT; - } - - /* Indirect page sizes supported ? */ - if ((tlb0cfg & TLBnCFG_IND) == 0 || - (tlb0cfg & TLBnCFG_PT) == 0) - goto out; - - book3e_htw_mode = PPC_HTW_IBM; - - /* Now, we only deal with one IND page size for each - * direct size. Hopefully all implementations today are - * unambiguous, but we might want to be careful in the - * future. - */ - for (i = 0; i < 3; i++) { - unsigned int ps, sps; - - sps = eptcfg & 0x1f; - eptcfg >>= 5; - ps = eptcfg & 0x1f; - eptcfg >>= 5; - if (!ps || !sps) - continue; - for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - - if (ps == (def->shift - 10)) - def->flags |= MMU_PAGE_SIZE_INDIRECT; - if (sps == (def->shift - 10)) - def->ind = ps + 10; - } - } - -out: - /* Cleanup array and print summary */ - pr_info("MMU: Supported page sizes\n"); - for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { - struct mmu_psize_def *def = &mmu_psize_defs[psize]; - const char *__page_type_names[] = { - "unsupported", - "direct", - "indirect", - "direct & indirect" - }; - if (def->flags == 0) { - def->shift = 0; - continue; - } - pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), - __page_type_names[def->flags & 0x3]); - } -} - -static void __init setup_mmu_htw(void) -{ - /* - * If we want to use HW tablewalk, enable it by patching the TLB miss - * handlers to branch to the one dedicated to it. - */ - - switch (book3e_htw_mode) { - case PPC_HTW_IBM: - patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); - patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); - break; -#ifdef CONFIG_PPC_E500 - case PPC_HTW_E6500: - extlb_level_exc = EX_TLB_SIZE; - patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); - patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); - break; -#endif - } - pr_info("MMU: Book3E HW tablewalk %s\n", - book3e_htw_mode != PPC_HTW_NONE ? 
"enabled" : "not supported"); -} - -/* - * Early initialization of the MMU TLB code - */ -static void early_init_this_mmu(void) -{ - unsigned int mas4; - - /* Set MAS4 based on page table setting */ - - mas4 = 0x4 << MAS4_WIMGED_SHIFT; - switch (book3e_htw_mode) { - case PPC_HTW_E6500: - mas4 |= MAS4_INDD; - mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; - mas4 |= MAS4_TLBSELD(1); - mmu_pte_psize = MMU_PAGE_2M; - break; - - case PPC_HTW_IBM: - mas4 |= MAS4_INDD; - mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; - mmu_pte_psize = MMU_PAGE_1M; - break; - - case PPC_HTW_NONE: - mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; - mmu_pte_psize = mmu_virtual_psize; - break; - } - mtspr(SPRN_MAS4, mas4); - -#ifdef CONFIG_PPC_E500 - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - unsigned int num_cams; - bool map = true; - - /* use a quarter of the TLBCAM for bolted linear map */ - num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; - - /* - * Only do the mapping once per core, or else the - * transient mapping would cause problems. - */ -#ifdef CONFIG_SMP - if (hweight32(get_tensr()) > 1) - map = false; -#endif - - if (map) - linear_map_top = map_mem_in_cams(linear_map_top, - num_cams, false, true); - } -#endif - - /* A sync won't hurt us after mucking around with - * the MMU configuration - */ - mb(); -} - -static void __init early_init_mmu_global(void) -{ - /* XXX This should be decided at runtime based on supported - * page sizes in the TLB, but for now let's assume 16M is - * always there and a good fit (which it probably is) - * - * Freescale booke only supports 4K pages in TLB0, so use that. - */ - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) - mmu_vmemmap_psize = MMU_PAGE_4K; - else - mmu_vmemmap_psize = MMU_PAGE_16M; - - /* XXX This code only checks for TLB 0 capabilities and doesn't - * check what page size combos are supported by the HW. It - * also doesn't handle the case where a separate array holds - * the IND entries from the array loaded by the PT. - */ - /* Look for supported page sizes */ - setup_page_sizes(); - - /* Look for HW tablewalk support */ - setup_mmu_htw(); - -#ifdef CONFIG_PPC_E500 - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - if (book3e_htw_mode == PPC_HTW_NONE) { - extlb_level_exc = EX_TLB_SIZE; - patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); - patch_exception(0x1e0, - exc_instruction_tlb_miss_bolted_book3e); - } - } -#endif - - /* Set the global containing the top of the linear mapping - * for use by the TLB miss code - */ - linear_map_top = memblock_end_of_DRAM(); - - ioremap_bot = IOREMAP_BASE; -} - -static void __init early_mmu_set_memory_limit(void) -{ -#ifdef CONFIG_PPC_E500 - if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - /* - * Limit memory so we dont have linear faults. - * Unlike memblock_set_current_limit, which limits - * memory available during early boot, this permanently - * reduces the memory available to Linux. We need to - * do this because highmem is not supported on 64-bit. - */ - memblock_enforce_memory_limit(linear_map_top); - } -#endif - - memblock_set_current_limit(linear_map_top); -} - -/* boot cpu only */ -void __init early_init_mmu(void) -{ - early_init_mmu_global(); - early_init_this_mmu(); - early_mmu_set_memory_limit(); -} - -void early_init_mmu_secondary(void) -{ - early_init_this_mmu(); -} - -void setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - /* On non-FSL Embedded 64-bit, we adjust the RMA size to match - * the bolted TLB entry. 
We know for now that only 1G - * entries are supported though that may eventually - * change. - * - * on FSL Embedded 64-bit, usually all RAM is bolted, but with - * unusual memory sizes it's possible for some RAM to not be mapped - * (such RAM is not used at all by Linux, since we don't support - * highmem on 64-bit). We limit ppc64_rma_size to what would be - * mappable if this memblock is the only one. Additional memblocks - * can only increase, not decrease, the amount that ends up getting - * mapped. We still limit max to 1G even if we'll eventually map - * more. This is due to what the early init code is set up to do. - * - * We crop it to the size of the first MEMBLOCK to - * avoid going over total available memory just in case... - */ -#ifdef CONFIG_PPC_E500 - if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { - unsigned long linear_sz; - unsigned int num_cams; - - /* use a quarter of the TLBCAM for bolted linear map */ - num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; - - linear_sz = map_mem_in_cams(first_memblock_size, num_cams, - true, true); - - ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); - } else -#endif - ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); - - /* Finally limit subsequent allocations */ - memblock_set_current_limit(first_memblock_base + ppc64_rma_size); -} -#else /* ! CONFIG_PPC64 */ +#ifndef CONFIG_PPC64 void __init early_init_mmu(void) { unsigned long root = of_get_flat_dt_root(); diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c new file mode 100644 index 000000000000..4f925adf2695 --- /dev/null +++ b/arch/powerpc/mm/nohash/tlb_64e.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org> + * IBM Corp. + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/pagemap.h> +#include <linux/memblock.h> + +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm/tlb.h> +#include <asm/text-patching.h> +#include <asm/cputhreads.h> + +#include <mm/mmu_decl.h> + +/* The variables below are currently only used on 64-bit Book3E + * though this will probably be made common with other nohash + * implementations at some point + */ +static int mmu_pte_psize; /* Page size used for PTE pages */ +int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ +int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ +unsigned long linear_map_top; /* Top of linear mapping */ + + +/* + * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug + * exceptions. This is used for bolted and e6500 TLB miss handlers which + * do not modify this SPRG in the TLB miss code; for other TLB miss handlers, + * this is set to zero. 
+ */ +int extlb_level_exc; + +/* + * Handling of virtual linear page tables or indirect TLB entries + * flushing when PTE pages are freed + */ +void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) +{ + int tsize = mmu_psize_defs[mmu_pte_psize].shift - 10; + + if (book3e_htw_mode != PPC_HTW_NONE) { + unsigned long start = address & PMD_MASK; + unsigned long end = address + PMD_SIZE; + unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; + + /* This isn't the most optimal, ideally we would factor out the + * while preempt & CPU mask mucking around, or even the IPI but + * it will do for now + */ + while (start < end) { + __flush_tlb_page(tlb->mm, start, tsize, 1); + start += size; + } + } else { + unsigned long rmask = 0xf000000000000000ul; + unsigned long rid = (address & rmask) | 0x1000000000000000ul; + unsigned long vpte = address & ~rmask; + + vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful; + vpte |= rid; + __flush_tlb_page(tlb->mm, vpte, tsize, 0); + } +} + +static void __init setup_page_sizes(void) +{ + unsigned int tlb0cfg; + unsigned int eptcfg; + int psize; + + unsigned int mmucfg = mfspr(SPRN_MMUCFG); + + if ((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { + unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); + unsigned int min_pg, max_pg; + + min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; + max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + struct mmu_psize_def *def; + unsigned int shift; + + def = &mmu_psize_defs[psize]; + shift = def->shift; + + if (shift == 0 || shift & 1) + continue; + + /* adjust to be in terms of 4^shift Kb */ + shift = (shift - 10) >> 1; + + if ((shift >= min_pg) && (shift <= max_pg)) + def->flags |= MMU_PAGE_SIZE_DIRECT; + } + + goto out; + } + + if ((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { + u32 tlb1cfg, tlb1ps; + + tlb0cfg = mfspr(SPRN_TLB0CFG); + tlb1cfg = mfspr(SPRN_TLB1CFG); + tlb1ps = mfspr(SPRN_TLB1PS); + eptcfg = mfspr(SPRN_EPTCFG); + + if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) + book3e_htw_mode = PPC_HTW_E6500; + + /* + * We expect 4K subpage size and unrestricted indirect size. + * The lack of a restriction on indirect size is a Freescale + * extension, indicated by PSn = 0 but SPSn != 0. 
+ */ + if (eptcfg != 2) + book3e_htw_mode = PPC_HTW_NONE; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + struct mmu_psize_def *def = &mmu_psize_defs[psize]; + + if (!def->shift) + continue; + + if (tlb1ps & (1U << (def->shift - 10))) { + def->flags |= MMU_PAGE_SIZE_DIRECT; + + if (book3e_htw_mode && psize == MMU_PAGE_2M) + def->flags |= MMU_PAGE_SIZE_INDIRECT; + } + } + + goto out; + } +out: + /* Cleanup array and print summary */ + pr_info("MMU: Supported page sizes\n"); + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { + struct mmu_psize_def *def = &mmu_psize_defs[psize]; + const char *__page_type_names[] = { + "unsupported", + "direct", + "indirect", + "direct & indirect" + }; + if (def->flags == 0) { + def->shift = 0; + continue; + } + pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10), + __page_type_names[def->flags & 0x3]); + } +} + +/* + * Early initialization of the MMU TLB code + */ +static void early_init_this_mmu(void) +{ + unsigned int mas4; + + /* Set MAS4 based on page table setting */ + + mas4 = 0x4 << MAS4_WIMGED_SHIFT; + switch (book3e_htw_mode) { + case PPC_HTW_E6500: + mas4 |= MAS4_INDD; + mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; + mas4 |= MAS4_TLBSELD(1); + mmu_pte_psize = MMU_PAGE_2M; + break; + + case PPC_HTW_NONE: + mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; + mmu_pte_psize = mmu_virtual_psize; + break; + } + mtspr(SPRN_MAS4, mas4); + + unsigned int num_cams; + bool map = true; + + /* use a quarter of the TLBCAM for bolted linear map */ + num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; + + /* + * Only do the mapping once per core, or else the + * transient mapping would cause problems. + */ +#ifdef CONFIG_SMP + if (hweight32(get_tensr()) > 1) + map = false; +#endif + + if (map) + linear_map_top = map_mem_in_cams(linear_map_top, + num_cams, false, true); + + /* A sync won't hurt us after mucking around with + * the MMU configuration + */ + mb(); +} + +static void __init early_init_mmu_global(void) +{ + /* + * Freescale booke only supports 4K pages in TLB0, so use that. + */ + mmu_vmemmap_psize = MMU_PAGE_4K; + + /* XXX This code only checks for TLB 0 capabilities and doesn't + * check what page size combos are supported by the HW. It + * also doesn't handle the case where a separate array holds + * the IND entries from the array loaded by the PT. + */ + /* Look for supported page sizes */ + setup_page_sizes(); + + /* + * If we want to use HW tablewalk, enable it by patching the TLB miss + * handlers to branch to the one dedicated to it. + */ + extlb_level_exc = EX_TLB_SIZE; + switch (book3e_htw_mode) { + case PPC_HTW_E6500: + patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); + patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); + break; + } + + pr_info("MMU: Book3E HW tablewalk %s\n", + book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported"); + + /* Set the global containing the top of the linear mapping + * for use by the TLB miss code + */ + linear_map_top = memblock_end_of_DRAM(); + + ioremap_bot = IOREMAP_BASE; +} + +static void __init early_mmu_set_memory_limit(void) +{ + /* + * Limit memory so we dont have linear faults. + * Unlike memblock_set_current_limit, which limits + * memory available during early boot, this permanently + * reduces the memory available to Linux. We need to + * do this because highmem is not supported on 64-bit. 
+ */ + memblock_enforce_memory_limit(linear_map_top); + + memblock_set_current_limit(linear_map_top); +} + +/* boot cpu only */ +void __init early_init_mmu(void) +{ + early_init_mmu_global(); + early_init_this_mmu(); + early_mmu_set_memory_limit(); +} + +void early_init_mmu_secondary(void) +{ + early_init_this_mmu(); +} + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* + * On FSL Embedded 64-bit, usually all RAM is bolted, but with + * unusual memory sizes it's possible for some RAM to not be mapped + * (such RAM is not used at all by Linux, since we don't support + * highmem on 64-bit). We limit ppc64_rma_size to what would be + * mappable if this memblock is the only one. Additional memblocks + * can only increase, not decrease, the amount that ends up getting + * mapped. We still limit max to 1G even if we'll eventually map + * more. This is due to what the early init code is set up to do. + * + * We crop it to the size of the first MEMBLOCK to + * avoid going over total available memory just in case... + */ + unsigned long linear_sz; + unsigned int num_cams; + + /* use a quarter of the TLBCAM for bolted linear map */ + num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; + + linear_sz = map_mem_in_cams(first_memblock_size, num_cams, true, true); + ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); + + /* Finally limit subsequent allocations */ + memblock_set_current_limit(first_memblock_base + ppc64_rma_size); +} diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index e1199608ff4d..c4d296e73731 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -32,32 +32,7 @@ #include <asm/asm-compat.h> #include <asm/feature-fixups.h> -#if defined(CONFIG_40x) - -/* - * 40x implementation needs only tlbil_va - */ -_GLOBAL(__tlbil_va) - /* We run the search with interrupts disabled because we have to change - * the PID and I don't want to preempt when that happens. - */ - mfmsr r5 - mfspr r6,SPRN_PID - wrteei 0 - mtspr SPRN_PID,r4 - tlbsx. r3, 0, r3 - mtspr SPRN_PID,r6 - wrtee r5 - bne 1f - sync - /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is - * clear. Since 25 is the V bit in the TLB_TAG, loading this value - * will invalidate the TLB entry. */ - tlbwe r3, r3, TLB_TAG - isync -1: blr - -#elif defined(CONFIG_PPC_8xx) +#if defined(CONFIG_PPC_8xx) /* * Nothing to do for 8xx, everything is inline diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index 7e0b8fe1c279..de568297d5c5 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -450,11 +450,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SMT) tlb_miss_huge_e6500: beq tlb_miss_fault_e6500 - li r10,1 - andi. r15,r14,HUGEPD_SHIFT_MASK@l /* r15 = psize */ - rldimi r14,r10,63,0 /* Set PD_HUGE */ - xor r14,r14,r15 /* Clear size bits */ - ldx r14,0,r14 + rlwinm r15,r14,32-_PAGE_PSIZE_SHIFT,0x1e /* * Now we build the MAS for a huge page. 
@@ -465,7 +461,6 @@ tlb_miss_huge_e6500: * MAS 2,3+7: Needs to be redone similar to non-tablewalk handler */ - subi r15,r15,10 /* Convert psize to tsize */ mfspr r10,SPRN_MAS1 rlwinm r10,r10,0,~MAS1_IND rlwimi r10,r15,MAS1_TSIZE_SHIFT,MAS1_TSIZE_MASK @@ -511,232 +506,6 @@ itlb_miss_fault_e6500: tlb_epilog_bolted b exc_instruction_storage_book3e -/********************************************************************** - * * - * TLB miss handling for Book3E with TLB reservation and HES support * - * * - **********************************************************************/ - - -/* Data TLB miss */ - START_EXCEPTION(data_tlb_miss) - TLB_MISS_PROLOG - - /* Now we handle the fault proper. We only save DEAR in normal - * fault case since that's the only interesting values here. - * We could probably also optimize by not saving SRR0/1 in the - * linear mapping case but I'll leave that for later - */ - mfspr r14,SPRN_ESR - mfspr r16,SPRN_DEAR /* get faulting address */ - srdi r15,r16,44 /* get region */ - xoris r15,r15,0xc - cmpldi cr0,r15,0 /* linear mapping ? */ - beq tlb_load_linear /* yes -> go to linear map load */ - cmpldi cr1,r15,1 /* vmalloc mapping ? */ - - /* The page tables are mapped virtually linear. At this point, though, - * we don't know whether we are trying to fault in a first level - * virtual address or a virtual page table address. We can get that - * from bit 0x1 of the region ID which we have set for a page table - */ - andis. r10,r15,0x1 - bne- virt_page_table_tlb_miss - - std r14,EX_TLB_ESR(r12); /* save ESR */ - std r16,EX_TLB_DEAR(r12); /* save DEAR */ - - /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ - li r11,_PAGE_PRESENT - oris r11,r11,_PAGE_ACCESSED@h - - /* We do the user/kernel test for the PID here along with the RW test - */ - srdi. r15,r16,60 /* Check for user region */ - - /* We pre-test some combination of permissions to avoid double - * faults: - * - * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE - * ESR_ST is 0x00800000 - * _PAGE_BAP_SW is 0x00000010 - * So the shift is >> 19. This tests for supervisor writeability. - * If the page happens to be supervisor writeable and not user - * writeable, we will take a new fault later, but that should be - * a rare enough case. - * - * We also move ESR_ST in _PAGE_DIRTY position - * _PAGE_DIRTY is 0x00001000 so the shift is >> 11 - * - * MAS1 is preset for all we need except for TID that needs to - * be cleared for kernel translations - */ - rlwimi r11,r14,32-19,27,27 - rlwimi r11,r14,32-16,19,19 - beq normal_tlb_miss_user - /* XXX replace the RMW cycles with immediate loads + writes */ -1: mfspr r10,SPRN_MAS1 - rlwinm r10,r10,0,16,1 /* Clear TID */ - mtspr SPRN_MAS1,r10 - beq+ cr1,normal_tlb_miss - - /* We got a crappy address, just fault with whatever DEAR and ESR - * are here - */ - TLB_MISS_EPILOG_ERROR - b exc_data_storage_book3e - -/* Instruction TLB miss */ - START_EXCEPTION(instruction_tlb_miss) - TLB_MISS_PROLOG - - /* If we take a recursive fault, the second level handler may need - * to know whether we are handling a data or instruction fault in - * order to get to the right store fault handler. We provide that - * info by writing a crazy value in ESR in our exception frame - */ - li r14,-1 /* store to exception frame is done later */ - - /* Now we handle the fault proper. We only save DEAR in the non - * linear mapping case since we know the linear mapping case will - * not re-enter. 
We could indeed optimize and also not save SRR0/1 - * in the linear mapping case but I'll leave that for later - * - * Faulting address is SRR0 which is already in r16 - */ - srdi r15,r16,44 /* get region */ - xoris r15,r15,0xc - cmpldi cr0,r15,0 /* linear mapping ? */ - beq tlb_load_linear /* yes -> go to linear map load */ - cmpldi cr1,r15,1 /* vmalloc mapping ? */ - - /* We do the user/kernel test for the PID here along with the RW test - */ - li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */ - oris r11,r11,_PAGE_ACCESSED@h - - srdi. r15,r16,60 /* Check for user region */ - std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */ - beq normal_tlb_miss_user - - li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */ - oris r11,r11,_PAGE_ACCESSED@h - /* XXX replace the RMW cycles with immediate loads + writes */ - mfspr r10,SPRN_MAS1 - rlwinm r10,r10,0,16,1 /* Clear TID */ - mtspr SPRN_MAS1,r10 - beq+ cr1,normal_tlb_miss - - /* We got a crappy address, just fault */ - TLB_MISS_EPILOG_ERROR - b exc_instruction_storage_book3e - -/* - * This is the guts of the first-level TLB miss handler for direct - * misses. We are entered with: - * - * r16 = faulting address - * r15 = region ID - * r14 = crap (free to use) - * r13 = PACA - * r12 = TLB exception frame in PACA - * r11 = PTE permission mask - * r10 = crap (free to use) - */ -normal_tlb_miss_user: -#ifdef CONFIG_PPC_KUAP - mfspr r14,SPRN_MAS1 - rlwinm. r14,r14,0,0x3fff0000 - beq- normal_tlb_miss_access_fault /* KUAP fault */ -#endif -normal_tlb_miss: - /* So we first construct the page table address. We do that by - * shifting the bottom of the address (not the region ID) by - * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and - * or'ing the fourth high bit. - * - * NOTE: For 64K pages, we do things slightly differently in - * order to handle the weird page table format used by linux - */ - srdi r15,r16,44 - oris r10,r15,0x1 - rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4 - sldi r15,r10,44 - clrrdi r14,r14,19 - or r10,r15,r14 - - ld r14,0(r10) - -finish_normal_tlb_miss: - /* Check if required permissions are met */ - andc. r15,r11,r14 - bne- normal_tlb_miss_access_fault - - /* Now we build the MAS: - * - * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG - * MAS 1 : Almost fully setup - * - PID already updated by caller if necessary - * - TSIZE need change if !base page size, not - * yet implemented for now - * MAS 2 : Defaults not useful, need to be redone - * MAS 3+7 : Needs to be done - * - * TODO: mix up code below for better scheduling - */ - clrrdi r10,r16,12 /* Clear low crap in EA */ - rlwimi r10,r14,32-19,27,31 /* Insert WIMGE */ - mtspr SPRN_MAS2,r10 - - /* Check page size, if not standard, update MAS1 */ - rldicl r10,r14,64-8,64-8 - cmpldi cr0,r10,BOOK3E_PAGESZ_4K - beq- 1f - mfspr r11,SPRN_MAS1 - rlwimi r11,r14,31,21,24 - rlwinm r11,r11,0,21,19 - mtspr SPRN_MAS1,r11 -1: - /* Move RPN in position */ - rldicr r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT - clrldi r15,r11,12 /* Clear crap at the top */ - rlwimi r15,r14,32-8,22,25 /* Move in U bits */ - rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */ - - /* Mask out SW and UW if !DIRTY (XXX optimize this !) */ - andi. r11,r14,_PAGE_DIRTY - bne 1f - li r11,MAS3_SW|MAS3_UW - andc r15,r15,r11 -1: - srdi r16,r15,32 - mtspr SPRN_MAS3,r15 - mtspr SPRN_MAS7,r16 - - tlbwe - -normal_tlb_miss_done: - /* We don't bother with restoring DEAR or ESR since we know we are - * level 0 and just going back to userland. 
They are only needed - * if you are going to take an access fault - */ - TLB_MISS_EPILOG_SUCCESS - rfi - -normal_tlb_miss_access_fault: - /* We need to check if it was an instruction miss */ - andi. r10,r11,_PAGE_BAP_UX - bne 1f - ld r14,EX_TLB_DEAR(r12) - ld r15,EX_TLB_ESR(r12) - mtspr SPRN_DEAR,r14 - mtspr SPRN_ESR,r15 - TLB_MISS_EPILOG_ERROR - b exc_data_storage_book3e -1: TLB_MISS_EPILOG_ERROR - b exc_instruction_storage_book3e - - /* * This is the guts of the second-level TLB miss handler for direct * misses. We are entered with: @@ -893,201 +662,6 @@ virt_page_table_tlb_miss_whacko_fault: TLB_MISS_EPILOG_ERROR b exc_data_storage_book3e - -/************************************************************** - * * - * TLB miss handling for Book3E with hw page table support * - * * - **************************************************************/ - - -/* Data TLB miss */ - START_EXCEPTION(data_tlb_miss_htw) - TLB_MISS_PROLOG - - /* Now we handle the fault proper. We only save DEAR in normal - * fault case since that's the only interesting values here. - * We could probably also optimize by not saving SRR0/1 in the - * linear mapping case but I'll leave that for later - */ - mfspr r14,SPRN_ESR - mfspr r16,SPRN_DEAR /* get faulting address */ - srdi r11,r16,44 /* get region */ - xoris r11,r11,0xc - cmpldi cr0,r11,0 /* linear mapping ? */ - beq tlb_load_linear /* yes -> go to linear map load */ - cmpldi cr1,r11,1 /* vmalloc mapping ? */ - - /* We do the user/kernel test for the PID here along with the RW test - */ - srdi. r11,r16,60 /* Check for user region */ - ld r15,PACAPGD(r13) /* Load user pgdir */ - beq htw_tlb_miss - - /* XXX replace the RMW cycles with immediate loads + writes */ -1: mfspr r10,SPRN_MAS1 - rlwinm r10,r10,0,16,1 /* Clear TID */ - mtspr SPRN_MAS1,r10 - ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ - beq+ cr1,htw_tlb_miss - - /* We got a crappy address, just fault with whatever DEAR and ESR - * are here - */ - TLB_MISS_EPILOG_ERROR - b exc_data_storage_book3e - -/* Instruction TLB miss */ - START_EXCEPTION(instruction_tlb_miss_htw) - TLB_MISS_PROLOG - - /* If we take a recursive fault, the second level handler may need - * to know whether we are handling a data or instruction fault in - * order to get to the right store fault handler. We provide that - * info by keeping a crazy value for ESR in r14 - */ - li r14,-1 /* store to exception frame is done later */ - - /* Now we handle the fault proper. We only save DEAR in the non - * linear mapping case since we know the linear mapping case will - * not re-enter. We could indeed optimize and also not save SRR0/1 - * in the linear mapping case but I'll leave that for later - * - * Faulting address is SRR0 which is already in r16 - */ - srdi r11,r16,44 /* get region */ - xoris r11,r11,0xc - cmpldi cr0,r11,0 /* linear mapping ? */ - beq tlb_load_linear /* yes -> go to linear map load */ - cmpldi cr1,r11,1 /* vmalloc mapping ? */ - - /* We do the user/kernel test for the PID here along with the RW test - */ - srdi. r11,r16,60 /* Check for user region */ - ld r15,PACAPGD(r13) /* Load user pgdir */ - beq htw_tlb_miss - - /* XXX replace the RMW cycles with immediate loads + writes */ -1: mfspr r10,SPRN_MAS1 - rlwinm r10,r10,0,16,1 /* Clear TID */ - mtspr SPRN_MAS1,r10 - ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */ - beq+ htw_tlb_miss - - /* We got a crappy address, just fault */ - TLB_MISS_EPILOG_ERROR - b exc_instruction_storage_book3e - - -/* - * This is the guts of the second-level TLB miss handler for direct - * misses. 
We are entered with: - * - * r16 = virtual page table faulting address - * r15 = PGD pointer - * r14 = ESR - * r13 = PACA - * r12 = TLB exception frame in PACA - * r11 = crap (free to use) - * r10 = crap (free to use) - * - * It can be re-entered by the linear mapping miss handler. However, to - * avoid too much complication, it will save/restore things for us - */ -htw_tlb_miss: -#ifdef CONFIG_PPC_KUAP - mfspr r10,SPRN_MAS1 - rlwinm. r10,r10,0,0x3fff0000 - beq- htw_tlb_miss_fault /* KUAP fault */ -#endif - /* Search if we already have a TLB entry for that virtual address, and - * if we do, bail out. - * - * MAS1:IND should be already set based on MAS4 - */ - PPC_TLBSRX_DOT(0,R16) - beq htw_tlb_miss_done - - /* Now, we need to walk the page tables. First check if we are in - * range. - */ - rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 - bne- htw_tlb_miss_fault - - /* Get the PGD pointer */ - cmpldi cr0,r15,0 - beq- htw_tlb_miss_fault - - /* Get to PGD entry */ - rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3 - clrrdi r10,r11,3 - ldx r15,r10,r15 - cmpdi cr0,r15,0 - bge htw_tlb_miss_fault - - /* Get to PUD entry */ - rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3 - clrrdi r10,r11,3 - ldx r15,r10,r15 - cmpdi cr0,r15,0 - bge htw_tlb_miss_fault - - /* Get to PMD entry */ - rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3 - clrrdi r10,r11,3 - ldx r15,r10,r15 - cmpdi cr0,r15,0 - bge htw_tlb_miss_fault - - /* Ok, we're all right, we can now create an indirect entry for - * a 1M or 256M page. - * - * The last trick is now that because we use "half" pages for - * the HTW (1M IND is 2K and 256M IND is 32K) we need to account - * for an added LSB bit to the RPN. For 64K pages, there is no - * problem as we already use 32K arrays (half PTE pages), but for - * 4K page we need to extract a bit from the virtual address and - * insert it into the "PA52" bit of the RPN. - */ - rlwimi r15,r16,32-9,20,20 - /* Now we build the MAS: - * - * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG - * MAS 1 : Almost fully setup - * - PID already updated by caller if necessary - * - TSIZE for now is base ind page size always - * MAS 2 : Use defaults - * MAS 3+7 : Needs to be done - */ - ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) - - srdi r16,r10,32 - mtspr SPRN_MAS3,r10 - mtspr SPRN_MAS7,r16 - - tlbwe - -htw_tlb_miss_done: - /* We don't bother with restoring DEAR or ESR since we know we are - * level 0 and just going back to userland. They are only needed - * if you are going to take an access fault - */ - TLB_MISS_EPILOG_SUCCESS - rfi - -htw_tlb_miss_fault: - /* We need to check if it was an instruction miss. We know this - * though because r14 would contain -1 - */ - cmpdi cr0,r14,-1 - beq 1f - mtspr SPRN_DEAR,r16 - mtspr SPRN_ESR,r14 - TLB_MISS_EPILOG_ERROR - b exc_data_storage_book3e -1: TLB_MISS_EPILOG_ERROR - b exc_instruction_storage_book3e - /* * This is the guts of "any" level TLB miss handler for kernel linear * mapping misses. 
We are entered with: diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index a490724e84ad..603a0f652ba6 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -43,11 +43,9 @@ static char *cmdline __initdata; int numa_cpu_lookup_table[NR_CPUS]; cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; -struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(node_to_cpumask_map); -EXPORT_SYMBOL(node_data); static int primary_domain_index; static int n_mem_addr_cells, n_mem_size_cells; @@ -896,7 +894,7 @@ static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb, static int __init parse_numa_properties(void) { - struct device_node *memory; + struct device_node *memory, *pci; int default_nid = 0; unsigned long i; const __be32 *associativity; @@ -1010,6 +1008,18 @@ new_range: goto new_range; } + for_each_node_by_name(pci, "pci") { + int nid = NUMA_NO_NODE; + + associativity = of_get_associativity(pci); + if (associativity) { + nid = associativity_to_nid(associativity); + initialize_form1_numa_distance(associativity); + } + if (likely(nid >= 0) && !node_online(nid)) + node_set_online(nid); + } + /* * Now do the same thing for each MEMBLOCK listed in the * ibm,dynamic-memory property in the @@ -1083,27 +1093,9 @@ void __init dump_numa_cpu_topology(void) static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { u64 spanned_pages = end_pfn - start_pfn; - const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); - u64 nd_pa; - void *nd; - int tnid; - - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) - panic("Cannot allocate %zu bytes for node %d data\n", - nd_size, nid); - - nd = __va(nd_pa); - - /* report and initialize */ - pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n", - nd_pa, nd_pa + nd_size - 1); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) - pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid); - - node_data[nid] = nd; - memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + + alloc_node_data(nid); + NODE_DATA(nid)->node_id = nid; NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = spanned_pages; @@ -1344,7 +1336,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr) return nid; } -static u64 hot_add_drconf_memory_max(void) +u64 hot_add_drconf_memory_max(void) { struct device_node *memory = NULL; struct device_node *dn = NULL; diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index 8c31802f97e8..77e55eac16e4 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag) count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } @@ -56,19 +56,17 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel) { void *ret = NULL; struct ptdesc *ptdesc; + gfp_t gfp = PGALLOC_GFP; - if (!kernel) { - ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0); - if (!ptdesc) - return NULL; - if (!pagetable_pte_ctor(ptdesc)) { - pagetable_free(ptdesc); - return NULL; - } - } else { - ptdesc = pagetable_alloc(PGALLOC_GFP, 0); - if (!ptdesc) - return NULL; + if (!kernel) + gfp |= __GFP_ACCOUNT; + + ptdesc = pagetable_alloc(gfp, 0); + if (!ptdesc) + return NULL; + if (!pagetable_pte_ctor(mm, ptdesc)) { + 
pagetable_free(ptdesc); + return NULL; } atomic_set(&ptdesc->pt_frag_refcount, 1); @@ -111,7 +109,7 @@ static void pte_free_now(struct rcu_head *head) struct ptdesc *ptdesc; ptdesc = container_of(head, struct ptdesc, pt_rcu_head); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } @@ -124,22 +122,20 @@ void pte_fragment_free(unsigned long *table, int kernel) BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) { - if (kernel) - pagetable_free(ptdesc); - else if (folio_test_clear_active(ptdesc_folio(ptdesc))) - call_rcu(&ptdesc->pt_rcu_head, pte_free_now); - else + if (kernel || !folio_test_clear_active(ptdesc_folio(ptdesc))) pte_free_now(&ptdesc->pt_rcu_head); + else + call_rcu(&ptdesc->pt_rcu_head, pte_free_now); } } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { - struct page *page; + struct folio *folio; - page = virt_to_page(pgtable); - SetPageActive(page); + folio = virt_to_folio(pgtable); + folio_set_active(folio); pte_fragment_free((unsigned long *)pgtable, 0); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 9e7ba9c3851f..61df5aed7989 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -297,11 +297,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, } #if defined(CONFIG_PPC_8xx) -void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - pte_t pte, unsigned long sz) + +#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS) +/* We need the same lock to protect the PMD table and the two PTE tables. */ +#error "8M hugetlb folios are incompatible with split page table locks" +#endif + +static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val) { - pmd_t *pmd = pmd_off(mm, addr); - pte_basic_t val; pte_basic_t *entry = (pte_basic_t *)ptep; int num, i; @@ -311,15 +314,60 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, */ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); - pte = set_pte_filter(pte, addr); - - val = pte_val(pte); - num = number_of_cells_per_pte(pmd, val, 1); for (i = 0; i < num; i++, entry++, val += SZ_4K) *entry = val; } + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte, unsigned long sz) +{ + pmd_t *pmdp = pmd_off(mm, addr); + + pte = set_pte_filter(pte, addr); + + if (sz == SZ_8M) { /* Flag both PMD entries as 8M and fill both page tables */ + *pmdp = __pmd(pmd_val(*pmdp) | _PMD_PAGE_8M); + *(pmdp + 1) = __pmd(pmd_val(*(pmdp + 1)) | _PMD_PAGE_8M); + + __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte)); + __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M); + } else { + __set_huge_pte_at(pmdp, ptep, pte_val(pte)); + } +} +#else +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte, unsigned long sz) +{ + unsigned long pdsize; + int i; + + pte = set_pte_filter(pte, addr); + + /* + * Make sure hardware valid bit is not set. We don't do + * tlb flush for this update. 
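+ * (pte_protnone() entries keep the hardware-invalid encoding, which + * is why they are tolerated by the check below.)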
+ */ + VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); + + if (sz < PMD_SIZE) + pdsize = PAGE_SIZE; + else if (sz < PUD_SIZE) + pdsize = PMD_SIZE; + else if (sz < P4D_SIZE) + pdsize = PUD_SIZE; + else if (sz < PGDIR_SIZE) + pdsize = P4D_SIZE; + else + pdsize = PGDIR_SIZE; + + for (i = 0; i < sz / pdsize; i++, ptep++, addr += pdsize) { + __set_pte_at(mm, addr, ptep, pte, 0); + pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT)); + } +} #endif #endif /* CONFIG_HUGETLB_PAGE */ @@ -350,7 +398,7 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr) */ if (pmd_none(*pmd)) return; - pte = pte_offset_map_nolock(mm, pmd, addr, &ptl); + pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl); BUG_ON(!pte); assert_spin_locked(ptl); pte_unmap(pte); @@ -367,11 +415,10 @@ unsigned long vmalloc_to_phys(void *va) EXPORT_SYMBOL_GPL(vmalloc_to_phys); /* - * We have 4 cases for pgds and pmds: + * We have 3 cases for pgds and pmds: * (1) invalid (all zeroes) * (2) pointer to next table, as normal; bottom 6 bits == 0 * (3) leaf pte for huge page _PAGE_PTE set - * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table * * So long as we atomically load page table pointers we are safe against teardown, * we can follow the address down to the page and take a ref on it. @@ -382,11 +429,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, bool *is_thp, unsigned *hpage_shift) { pgd_t *pgdp; +#ifdef CONFIG_PPC64 p4d_t p4d, *p4dp; pud_t pud, *pudp; +#endif pmd_t pmd, *pmdp; pte_t *ret_pte; - hugepd_t *hpdp = NULL; unsigned pdshift; if (hpage_shift) @@ -401,8 +449,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, * page fault or a page unmap. The return pte_t * is still not * stable. So should be checked there for above conditions. * Top level is an exception because it is folded into p4d. + * + * On PPC32, P4D/PUD/PMD are folded into PGD so go straight to + * PMD level. 
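+ * With those levels folded, p4d_offset()/pud_offset()/pmd_offset() + * simply pass the pgd entry through, so the chained lookup in the + * PPC32 path below costs nothing.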
*/ pgdp = pgdir + pgd_index(ea); +#ifdef CONFIG_PPC64 p4dp = p4d_offset(pgdp, ea); p4d = READ_ONCE(*p4dp); pdshift = P4D_SHIFT; @@ -415,11 +467,6 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, goto out; } - if (is_hugepd(__hugepd(p4d_val(p4d)))) { - hpdp = (hugepd_t *)&p4d; - goto out_huge; - } - /* * Even if we end up with an unmap, the pgtable will not * be freed, because we do an rcu free and here we are @@ -437,13 +484,11 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, goto out; } - if (is_hugepd(__hugepd(pud_val(pud)))) { - hpdp = (hugepd_t *)&pud; - goto out_huge; - } - - pdshift = PMD_SHIFT; pmdp = pmd_offset(&pud, ea); +#else + pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea); +#endif + pdshift = PMD_SHIFT; pmd = READ_ONCE(*pmdp); /* @@ -476,19 +521,8 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, goto out; } - if (is_hugepd(__hugepd(pmd_val(pmd)))) { - hpdp = (hugepd_t *)&pmd; - goto out_huge; - } - return pte_offset_kernel(&pmd, ea); -out_huge: - if (!hpdp) - return NULL; - - ret_pte = hugepte_offset(*hpdp, ea, pdshift); - pdshift = hugepd_shift(*hpdp); out: if (hpage_shift) *hpage_shift = pdshift; diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index face94977cb2..15276068f657 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -48,15 +48,10 @@ notrace void __init early_ioremap_init(void) early_ioremap_setup(); } -static void __init *early_alloc_pgtable(unsigned long size) +void __init *early_alloc_pgtable(unsigned long size) { - void *ptr = memblock_alloc(size, size); + return memblock_alloc_or_panic(size, size); - if (!ptr) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, size, size); - - return ptr; } pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) @@ -130,31 +125,41 @@ void __init mapin_ram(void) } } -void mark_initmem_nx(void) +static int __mark_initmem_nx(void) { unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); + int err; - mmu_mark_initmem_nx(); + err = mmu_mark_initmem_nx(); if (!v_block_mapped((unsigned long)_sinittext)) { - set_memory_nx((unsigned long)_sinittext, numpages); - set_memory_rw((unsigned long)_sinittext, numpages); + err = set_memory_nx((unsigned long)_sinittext, numpages); + if (err) + return err; + err = set_memory_rw((unsigned long)_sinittext, numpages); } + return err; +} + +void mark_initmem_nx(void) +{ + int err = __mark_initmem_nx(); + + if (err) + panic("%s() failed, err = %d\n", __func__, err); } #ifdef CONFIG_STRICT_KERNEL_RWX -void mark_rodata_ro(void) +static int __mark_rodata_ro(void) { unsigned long numpages; if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE)) pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n"); - if (v_block_mapped((unsigned long)_stext + 1)) { - mmu_mark_rodata_ro(); - return; - } + if (v_block_mapped((unsigned long)_stext + 1)) + return mmu_mark_rodata_ro(); /* * mark text and rodata as read only. 
__end_rodata is set by @@ -164,6 +169,14 @@ void mark_rodata_ro(void) numpages = PFN_UP((unsigned long)__end_rodata) - PFN_DOWN((unsigned long)_stext); - set_memory_ro((unsigned long)_stext, numpages); + return set_memory_ro((unsigned long)_stext, numpages); +} + +void mark_rodata_ro(void) +{ + int err = __mark_rodata_ro(); + + if (err) + panic("%s() failed, err = %d\n", __func__, err); } #endif diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 9b99113cb51a..6621cfc3baf8 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -102,7 +102,7 @@ struct page *p4d_page(p4d_t p4d) { if (p4d_leaf(p4d)) { if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) - VM_WARN_ON(!p4d_huge(p4d)); + VM_WARN_ON(!p4d_leaf(p4d)); return pte_page(p4d_pte(p4d)); } return virt_to_page(p4d_pgtable(p4d)); @@ -113,7 +113,7 @@ struct page *pud_page(pud_t pud) { if (pud_leaf(pud)) { if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) - VM_WARN_ON(!pud_huge(pud)); + VM_WARN_ON(!pud_leaf(pud)); return pte_page(pud_pte(pud)); } return virt_to_page(pud_pgtable(pud)); @@ -132,7 +132,7 @@ struct page *pmd_page(pmd_t pmd) * enabled so these checks can't be used. */ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) - VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd))); + VM_WARN_ON(!pmd_leaf(pmd)); return pte_page(pmd_pte(pmd)); } return virt_to_page(pmd_page_vaddr(pmd)); diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile index dc896d2874f3..0f7a050f327e 100644 --- a/arch/powerpc/mm/ptdump/Makefile +++ b/arch/powerpc/mm/ptdump/Makefile @@ -2,7 +2,7 @@ obj-y += ptdump.o -obj-$(CONFIG_4xx) += shared.o +obj-$(CONFIG_44x) += shared.o obj-$(CONFIG_PPC_8xx) += 8xx.o obj-$(CONFIG_PPC_E500) += shared.o obj-$(CONFIG_PPC_BOOK3S_32) += shared.o diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c index 9a601587836b..a6baa6166d94 100644 --- a/arch/powerpc/mm/ptdump/hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -491,7 +491,7 @@ static void walk_vmemmap(struct pg_state *st) * Traverse the vmemmaped memory and dump pages that are in the hash * pagetable. 
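+ * (Testing ptr rather than ptr->list also visits the final list + * element, whose ->list pointer is NULL.)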
*/ - while (ptr->list) { + while (ptr) { hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize); ptr = ptr->list; } diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c index 9dc239967b77..b2358d794855 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -298,6 +298,38 @@ static void populate_markers(void) #endif } +static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) +{ + note_page(pt_st, addr, 4, pte_val(pte)); +} + +static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) +{ + note_page(pt_st, addr, 3, pmd_val(pmd)); +} + +static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) +{ + note_page(pt_st, addr, 2, pud_val(pud)); +} + +static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) +{ + note_page(pt_st, addr, 1, p4d_val(p4d)); +} + +static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) +{ + note_page(pt_st, addr, 0, pgd_val(pgd)); +} + +static void note_page_flush(struct ptdump_state *pt_st) +{ + pte_t pte_zero = {0}; + + note_page(pt_st, 0, -1, pte_val(pte_zero)); +} + static int ptdump_show(struct seq_file *m, void *v) { struct pg_state st = { @@ -305,7 +337,12 @@ static int ptdump_show(struct seq_file *m, void *v) .marker = address_markers, .level = -1, .ptdump = { - .note_page = note_page, + .note_page_pte = note_page_pte, + .note_page_pmd = note_page_pmd, + .note_page_pud = note_page_pud, + .note_page_p4d = note_page_p4d, + .note_page_pgd = note_page_pgd, + .note_page_flush = note_page_flush, .range = ptdump_range, } }; @@ -338,7 +375,12 @@ bool ptdump_check_wx(void) .level = -1, .check_wx = true, .ptdump = { - .note_page = note_page, + .note_page_pte = note_page_pte, + .note_page_pmd = note_page_pmd, + .note_page_pud = note_page_pud, + .note_page_p4d = note_page_p4d, + .note_page_pgd = note_page_pgd, + .note_page_flush = note_page_flush, .range = ptdump_range, } }; diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index cdea5dccaefe..4c26912c2e3c 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -12,6 +12,7 @@ #include <asm/types.h> #include <asm/ppc-opcode.h> +#include <linux/build_bug.h> #ifdef CONFIG_PPC64_ELF_ABI_V1 #define FUNCTION_DESCR_SIZE 24 @@ -21,6 +22,9 @@ #define CTX_NIA(ctx) ((unsigned long)ctx->idx * 4) +#define SZL sizeof(unsigned long) +#define BPF_INSN_SAFETY 64 + #define PLANT_INSTR(d, idx, instr) \ do { if (d) { (d)[idx] = instr; } idx++; } while (0) #define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr) @@ -47,8 +51,16 @@ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \ } while (0) -/* Sign-extended 32-bit immediate load */ +/* + * Sign-extended 32-bit immediate load + * + * If this is a dummy pass (!image), account for + * maximum possible instructions. 
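+ * PPC_LI32 expands to at most two instructions (lis; ori), hence the + * two slots reserved below; PPC_LI64 likewise reserves five.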
+ */ #define PPC_LI32(d, i) do { \ + if (!image) \ + ctx->idx += 2; \ + else { \ if ((int)(uintptr_t)(i) >= -32768 && \ (int)(uintptr_t)(i) < 32768) \ EMIT(PPC_RAW_LI(d, i)); \ @@ -56,10 +68,15 @@ EMIT(PPC_RAW_LIS(d, IMM_H(i))); \ if (IMM_L(i)) \ EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \ - } } while(0) + } \ + } } while (0) #ifdef CONFIG_PPC64 +/* If dummy pass (!image), account for maximum possible instructions */ #define PPC_LI64(d, i) do { \ + if (!image) \ + ctx->idx += 5; \ + else { \ if ((long)(i) >= -2147483648 && \ (long)(i) < 2147483648) \ PPC_LI32(d, i); \ @@ -80,7 +97,20 @@ if ((uintptr_t)(i) & 0x000000000000ffffULL) \ EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \ 0xffff)); \ - } } while (0) + } \ + } } while (0) +#define PPC_LI_ADDR PPC_LI64 + +#ifndef CONFIG_PPC_KERNEL_PCREL +#define PPC64_LOAD_PACA() \ + EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))) +#else +#define PPC64_LOAD_PACA() do {} while (0) +#endif +#else +#define PPC_LI64(d, i) BUILD_BUG() +#define PPC_LI_ADDR PPC_LI32 +#define PPC64_LOAD_PACA() BUILD_BUG() #endif /* @@ -165,6 +195,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code u32 *addrs, int pass, bool extra_pass); void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx); void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx); +void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx); void bpf_jit_realloc_regs(struct codegen_context *ctx); int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr); diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 0f9a21783329..c0684733e9d6 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -18,15 +18,85 @@ #include <linux/bpf.h> #include <asm/kprobes.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include "bpf_jit.h" +/* These offsets are from bpf prog end and stay the same across progs */ +static int bpf_jit_ool_stub, bpf_jit_long_branch_stub; + static void bpf_jit_fill_ill_insns(void *area, unsigned int size) { memset32(area, BREAKPOINT_INSTRUCTION, size / 4); } +void dummy_tramp(void); + +asm ( +" .pushsection .text, \"ax\", @progbits ;" +" .global dummy_tramp ;" +" .type dummy_tramp, @function ;" +"dummy_tramp: ;" +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE +" blr ;" +#else +/* LR is always in r11, so we don't need a 'mflr r11' here */ +" mtctr 11 ;" +" mtlr 0 ;" +" bctr ;" +#endif +" .size dummy_tramp, .-dummy_tramp ;" +" .popsection ;" +); + +void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx) +{ + int ool_stub_idx, long_branch_stub_idx; + + /* + * Out-of-line stub: + * mflr r0 + * [b|bl] tramp + * mtlr r0 // only with CONFIG_PPC_FTRACE_OUT_OF_LINE + * b bpf_func + 4 + */ + ool_stub_idx = ctx->idx; + EMIT(PPC_RAW_MFLR(_R0)); + EMIT(PPC_RAW_NOP()); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + EMIT(PPC_RAW_MTLR(_R0)); + WARN_ON_ONCE(!is_offset_in_branch_range(4 - (long)ctx->idx * 4)); + EMIT(PPC_RAW_BRANCH(4 - (long)ctx->idx * 4)); + + /* + * Long branch stub: + * .long <dummy_tramp_addr> + * mflr r11 + * bcl 20,31,$+4 + * mflr r12 + * ld r12, -8-SZL(r12) + * mtctr r12 + * mtlr r11 // needed to retain ftrace ABI + * bctr + */ + if (image) + *((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp; + ctx->idx += SZL / 4; + long_branch_stub_idx = ctx->idx; + EMIT(PPC_RAW_MFLR(_R11)); + EMIT(PPC_RAW_BCL4()); + EMIT(PPC_RAW_MFLR(_R12)); + EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL)); + 
EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_MTLR(_R11)); + EMIT(PPC_RAW_BCTR()); + + if (!bpf_jit_ool_stub) { + bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4; + bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4; + } +} + int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr) { if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) { @@ -222,10 +292,10 @@ skip_init_ctx: fp->bpf_func = (void *)fimage; fp->jited = 1; - fp->jited_len = proglen + FUNCTION_DESCR_SIZE; + fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE; if (!fp->is_func || extra_pass) { - if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) { + if (bpf_jit_binary_pack_finalize(fhdr, hdr)) { fp = org_fp; goto out_addrs; } @@ -348,7 +418,7 @@ void bpf_jit_free(struct bpf_prog *fp) * before freeing it. */ if (jit_data) { - bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr); + bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr); kvfree(jit_data->addrs); kfree(jit_data); } @@ -359,3 +429,775 @@ void bpf_jit_free(struct bpf_prog *fp) bpf_prog_unlock_free(fp); } + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} + +bool bpf_jit_supports_far_kfunc_call(void) +{ + return IS_ENABLED(CONFIG_PPC64); +} + +void *arch_alloc_bpf_trampoline(unsigned int size) +{ + return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns); +} + +void arch_free_bpf_trampoline(void *image, unsigned int size) +{ + bpf_prog_pack_free(image, size); +} + +int arch_protect_bpf_trampoline(void *image, unsigned int size) +{ + return 0; +} + +static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ctx, + struct bpf_tramp_link *l, int regs_off, int retval_off, + int run_ctx_off, bool save_ret) +{ + struct bpf_prog *p = l->link.prog; + ppc_inst_t branch_insn; + u32 jmp_idx; + int ret = 0; + + /* Save cookie */ + if (IS_ENABLED(CONFIG_PPC64)) { + PPC_LI64(_R3, l->cookie); + EMIT(PPC_RAW_STD(_R3, _R1, run_ctx_off + offsetof(struct bpf_tramp_run_ctx, + bpf_cookie))); + } else { + PPC_LI32(_R3, l->cookie >> 32); + PPC_LI32(_R4, l->cookie); + EMIT(PPC_RAW_STW(_R3, _R1, + run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie))); + EMIT(PPC_RAW_STW(_R4, _R1, + run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie) + 4)); + } + + /* __bpf_prog_enter(p, &bpf_tramp_run_ctx) */ + PPC_LI_ADDR(_R3, p); + EMIT(PPC_RAW_MR(_R25, _R3)); + EMIT(PPC_RAW_ADDI(_R4, _R1, run_ctx_off)); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)bpf_trampoline_enter(p)); + if (ret) + return ret; + + /* Remember prog start time returned by __bpf_prog_enter */ + EMIT(PPC_RAW_MR(_R26, _R3)); + + /* + * if (__bpf_prog_enter(p) == 0) + * goto skip_exec_of_prog; + * + * Emit a nop to be later patched with conditional branch, once offset is known + */ + EMIT(PPC_RAW_CMPLI(_R3, 0)); + jmp_idx = ctx->idx; + EMIT(PPC_RAW_NOP()); + + /* p->bpf_func(ctx) */ + EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off)); + if (!p->jited) + PPC_LI_ADDR(_R4, (unsigned long)p->insnsi); + /* Account for max possible instructions during dummy pass for size calculation */ + if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], + (unsigned long)p->bpf_func, + BRANCH_SET_LINK)) { + image[ctx->idx] = ppc_inst_val(branch_insn); + ctx->idx++; + } else { + EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func))); + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); + } + + if (save_ret) + EMIT(PPC_RAW_STL(_R3, _R1, retval_off)); + + /* Fix up branch */ + if (image) 
{ + if (create_cond_branch(&branch_insn, &image[jmp_idx], + (unsigned long)&image[ctx->idx], COND_EQ << 16)) + return -EINVAL; + image[jmp_idx] = ppc_inst_val(branch_insn); + } + + /* __bpf_prog_exit(p, start_time, &bpf_tramp_run_ctx) */ + EMIT(PPC_RAW_MR(_R3, _R25)); + EMIT(PPC_RAW_MR(_R4, _R26)); + EMIT(PPC_RAW_ADDI(_R5, _R1, run_ctx_off)); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)bpf_trampoline_exit(p)); + + return ret; +} + +static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context *ctx, + struct bpf_tramp_links *tl, int regs_off, int retval_off, + int run_ctx_off, u32 *branches) +{ + int i; + + /* + * The first fmod_ret program will receive a garbage return value. + * Set this to 0 to avoid confusing the program. + */ + EMIT(PPC_RAW_LI(_R3, 0)); + EMIT(PPC_RAW_STL(_R3, _R1, retval_off)); + for (i = 0; i < tl->nr_links; i++) { + if (invoke_bpf_prog(image, ro_image, ctx, tl->links[i], regs_off, retval_off, + run_ctx_off, true)) + return -EINVAL; + + /* + * mod_ret prog stored return value after prog ctx. Emit: + * if (*(u64 *)(ret_val) != 0) + * goto do_fexit; + */ + EMIT(PPC_RAW_LL(_R3, _R1, retval_off)); + EMIT(PPC_RAW_CMPLI(_R3, 0)); + + /* + * Save the location of the branch and generate a nop, which is + * replaced with a conditional jump once do_fexit (i.e. the + * start of the fexit invocation) is finalized. + */ + branches[i] = ctx->idx; + EMIT(PPC_RAW_NOP()); + } + + return 0; +} + +static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx, + int func_frame_offset, int r4_off) +{ + if (IS_ENABLED(CONFIG_PPC64)) { + /* See bpf_jit_stack_tailcallcnt() */ + int tailcallcnt_offset = 6 * 8; + + EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset)); + EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset)); + } else { + /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */ + EMIT(PPC_RAW_LL(_R4, _R1, r4_off)); + } +} + +static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx, + int func_frame_offset, int r4_off) +{ + if (IS_ENABLED(CONFIG_PPC64)) { + /* See bpf_jit_stack_tailcallcnt() */ + int tailcallcnt_offset = 6 * 8; + + EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset)); + EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset)); + } else { + /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */ + EMIT(PPC_RAW_STL(_R4, _R1, r4_off)); + } +} + +static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx, int func_frame_offset, + int nr_regs, int regs_off) +{ + int param_save_area_offset; + + param_save_area_offset = func_frame_offset; /* the two frames we allotted */ + param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */ + + for (int i = 0; i < nr_regs; i++) { + if (i < 8) { + EMIT(PPC_RAW_STL(_R3 + i, _R1, regs_off + i * SZL)); + } else { + EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL)); + EMIT(PPC_RAW_STL(_R3, _R1, regs_off + i * SZL)); + } + } +} + +/* Used when restoring just the register parameters when returning */ +static void bpf_trampoline_restore_args_regs(u32 *image, struct codegen_context *ctx, + int nr_regs, int regs_off) +{ + for (int i = 0; i < nr_regs && i < 8; i++) + EMIT(PPC_RAW_LL(_R3 + i, _R1, regs_off + i * SZL)); +} + +/* Used when we call into the traced function.
Replicate parameter save area */ +static void bpf_trampoline_restore_args_stack(u32 *image, struct codegen_context *ctx, + int func_frame_offset, int nr_regs, int regs_off) +{ + int param_save_area_offset; + + param_save_area_offset = func_frame_offset; /* the two frames we allotted */ + param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */ + + for (int i = 8; i < nr_regs; i++) { + EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL)); + EMIT(PPC_RAW_STL(_R3, _R1, STACK_FRAME_MIN_SIZE + i * SZL)); + } + bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off); +} + +static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image, + void *rw_image_end, void *ro_image, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, + void *func_addr) +{ + int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off, alt_lr_off, r4_off = 0; + int i, ret, nr_regs, bpf_frame_size = 0, bpf_dummy_frame_size = 0, func_frame_offset; + struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; + struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; + struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; + struct codegen_context codegen_ctx, *ctx; + u32 *image = (u32 *)rw_image; + ppc_inst_t branch_insn; + u32 *branches = NULL; + bool save_ret; + + if (IS_ENABLED(CONFIG_PPC32)) + return -EOPNOTSUPP; + + nr_regs = m->nr_args; + /* Extra registers for struct arguments */ + for (i = 0; i < m->nr_args; i++) + if (m->arg_size[i] > SZL) + nr_regs += round_up(m->arg_size[i], SZL) / SZL - 1; + + if (nr_regs > MAX_BPF_FUNC_ARGS) + return -EOPNOTSUPP; + + ctx = &codegen_ctx; + memset(ctx, 0, sizeof(*ctx)); + + /* + * Generated stack layout: + * + * func prev back chain [ back chain ] + * [ ] + * bpf prog redzone/tailcallcnt [ ... ] 64 bytes (64-bit powerpc) + * [ ] -- + * LR save area [ r0 save (64-bit) ] | header + * [ r0 save (32-bit) ] | + * dummy frame for unwind [ back chain 1 ] -- + * [ padding ] align stack frame + * r4_off [ r4 (tailcallcnt) ] optional - 32-bit powerpc + * alt_lr_off [ real lr (ool stub)] optional - actual lr + * [ r26 ] + * nvr_off [ r25 ] nvr save area + * retval_off [ return value ] + * [ reg argN ] + * [ ... ] + * regs_off [ reg_arg1 ] prog ctx context + * nregs_off [ args count ] + * ip_off [ traced function ] + * [ ... ] + * run_ctx_off [ bpf_tramp_run_ctx ] + * [ reg argN ] + * [ ... ] + * param_save_area [ reg_arg1 ] min 8 doublewords, per ABI + * [ TOC save (64-bit) ] -- + * [ LR save (64-bit) ] | header + * [ LR save (32-bit) ] | + * bpf trampoline frame [ back chain 2 ] -- + * + */ + + /* Minimum stack frame header */ + bpf_frame_size = STACK_FRAME_MIN_SIZE; + + /* + * Room for parameter save area. + * + * As per the ABI, this is required if we call into the traced + * function (BPF_TRAMP_F_CALL_ORIG): + * - if the function takes more than 8 arguments for the rest to spill onto the stack + * - or, if the function has variadic arguments + * - or, if this function's prototype was not available to the caller + * + * Reserve space for at least 8 registers for now. This can be optimized later. + */ + bpf_frame_size += (nr_regs > 8 ?
nr_regs : 8) * SZL; + + /* Room for struct bpf_tramp_run_ctx */ + run_ctx_off = bpf_frame_size; + bpf_frame_size += round_up(sizeof(struct bpf_tramp_run_ctx), SZL); + + /* Room for IP address argument */ + ip_off = bpf_frame_size; + if (flags & BPF_TRAMP_F_IP_ARG) + bpf_frame_size += SZL; + + /* Room for args count */ + nregs_off = bpf_frame_size; + bpf_frame_size += SZL; + + /* Room for args */ + regs_off = bpf_frame_size; + bpf_frame_size += nr_regs * SZL; + + /* Room for return value of func_addr or fentry prog */ + retval_off = bpf_frame_size; + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); + if (save_ret) + bpf_frame_size += SZL; + + /* Room for nvr save area */ + nvr_off = bpf_frame_size; + bpf_frame_size += 2 * SZL; + + /* Optional save area for actual LR in case of ool ftrace */ + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { + alt_lr_off = bpf_frame_size; + bpf_frame_size += SZL; + } + + if (IS_ENABLED(CONFIG_PPC32)) { + if (nr_regs < 2) { + r4_off = bpf_frame_size; + bpf_frame_size += SZL; + } else { + r4_off = regs_off + SZL; + } + } + + /* Padding to align stack frame, if any */ + bpf_frame_size = round_up(bpf_frame_size, SZL * 2); + + /* Dummy frame size for proper unwind - includes 64-byte red zone for 64-bit powerpc */ + bpf_dummy_frame_size = STACK_FRAME_MIN_SIZE + 64; + + /* Offset to the traced function's stack frame */ + func_frame_offset = bpf_dummy_frame_size + bpf_frame_size; + + /* Create dummy frame for unwind, store original return value */ + EMIT(PPC_RAW_STL(_R0, _R1, PPC_LR_STKOFF)); + /* Protect red zone where tail call count goes */ + EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_dummy_frame_size)); + + /* Create our stack frame */ + EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size)); + + /* 64-bit: Save TOC and load kernel TOC */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) { + EMIT(PPC_RAW_STD(_R2, _R1, 24)); + PPC64_LOAD_PACA(); + } + + /* 32-bit: save tail call count in r4 */ + if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2) + EMIT(PPC_RAW_STL(_R4, _R1, r4_off)); + + bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off); + + /* Save our return address */ + EMIT(PPC_RAW_MFLR(_R3)); + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off)); + else + EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); + + /* + * Save ip address of the traced function. + * We could recover this from LR, but we would need to adjust for the + * OOL trampoline and the optional GEP area.
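+ * (The code below fetches the branch instruction at LR + 4, + * sign-extends its 26-bit displacement with the slwi/srawi pair, and + * adds it back to compute the branch target.)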
+ */ + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) { + EMIT(PPC_RAW_LWZ(_R4, _R3, 4)); + EMIT(PPC_RAW_SLWI(_R4, _R4, 6)); + EMIT(PPC_RAW_SRAWI(_R4, _R4, 6)); + EMIT(PPC_RAW_ADD(_R3, _R3, _R4)); + EMIT(PPC_RAW_ADDI(_R3, _R3, 4)); + } + + if (flags & BPF_TRAMP_F_IP_ARG) + EMIT(PPC_RAW_STL(_R3, _R1, ip_off)); + + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) + /* Fake our LR for unwind */ + EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); + + /* Save function arg count -- see bpf_get_func_arg_cnt() */ + EMIT(PPC_RAW_LI(_R3, nr_regs)); + EMIT(PPC_RAW_STL(_R3, _R1, nregs_off)); + + /* Save nv regs */ + EMIT(PPC_RAW_STL(_R25, _R1, nvr_off)); + EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL)); + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + PPC_LI_ADDR(_R3, (unsigned long)im); + ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, + (unsigned long)__bpf_tramp_enter); + if (ret) + return ret; + } + + for (i = 0; i < fentry->nr_links; i++) + if (invoke_bpf_prog(image, ro_image, ctx, fentry->links[i], regs_off, retval_off, + run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET)) + return -EINVAL; + + if (fmod_ret->nr_links) { + branches = kcalloc(fmod_ret->nr_links, sizeof(u32), GFP_KERNEL); + if (!branches) + return -ENOMEM; + + if (invoke_bpf_mod_ret(image, ro_image, ctx, fmod_ret, regs_off, retval_off, + run_ctx_off, branches)) { + ret = -EINVAL; + goto cleanup; + } + } + + /* Call the traced function */ + if (flags & BPF_TRAMP_F_CALL_ORIG) { + /* + * The address in LR save area points to the correct point in the original function + * with both PPC_FTRACE_OUT_OF_LINE as well as with traditional ftrace instruction + * sequence + */ + EMIT(PPC_RAW_LL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTCTR(_R3)); + + /* Replicate tail_call_cnt before calling the original BPF prog */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off); + + /* Restore args */ + bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off); + + /* Restore TOC for 64-bit */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + EMIT(PPC_RAW_LD(_R2, _R1, 24)); + EMIT(PPC_RAW_BCTRL()); + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + PPC64_LOAD_PACA(); + + /* Store return value for bpf prog to access */ + EMIT(PPC_RAW_STL(_R3, _R1, retval_off)); + + /* Restore updated tail_call_cnt */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off); + + /* Reserve space to patch branch instruction to skip fexit progs */ + if (ro_image) /* image is NULL for dummy pass */ + im->ip_after_call = &((u32 *)ro_image)[ctx->idx]; + EMIT(PPC_RAW_NOP()); + } + + /* Update branches saved in invoke_bpf_mod_ret with address of do_fexit */ + for (i = 0; i < fmod_ret->nr_links && image; i++) { + if (create_cond_branch(&branch_insn, &image[branches[i]], + (unsigned long)&image[ctx->idx], COND_NE << 16)) { + ret = -EINVAL; + goto cleanup; + } + + image[branches[i]] = ppc_inst_val(branch_insn); + } + + for (i = 0; i < fexit->nr_links; i++) + if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off, + run_ctx_off, false)) { + ret = -EINVAL; + goto cleanup; + } + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + if (ro_image) /* image is NULL for dummy pass */ + im->ip_epilogue = &((u32 *)ro_image)[ctx->idx]; + PPC_LI_ADDR(_R3, im); + ret = bpf_jit_emit_func_call_rel(image, ro_image, 
ctx, + (unsigned long)__bpf_tramp_exit); + if (ret) + goto cleanup; + } + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off); + + /* Restore return value of func_addr or fentry prog */ + if (save_ret) + EMIT(PPC_RAW_LL(_R3, _R1, retval_off)); + + /* Restore nv regs */ + EMIT(PPC_RAW_LL(_R26, _R1, nvr_off + SZL)); + EMIT(PPC_RAW_LL(_R25, _R1, nvr_off)); + + /* Epilogue */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + EMIT(PPC_RAW_LD(_R2, _R1, 24)); + if (flags & BPF_TRAMP_F_SKIP_FRAME) { + /* Skip the traced function and return to parent */ + EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); + EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTLR(_R0)); + EMIT(PPC_RAW_BLR()); + } else { + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { + EMIT(PPC_RAW_LL(_R0, _R1, alt_lr_off)); + EMIT(PPC_RAW_MTLR(_R0)); + EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); + EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_BLR()); + } else { + EMIT(PPC_RAW_LL(_R0, _R1, bpf_frame_size + PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTCTR(_R0)); + EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); + EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); + EMIT(PPC_RAW_MTLR(_R0)); + EMIT(PPC_RAW_BCTR()); + } + } + + /* Make sure the trampoline generation logic doesn't overflow */ + if (image && WARN_ON_ONCE(&image[ctx->idx] > (u32 *)rw_image_end - BPF_INSN_SAFETY)) { + ret = -EFAULT; + goto cleanup; + } + ret = ctx->idx * 4 + BPF_INSN_SAFETY * 4; + +cleanup: + kfree(branches); + return ret; +} + +int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, void *func_addr) +{ + struct bpf_tramp_image im; + int ret; + + ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr); + return ret; +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_links *tlinks, + void *func_addr) +{ + u32 size = image_end - image; + void *rw_image, *tmp; + int ret; + + /* + * rw_image doesn't need to be in module memory range, so we can + * use kvmalloc. + */ + rw_image = kvmalloc(size, GFP_KERNEL); + if (!rw_image) + return -ENOMEM; + + ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, + flags, tlinks, func_addr); + if (ret < 0) + goto out; + + if (bpf_jit_enable > 1) + bpf_jit_dump(1, ret - BPF_INSN_SAFETY * 4, 1, rw_image); + + tmp = bpf_arch_text_copy(image, rw_image, size); + if (IS_ERR(tmp)) + ret = PTR_ERR(tmp); + +out: + kvfree(rw_image); + return ret; +} + +static int bpf_modify_inst(void *ip, ppc_inst_t old_inst, ppc_inst_t new_inst) +{ + ppc_inst_t org_inst; + + if (copy_inst_from_kernel_nofault(&org_inst, ip)) { + pr_err("0x%lx: fetching instruction failed\n", (unsigned long)ip); + return -EFAULT; + } + + if (!ppc_inst_equal(org_inst, old_inst)) { + pr_err("0x%lx: expected (%08lx) != found (%08lx)\n", + (unsigned long)ip, ppc_inst_as_ulong(old_inst), ppc_inst_as_ulong(org_inst)); + return -EINVAL; + } + + if (ppc_inst_equal(old_inst, new_inst)) + return 0; + + return patch_instruction(ip, new_inst); +} + +static void do_isync(void *info __maybe_unused) +{ + isync(); +} + +/* + * A 3-step process for bpf prog entry: + * 1. At bpf prog entry, a single nop/b: + * bpf_func: + * [nop|b] ool_stub + * 2. 
Out-of-line stub: + * ool_stub: + * mflr r0 + * [b|bl] <bpf_prog>/<long_branch_stub> + * mtlr r0 // CONFIG_PPC_FTRACE_OUT_OF_LINE only + * b bpf_func + 4 + * 3. Long branch stub: + * long_branch_stub: + * .long <branch_addr>/<dummy_tramp> + * mflr r11 + * bcl 20,31,$+4 + * mflr r12 + * ld r12, -16(r12) + * mtctr r12 + * mtlr r11 // needed to retain ftrace ABI + * bctr + * + * dummy_tramp is used to reduce synchronization requirements. + * + * When attaching a bpf trampoline to a bpf prog, we do not need any + * synchronization here since we always have a valid branch target regardless + * of the order in which the above stores are seen. dummy_tramp ensures that + * the long_branch stub goes to a valid destination on other cpus, even when + * the branch to the long_branch stub is seen before the updated trampoline + * address. + * + * However, when detaching a bpf trampoline from a bpf prog, or if changing + * the bpf trampoline address, we need synchronization to ensure that other + * cpus can no longer branch into the older trampoline so that it can be + * safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus + * make forward progress, but we still need to ensure that other cpus + * execute isync (or some CSI) so that they don't go back into the + * trampoline again. + */ +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, + void *old_addr, void *new_addr) +{ + unsigned long bpf_func, bpf_func_end, size, offset; + ppc_inst_t old_inst, new_inst; + int ret = 0, branch_flags; + char name[KSYM_NAME_LEN]; + + if (IS_ENABLED(CONFIG_PPC32)) + return -EOPNOTSUPP; + + bpf_func = (unsigned long)ip; + branch_flags = poke_type == BPF_MOD_CALL ? BRANCH_SET_LINK : 0; + + /* We currently only support poking bpf programs */ + if (!__bpf_address_lookup(bpf_func, &size, &offset, name)) { + pr_err("%s (0x%lx): kernel/modules are not supported\n", __func__, bpf_func); + return -EOPNOTSUPP; + } + + /* + * If we are not poking at bpf prog entry, then we are simply patching in/out + * an unconditional branch instruction at im->ip_after_call + */ + if (offset) { + if (poke_type != BPF_MOD_JUMP) { + pr_err("%s (0x%lx): calls are not supported in bpf prog body\n", __func__, + bpf_func); + return -EOPNOTSUPP; + } + old_inst = ppc_inst(PPC_RAW_NOP()); + if (old_addr) + if (create_branch(&old_inst, ip, (unsigned long)old_addr, 0)) + return -ERANGE; + new_inst = ppc_inst(PPC_RAW_NOP()); + if (new_addr) + if (create_branch(&new_inst, ip, (unsigned long)new_addr, 0)) + return -ERANGE; + mutex_lock(&text_mutex); + ret = bpf_modify_inst(ip, old_inst, new_inst); + mutex_unlock(&text_mutex); + + /* Make sure all cpus see the new instruction */ + smp_call_function(do_isync, NULL, 1); + return ret; + } + + bpf_func_end = bpf_func + size; + + /* Address of the jmp/call instruction in the out-of-line stub */ + ip = (void *)(bpf_func_end - bpf_jit_ool_stub + 4); + + if (!is_offset_in_branch_range((long)ip - 4 - bpf_func)) { + pr_err("%s (0x%lx): bpf prog too large, ool stub out of branch range\n", __func__, + bpf_func); + return -ERANGE; + } + + old_inst = ppc_inst(PPC_RAW_NOP()); + if (old_addr) { + if (is_offset_in_branch_range(ip - old_addr)) + create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags); + else + create_branch(&old_inst, ip, bpf_func_end - bpf_jit_long_branch_stub, + branch_flags); + } + new_inst = ppc_inst(PPC_RAW_NOP()); + if (new_addr) { + if (is_offset_in_branch_range(ip - new_addr)) + create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags); + else + 
create_branch(&new_inst, ip, bpf_func_end - bpf_jit_long_branch_stub, + branch_flags); + } + + mutex_lock(&text_mutex); + + /* + * 1. Update the address in the long branch stub: + * If new_addr is out of range, we will have to use the long branch stub, so patch new_addr + * here. Otherwise, revert to dummy_tramp, but only if we had patched old_addr here. + */ + if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) || + (old_addr && !is_offset_in_branch_range(old_addr - ip))) + ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL), + (new_addr && !is_offset_in_branch_range(new_addr - ip)) ? + (unsigned long)new_addr : (unsigned long)dummy_tramp); + if (ret) + goto out; + + /* 2. Update the branch/call in the out-of-line stub */ + ret = bpf_modify_inst(ip, old_inst, new_inst); + if (ret) + goto out; + + /* 3. Update instruction at bpf prog entry */ + ip = (void *)bpf_func; + if (!old_addr || !new_addr) { + if (!old_addr) { + old_inst = ppc_inst(PPC_RAW_NOP()); + create_branch(&new_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0); + } else { + new_inst = ppc_inst(PPC_RAW_NOP()); + create_branch(&old_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0); + } + ret = bpf_modify_inst(ip, old_inst, new_inst); + } + +out: + mutex_unlock(&text_mutex); + + /* + * Sync only if we are not attaching a trampoline to a bpf prog so the older + * trampoline can be freed safely. + */ + if (old_addr) + smp_call_function(do_isync, NULL, 1); + + return ret; +} diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index 2f39c50ca729..0aace304dfe1 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -127,13 +127,16 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) { int i; + /* Instruction for trampoline attach */ + EMIT(PPC_RAW_NOP()); + /* Initialize tail_call_cnt, to be skipped if we do tail calls. 
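Taken together, the attach path in bpf_arch_text_poke() above always writes the patch sites farthest from the program entry first, so a CPU racing with the update can only ever branch to a fully formed target. A condensed sketch of that ordering, reusing the helpers above (names like tramp, ool_ip, old_branch and branch_to_* are illustrative; error handling and the detach direction are omitted):

        /* 1. retarget the long branch stub's data slot (else dummy_tramp) */
        patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL),
                    (unsigned long)tramp);
        /* 2. point the branch/call in the out-of-line stub at its target */
        bpf_modify_inst(ool_ip, old_branch, branch_to_tramp);
        /* 3. only then arm the entry: nop -> b ool_stub */
        bpf_modify_inst((void *)bpf_func, ppc_inst(PPC_RAW_NOP()),
                        branch_to_ool_stub);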
*/ if (ctx->seen & SEEN_TAILCALL) EMIT(PPC_RAW_LI(_R4, 0)); else EMIT(PPC_RAW_NOP()); -#define BPF_TAILCALL_PROLOGUE_SIZE 4 +#define BPF_TAILCALL_PROLOGUE_SIZE 8 if (bpf_has_stack_frame(ctx)) EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx))); @@ -198,6 +201,8 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) bpf_jit_emit_common_epilogue(image, ctx); EMIT(PPC_RAW_BLR()); + + bpf_jit_build_fentry_stubs(image, ctx); } /* Relative offset needs to be calculated based on final image location */ @@ -308,7 +313,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code u64 func_addr; u32 true_cond; u32 tmp_idx; - int j; if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) && (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) && @@ -450,10 +454,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code } break; case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ - EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg)); + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg)); break; case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ - EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg)); + if (off) + EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg)); EMIT(PPC_RAW_MULW(_R0, src_reg, _R0)); EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); break; @@ -467,10 +477,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code if (imm == 1) { EMIT(PPC_RAW_MR(dst_reg, src2_reg)); } else if (is_power_of_2((u32)imm)) { - EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm))); + if (off) + EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm))); + else + EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm))); } else { PPC_LI32(_R0, imm); - EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0)); + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0)); } break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ @@ -480,11 +496,19 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code if (!is_power_of_2((u32)imm)) { bpf_set_seen_register(ctx, tmp_reg); PPC_LI32(tmp_reg, imm); - EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg)); + if (off) + EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg)); + else + EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg)); EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0)); EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); } else if (imm == 1) { EMIT(PPC_RAW_LI(dst_reg, 0)); + } else if (off) { + EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm))); + EMIT(PPC_RAW_ADDZE(_R0, _R0)); + EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm))); + EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0)); } else { imm = ilog2((u32)imm); EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31)); @@ -497,11 +521,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code imm = -imm; if (!is_power_of_2(imm)) return -EOPNOTSUPP; - if (imm == 1) + if (imm == 1) { EMIT(PPC_RAW_LI(dst_reg, 0)); - else + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } else if (off) { + EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31)); + EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h)); + EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg)); + EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31)); + EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h)); + EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg)); + EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h)); + } else { 
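Both power-of-2 strategies in this case reduce to masking and differ only in sign handling. In rough C terms (a semantic sketch on 32 bits for brevity, not the emitted code; the 64-bit case additionally propagates the sign into the high word):

        u32 mask = imm - 1;                     /* imm is a power of 2 */
        u32 umod = src2 & mask;                 /* unsigned path below */
        s32 smod = (s32)src2 % (s32)imm;        /* signed path above: result
                                                 * keeps the dividend's sign,
                                                 * e.g. -7 % 4 == -3 */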
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31)); - EMIT(PPC_RAW_LI(dst_reg_h, 0)); + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } break; case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */ if (!imm) @@ -727,15 +761,30 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * MOV */ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */ - if (dst_reg == src_reg) - break; - EMIT(PPC_RAW_MR(dst_reg, src_reg)); - EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h)); + if (off == 8) { + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); + } else if (off == 16) { + EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); + } else if (off == 32 && dst_reg == src_reg) { + EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31)); + } else if (off == 32) { + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31)); + } else if (dst_reg != src_reg) { + EMIT(PPC_RAW_MR(dst_reg, src_reg)); + EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h)); + } break; case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */ /* special mov32 for zext */ if (imm == 1) EMIT(PPC_RAW_LI(dst_reg_h, 0)); + else if (off == 8) + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); + else if (off == 16) + EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); else if (dst_reg != src_reg) EMIT(PPC_RAW_MR(dst_reg, src_reg)); break; @@ -751,6 +800,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * BPF_FROM_BE/LE */ case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: switch (imm) { case 16: /* Copy 16 bits to upper part */ @@ -785,6 +835,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg)); break; } + if (BPF_CLASS(code) == BPF_ALU64 && imm != 64) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); break; case BPF_ALU | BPF_END | BPF_FROM_BE: switch (imm) { @@ -852,6 +904,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code /* Get offset into TMP_REG */ EMIT(PPC_RAW_LI(tmp_reg, off)); + /* + * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync' + * before and after the operation. + * + * This is a requirement in the Linux Kernel Memory Model. + * See __cmpxchg_u32() in asm/cmpxchg.h as an example. 
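For reference, the ordering being enforced is the same sync-bracketed ll/sc loop the kernel uses for fully ordered atomics. A stand-alone C approximation (illustrative only, not the kernel's actual implementation):

static inline u32 fetch_add_u32_full(u32 *p, u32 v)
{
        u32 old, tmp;

        asm volatile(
"       sync\n"                 /* full barrier before the update */
"1:     lwarx   %0,0,%3\n"      /* load-reserve the old value */
"       add     %1,%0,%4\n"
"       stwcx.  %1,0,%3\n"      /* store conditionally, retry if lost */
"       bne-    1b\n"
"       sync\n"                 /* full barrier after the update */
        : "=&r" (old), "=&r" (tmp), "+m" (*p)
        : "r" (p), "r" (v)
        : "cc", "memory");

        return old;
}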
+ */ + if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); tmp_idx = ctx->idx * 4; /* load value from memory into r0 */ EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0)); @@ -905,6 +966,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code /* For the BPF_FETCH variant, get old data into src_reg */ if (imm & BPF_FETCH) { + /* Emit 'sync' to enforce full ordering */ + if (IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); EMIT(PPC_RAW_MR(ret_reg, ax_reg)); if (!fp->aux->verifier_zext) EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */ @@ -918,11 +982,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * BPF_LDX */ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_B: case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ + case BPF_LDX | BPF_MEMSX | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_DW: /* @@ -931,7 +1001,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * load only if addr is kernel address (see is_kernel_addr()), otherwise * set dst_reg=0 and move on. */ - if (BPF_MODE(code) == BPF_PROBE_MEM) { + if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) { PPC_LI32(_R0, TASK_SIZE - off); EMIT(PPC_RAW_CMPLW(src_reg, _R0)); PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4); @@ -953,30 +1023,48 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * as there are two load instructions for dst_reg_h & dst_reg * respectively. 
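The sign-extending (BPF_MEMSX) loads handled below have straightforward C equivalents on a 32-bit target, with the upper half of the destination register pair derived from the sign bit:

        dst_lo = (u32)(s32)*(s8 *)(src + off);  /* BPF_B: lbz + extsb */
        dst_lo = (u32)(s32)*(s16 *)(src + off); /* BPF_H: lha sign-extends */
        dst_lo = *(u32 *)(src + off);           /* BPF_W: lwz */
        dst_hi = (u32)((s32)dst_lo >> 31);      /* srawi dst_h, dst, 31 */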
*/ - if (size == BPF_DW) + if (size == BPF_DW || + (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) PPC_JMP((ctx->idx + 3) * 4); else PPC_JMP((ctx->idx + 2) * 4); } - switch (size) { - case BPF_B: - EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); - break; - case BPF_H: - EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); - break; - case BPF_W: - EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); - break; - case BPF_DW: - EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off)); - EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4)); - break; - } + if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg)); + break; + case BPF_H: + EMIT(PPC_RAW_LHA(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + } + if (!fp->aux->verifier_zext) + EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31)); - if (size != BPF_DW && !fp->aux->verifier_zext) - EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } else { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + case BPF_DW: + EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off)); + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4)); + break; + } + if (size != BPF_DW && !fp->aux->verifier_zext) + EMIT(PPC_RAW_LI(dst_reg_h, 0)); + } if (BPF_MODE(code) == BPF_PROBE_MEM) { int insn_idx = ctx->idx - 1; @@ -1010,13 +1098,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * 16 byte instruction that uses two 'struct bpf_insn' */ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ - tmp_idx = ctx->idx; PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); PPC_LI32(dst_reg, (u32)insn[i].imm); - /* padding to allow full 4 instructions for later patching */ - if (!image) - for (j = ctx->idx - tmp_idx; j < 4; j++) - EMIT(PPC_RAW_NOP()); /* Adjust for two bpf instructions */ addrs[++i] = ctx->idx * 4; break; @@ -1068,6 +1151,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code case BPF_JMP | BPF_JA: PPC_JMP(addrs[i + 1 + off]); break; + case BPF_JMP32 | BPF_JA: + PPC_JMP(addrs[i + 1 + imm]); + break; case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 79f23974a320..5daa77aee7f7 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -84,7 +84,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx) } /* - * When not setting up our own stackframe, the redzone usage is: + * When not setting up our own stackframe, the redzone (288 bytes) usage is: * * [ prev sp ] <------------- * [ ... 
] | @@ -92,7 +92,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx) * [ nv gpr save area ] 5*8 * [ tail_call_cnt ] 8 * [ local_tmp_var ] 16 - * [ unused red zone ] 208 bytes protected + * [ unused red zone ] 224 */ static int bpf_jit_stack_local(struct codegen_context *ctx) { @@ -126,6 +126,9 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) { int i; + /* Instruction for trampoline attach */ + EMIT(PPC_RAW_NOP()); + #ifndef CONFIG_PPC_KERNEL_PCREL if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))); @@ -200,31 +203,66 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0))); EMIT(PPC_RAW_BLR()); + + bpf_jit_build_fentry_stubs(image, ctx); } -static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func) +int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func) { unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0; long reladdr; - if (WARN_ON_ONCE(!core_kernel_text(func_addr))) - return -EINVAL; - - if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) { - reladdr = func_addr - CTX_NIA(ctx); - - if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) { - pr_err("eBPF: address of %ps out of range of pcrel address.\n", - (void *)func); - return -ERANGE; - } - /* pla r12,addr */ - EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr)); - EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr)); + /* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */ + if (!func) { + for (int i = 0; i < 5; i++) + EMIT(PPC_RAW_NOP()); + /* elfv1 needs an additional instruction to load addr from descriptor */ + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) + EMIT(PPC_RAW_NOP()); EMIT(PPC_RAW_MTCTR(_R12)); - EMIT(PPC_RAW_BCTR()); + EMIT(PPC_RAW_BCTRL()); + return 0; + } + +#ifdef CONFIG_PPC_KERNEL_PCREL + reladdr = func_addr - local_paca->kernelbase; + /* + * If fimage is NULL (the initial pass to find image size), + * account for the maximum no. of instructions possible. + */ + if (!fimage) { + ctx->idx += 7; + return 0; + } else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) { + EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase))); + /* Align for subsequent prefix instruction */ + if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8)) + EMIT(PPC_RAW_NOP()); + /* paddi r12,r12,addr */ + EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr)); + EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr)); } else { + unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx); + bool alignment_needed = !IS_ALIGNED(pc, 8); + + reladdr = func_addr - (alignment_needed ? 
pc + 4 : pc); + + if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) { + if (alignment_needed) + EMIT(PPC_RAW_NOP()); + /* pla r12,addr */ + EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr)); + EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr)); + } else { + /* We can clobber r12 */ + PPC_LI64(_R12, func); + } + } + EMIT(PPC_RAW_MTCTR(_R12)); + EMIT(PPC_RAW_BCTRL()); +#else + if (core_kernel_text(func_addr)) { reladdr = func_addr - kernel_toc_addr(); if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func); @@ -235,40 +273,34 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr))); EMIT(PPC_RAW_MTCTR(_R12)); EMIT(PPC_RAW_BCTRL()); + } else { + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) { + /* func points to the function descriptor */ + PPC_LI64(bpf_to_ppc(TMP_REG_2), func); + /* Load actual entry point from function descriptor */ + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0)); + /* ... and move it to CTR */ + EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1))); + /* + * Load TOC from function descriptor at offset 8. + * We can clobber r2 since we get called through a + * function pointer (so caller will save/restore r2). + */ + if (is_module_text_address(func_addr)) + EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8)); + } else { + PPC_LI64(_R12, func); + EMIT(PPC_RAW_MTCTR(_R12)); + } + EMIT(PPC_RAW_BCTRL()); + /* + * Load r2 with kernel TOC as kernel TOC is used if function address falls + * within core kernel text. + */ + if (is_module_text_address(func_addr)) + EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc))); } - - return 0; -} - -int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func) -{ - unsigned int i, ctx_idx = ctx->idx; - - if (WARN_ON_ONCE(func && is_module_text_address(func))) - return -EINVAL; - - /* skip past descriptor if elf v1 */ - func += FUNCTION_DESCR_SIZE; - - /* Load function address into r12 */ - PPC_LI64(_R12, func); - - /* For bpf-to-bpf function calls, the callee's address is unknown - * until the last extra pass. As seen above, we use PPC_LI64() to - * load the callee's address, but this may optimize the number of - * instructions required based on the nature of the address. - * - * Since we don't want the number of instructions emitted to increase, - * we pad the optimized PPC_LI64() call with NOPs to guarantee that - * we always have a five-instruction sequence, which is the maximum - * that PPC_LI64() can emit. 
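The five-instruction ceiling this (now removed) comment refers to is the worst-case materialization of an arbitrary 64-bit immediate, conventionally:

        lis     r12, imm@highest        /* bits 63..48 */
        ori     r12, r12, imm@higher    /* bits 47..32 */
        rldicr  r12, r12, 32, 31        /* shift into the upper half */
        oris    r12, r12, imm@h         /* bits 31..16 */
        ori     r12, r12, imm@l         /* bits 15..0 */

Padding shorter expansions with nops kept the call site size-stable across JIT passes; the five placeholder nops emitted for unknown callees in the new bpf_jit_emit_func_call_rel() above serve the same purpose.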
- */ - if (!image) - for (i = ctx->idx - ctx_idx; i < 5; i++) - EMIT(PPC_RAW_NOP()); - - EMIT(PPC_RAW_MTCTR(_R12)); - EMIT(PPC_RAW_BCTRL()); +#endif return 0; } @@ -283,9 +315,9 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o */ int b2p_bpf_array = bpf_to_ppc(BPF_REG_2); int b2p_index = bpf_to_ppc(BPF_REG_3); - int bpf_tailcall_prologue_size = 8; + int bpf_tailcall_prologue_size = 12; - if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) + if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) bpf_tailcall_prologue_size += 4; /* skip past the toc load */ /* @@ -387,7 +419,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code u64 imm64; u32 true_cond; u32 tmp_idx; - int j; /* * addrs[] maps a BPF bytecode address into a real offset from @@ -467,20 +498,33 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */ case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */ if (BPF_OP(code) == BPF_MOD) { - EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg)); + if (off) + EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg)); + EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg)); EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); } else - EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg)); + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */ if (BPF_OP(code) == BPF_MOD) { - EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg)); + if (off) + EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg)); EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg)); EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); } else - EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg)); + if (off) + EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg)); + else + EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg)); break; case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */ @@ -501,19 +545,31 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code switch (BPF_CLASS(code)) { case BPF_ALU: if (BPF_OP(code) == BPF_MOD) { - EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg)); + if (off) + EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg)); EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg)); EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); } else - EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg)); + if (off) + EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg)); break; case BPF_ALU64: if (BPF_OP(code) == BPF_MOD) { - EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg)); + if (off) + EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg)); EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg)); EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg)); } else - EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg)); + if (off) + EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg)); + else + EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg)); break; } goto bpf_alu32_trunc; @@ -633,8 +689,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code /* special mov32 for zext */ 
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31)); break; - } - EMIT(PPC_RAW_MR(dst_reg, src_reg)); + } else if (off == 8) { + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); + } else if (off == 16) { + EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); + } else if (off == 32) { + EMIT(PPC_RAW_EXTSW(dst_reg, src_reg)); + } else if (dst_reg != src_reg) + EMIT(PPC_RAW_MR(dst_reg, src_reg)); goto bpf_alu32_trunc; case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */ @@ -656,11 +718,12 @@ bpf_alu32_trunc: */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: #ifdef __BIG_ENDIAN__ if (BPF_SRC(code) == BPF_FROM_BE) goto emit_clear; #else /* !__BIG_ENDIAN__ */ - if (BPF_SRC(code) == BPF_FROM_LE) + if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE) goto emit_clear; #endif switch (imm) { @@ -803,6 +866,15 @@ emit_clear: /* Get offset into TMP_REG_1 */ EMIT(PPC_RAW_LI(tmp1_reg, off)); + /* + * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync' + * before and after the operation. + * + * This is a requirement in the Linux Kernel Memory Model. + * See __cmpxchg_u64() in asm/cmpxchg.h as an example. + */ + if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); tmp_idx = ctx->idx * 4; /* load value from memory into TMP_REG_2 */ if (size == BPF_DW) @@ -865,6 +937,9 @@ emit_clear: PPC_BCC_SHORT(COND_NE, tmp_idx); if (imm & BPF_FETCH) { + /* Emit 'sync' to enforce full ordering */ + if (IS_ENABLED(CONFIG_SMP)) + EMIT(PPC_RAW_SYNC()); EMIT(PPC_RAW_MR(ret_reg, _R0)); /* * Skip unnecessary zero-extension for 32-bit cmpxchg. @@ -881,13 +956,19 @@ emit_clear: */ /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEMSX | BPF_B: case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: /* dst = *(u16 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEMSX | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEMSX | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_MEM | BPF_DW: case BPF_LDX | BPF_PROBE_MEM | BPF_DW: @@ -897,7 +978,7 @@ emit_clear: * load only if addr is kernel address (see is_kernel_addr()), otherwise * set dst_reg=0 and move on. */ - if (BPF_MODE(code) == BPF_PROBE_MEM) { + if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) { EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off)); if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) PPC_LI64(tmp2_reg, 0x8000000000000000ul); @@ -910,30 +991,47 @@ emit_clear: * Check if 'off' is word aligned for BPF_DW, because * we might generate two instructions. 
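On the 64-bit JIT the sign-extending loads map even more directly, since the ISA has sign-extending halfword and word loads; only the byte case needs a separate extend, which is why the fault-path guard being adjusted here must sometimes skip two instructions:

        dst = (s64)*(s8 *)(src + off);  /* lbz + extsb: two instructions */
        dst = (s64)*(s16 *)(src + off); /* lha */
        dst = (s64)*(s32 *)(src + off); /* lwa: needs a word-aligned offset */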
*/ - if (BPF_SIZE(code) == BPF_DW && (off & 3)) + if ((BPF_SIZE(code) == BPF_DW || + (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) && + (off & 3)) PPC_JMP((ctx->idx + 3) * 4); else PPC_JMP((ctx->idx + 2) * 4); } - switch (size) { - case BPF_B: - EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); - break; - case BPF_H: - EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); - break; - case BPF_W: - EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); - break; - case BPF_DW: - if (off % 4) { - EMIT(PPC_RAW_LI(tmp1_reg, off)); - EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg)); - } else { - EMIT(PPC_RAW_LD(dst_reg, src_reg, off)); + if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg)); + break; + case BPF_H: + EMIT(PPC_RAW_LHA(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWA(dst_reg, src_reg, off)); + break; + } + } else { + switch (size) { + case BPF_B: + EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); + break; + case BPF_H: + EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); + break; + case BPF_W: + EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); + break; + case BPF_DW: + if (off % 4) { + EMIT(PPC_RAW_LI(tmp1_reg, off)); + EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg)); + } else { + EMIT(PPC_RAW_LD(dst_reg, src_reg, off)); + } + break; } - break; } if (size != BPF_DW && insn_is_zext(&insn[i + 1])) @@ -954,12 +1052,7 @@ emit_clear: case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ imm64 = ((u64)(u32) insn[i].imm) | (((u64)(u32) insn[i+1].imm) << 32); - tmp_idx = ctx->idx; PPC_LI64(dst_reg, imm64); - /* padding to allow full 5 instructions for later patching */ - if (!image) - for (j = ctx->idx - tmp_idx; j < 5; j++) - EMIT(PPC_RAW_NOP()); /* Adjust for two bpf instructions */ addrs[++i] = ctx->idx * 4; break; @@ -992,11 +1085,7 @@ emit_clear: if (ret < 0) return ret; - if (func_addr_fixed) - ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr); - else - ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); - + ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); if (ret) return ret; @@ -1010,6 +1099,9 @@ emit_clear: case BPF_JMP | BPF_JA: PPC_JMP(addrs[i + 1 + off]); break; + case BPF_JMP32 | BPF_JA: + PPC_JMP(addrs[i + 1 + imm]); + break; case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index 308a2e40d7be..1d2972229e3a 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -14,7 +14,7 @@ #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/inst.h> #define PERF_8xx_ID_CPU_CYCLES 1 diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index 4f53d0b97539..7f53fcb7495a 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -16,6 +16,10 @@ obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o +obj-$(CONFIG_VPA_PMU) += vpa-pmu.o + +obj-$(CONFIG_KVM_BOOK3S_HV_PMU) += kvm-hv-pmu.o + obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 6b4434dd0ff3..26aa26482c9a 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -51,7 +51,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re lr = 
regs->link; sp = regs->gpr[1]; - perf_callchain_store(entry, perf_instruction_pointer(regs)); + perf_callchain_store(entry, perf_arch_instruction_pointer(regs)); if (!validate_sp(sp, current)) return; diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c index ea8cfe3806dc..ddcc2d8aa64a 100644 --- a/arch/powerpc/perf/callchain_32.c +++ b/arch/powerpc/perf/callchain_32.c @@ -139,7 +139,7 @@ void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, long level = 0; unsigned int __user *fp, *uregs; - next_ip = perf_instruction_pointer(regs); + next_ip = perf_arch_instruction_pointer(regs); lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c index 488e8a21a11e..115d1c105e8a 100644 --- a/arch/powerpc/perf/callchain_64.c +++ b/arch/powerpc/perf/callchain_64.c @@ -74,7 +74,7 @@ void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, struct signal_frame_64 __user *sigframe; unsigned long __user *fp, *uregs; - next_ip = perf_instruction_pointer(regs); + next_ip = perf_arch_instruction_pointer(regs); lr = regs->link; sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 6b5f8a94e7d8..8b0081441f85 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -16,7 +16,7 @@ #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/ptrace.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/hw_irq.h> #include <asm/interrupt.h> @@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) static inline void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {} -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {} +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) +{ +} static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} static void pmao_restore_workaround(bool ebb) { } #endif /* CONFIG_PPC32 */ @@ -266,51 +269,44 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs) static inline u32 perf_get_misc_flags(struct pt_regs *regs) { bool use_siar = regs_use_siar(regs); - unsigned long mmcra = regs->dsisr; - int marked = mmcra & MMCRA_SAMPLE_ENABLE; + unsigned long siar; + unsigned long addr; if (!use_siar) return perf_flags_from_msr(regs); /* - * Check the address in SIAR to identify the - * privilege levels since the SIER[MSR_HV, MSR_PR] - * bits are not set for marked events in power10 - * DD1. 
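The rework below folds this DD1-specific fallback into a general PPMU_P10 rule; the resulting classification order can be summarized as:

        /*
         * PPMU_NO_SIPR            -> classify by the SIAR address
         * SIPR set, not PPMU_P10  -> user
         * SIPR clear, SIHV set    -> hypervisor (unless FCHV freezing)
         * PPMU_P10 (remaining)    -> by SIAR address, or NIP if SIAR == 0
         * otherwise               -> kernel
         */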
- */ - if (marked && (ppmu->flags & PPMU_P10_DD1)) { - unsigned long siar = mfspr(SPRN_SIAR); - if (siar) { - if (is_kernel_addr(siar)) - return PERF_RECORD_MISC_KERNEL; - return PERF_RECORD_MISC_USER; - } else { - if (is_kernel_addr(regs->nip)) - return PERF_RECORD_MISC_KERNEL; - return PERF_RECORD_MISC_USER; - } - } - - /* * If we don't have flags in MMCRA, rather than using * the MSR, we intuit the flags from the address in * SIAR which should give slightly more reliable * results */ if (ppmu->flags & PPMU_NO_SIPR) { - unsigned long siar = mfspr(SPRN_SIAR); + siar = mfspr(SPRN_SIAR); if (is_kernel_addr(siar)) return PERF_RECORD_MISC_KERNEL; return PERF_RECORD_MISC_USER; } /* PR has priority over HV, so order below is important */ - if (regs_sipr(regs)) - return PERF_RECORD_MISC_USER; - - if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV)) + if (regs_sipr(regs)) { + if (!(ppmu->flags & PPMU_P10)) + return PERF_RECORD_MISC_USER; + } else if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV)) return PERF_RECORD_MISC_HYPERVISOR; + /* + * Check the address in SIAR to identify the + * privilege levels since the SIER[MSR_HV, MSR_PR] + * bits are not set correctly in power10 sometimes + */ + if (ppmu->flags & PPMU_P10) { + siar = mfspr(SPRN_SIAR); + addr = siar ? siar : regs->nip; + if (!is_kernel_addr(addr)) + return PERF_RECORD_MISC_USER; + } + return PERF_RECORD_MISC_KERNEL; } @@ -451,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event) /* Called from ctxsw to prevent one process's branch entries to * mingle with the other process's entries during context switch. */ -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { if (!ppmu->bhrb_nr) return; @@ -2229,6 +2226,10 @@ static struct pmu power_pmu = { #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ PERF_SAMPLE_PHYS_ADDR | \ PERF_SAMPLE_DATA_PAGE_SIZE) + +#define SIER_TYPE_SHIFT 15 +#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT) + /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled @@ -2238,6 +2239,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs) { u64 period = event->hw.sample_period; + const u64 last_period = event->hw.last_period; s64 prev, delta, left; int record = 0; @@ -2298,12 +2300,28 @@ static void record_and_restart(struct perf_event *event, unsigned long val, record = 0; /* + * SIER[46-48] presents the instruction type of the sampled instruction. + * In ISA v3.0 and before, values "0" and "7" are considered reserved. + * In ISA v3.1, value "7" has been used to indicate "larx/stcx". + * Drop the sample if "type" has a reserved value for this field, with an + * ISA version check. + */ + if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && + ppmu->get_mem_data_src) { + val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT; + if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) { + record = 0; + atomic64_inc(&event->lost_samples); + } + } + + /* * Finally record data if requested. 
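The sample-drop logic added above is easier to read pulled out as a predicate; a sketch (hypothetical helper, not part of the patch):

static bool sier_type_is_reserved(u64 sier)
{
        u64 type = (sier & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;

        /* 0 is reserved everywhere; 7 means larx/stcx only from ISA v3.1 */
        return type == 0 || (type == 7 && !cpu_has_feature(CPU_FTR_ARCH_31));
}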
*/ if (record) { struct perf_sample_data data; - perf_sample_data_init(&data, ~0ULL, event->hw.last_period); + perf_sample_data_init(&data, ~0ULL, last_period); if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE) perf_get_data_addr(event, regs, &data.addr); @@ -2326,12 +2344,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type); data.sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; } - if (perf_event_overflow(event, &data, regs)) - power_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } else if (period) { /* Account for interrupt in case of invalid SIAR */ - if (perf_event_account_interrupt(event)) - power_pmu_stop(event, 0); + perf_event_account_interrupt(event); } } @@ -2339,7 +2355,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, * Called from generic code to get the misc flags (i.e. processor mode) * for an event_id. */ -unsigned long perf_misc_flags(struct pt_regs *regs) +unsigned long perf_arch_misc_flags(struct pt_regs *regs) { u32 flags = perf_get_misc_flags(regs); @@ -2353,7 +2369,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) * Called from generic code to get the instruction pointer * for an event_id. */ -unsigned long perf_instruction_pointer(struct pt_regs *regs) +unsigned long perf_arch_instruction_pointer(struct pt_regs *regs) { unsigned long siar = mfspr(SPRN_SIAR); diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index 1a53ab08447c..7120ab20cbfe 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -590,6 +590,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs) { u64 period = event->hw.sample_period; + const u64 last_period = event->hw.last_period; s64 prev, delta, left; int record = 0; @@ -632,10 +633,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (record) { struct perf_sample_data data; - perf_sample_data_init(&data, 0, event->hw.last_period); + perf_sample_data_init(&data, 0, last_period); - if (perf_event_overflow(event, &data, regs)) - fsl_emb_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } } diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 057ec2e3451d..b0768f3d2893 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -425,16 +425,6 @@ static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp) return kasprintf(gfp, "%.*s", max_len, maybe_str); } -static ssize_t device_show_string(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dev_ext_attribute *d; - - d = container_of(attr, struct dev_ext_attribute, attr); - - return sprintf(buf, "%s\n", (char *)d->var); -} - static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1008,7 +998,7 @@ e_out: } static ssize_t catalog_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, + const struct bin_attribute *bin_attr, char *buf, loff_t offset, size_t count) { long hret; @@ -1118,14 +1108,14 @@ PAGE_0_ATTR(catalog_version, "%lld\n", (unsigned long long)be64_to_cpu(page_0->version)); PAGE_0_ATTR(catalog_len, "%lld\n", (unsigned long long)be32_to_cpu(page_0->length) * 4096); -static BIN_ATTR_RO(catalog, 0/* real length varies */); +static const BIN_ATTR_RO(catalog, 0/* real length varies */); static DEVICE_ATTR_RO(domains); static DEVICE_ATTR_RO(sockets); static 
DEVICE_ATTR_RO(chipspersocket); static DEVICE_ATTR_RO(coresperchip); static DEVICE_ATTR_RO(cpumask); -static struct bin_attribute *if_bin_attrs[] = { +static const struct bin_attribute *const if_bin_attrs[] = { &bin_attr_catalog, NULL, }; @@ -1151,7 +1141,7 @@ static struct attribute *if_attrs[] = { static const struct attribute_group if_group = { .name = "interface", - .bin_attrs = if_bin_attrs, + .bin_attrs_new = if_bin_attrs, .attrs = if_attrs, }; diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 56301b2bc8ae..2b3547fdba4a 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -319,10 +319,18 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, return; } - sier = mfspr(SPRN_SIER); + /* + * Use regs->dar for SPRN_SIER, which is saved + * during perf_read_regs at the beginning + * of the PMU interrupt handler, to avoid multiple + * reads of SPRN_SIER + */ + sier = regs->dar; val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; - if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) + if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) { + dsrc->val = 0; return; + } idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; @@ -338,8 +346,12 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, * to determine the exact instruction type. If the sampling * criteria is neither load or store, set the type as default * to NA. + * + * Use regs->dsisr for MMCRA, which is saved during perf_read_regs + * at the beginning of the PMU interrupt handler, to avoid + * multiple reads of SPRN_MMCRA */ - mmcra = mfspr(SPRN_MMCRA); + mmcra = regs->dsisr; op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK; switch (op_type) { diff --git a/arch/powerpc/perf/kvm-hv-pmu.c b/arch/powerpc/perf/kvm-hv-pmu.c new file mode 100644 index 000000000000..ae264c9080ef --- /dev/null +++ b/arch/powerpc/perf/kvm-hv-pmu.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Description: PMUs specific to running nested KVM-HV guests + * on Book3S processors (specifically POWER9 and later). 
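The isa207_get_mem_data_src() change above relies on the PMI entry path having already captured the relevant SPRs into otherwise-unused pt_regs slots; in core-book3s.c, perf_read_regs() does roughly:

        regs->dsisr = mmcra;                    /* stash MMCRA */
        if (ppmu->flags & PPMU_HAS_SIER)
                regs->dar = mfspr(SPRN_SIER);   /* stash SIER */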
+ */ + +#define pr_fmt(fmt) "kvmppc-pmu: " fmt + +#include "asm-generic/local64.h" +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/ratelimit.h> +#include <linux/kvm_host.h> +#include <linux/gfp_types.h> +#include <linux/pgtable.h> +#include <linux/perf_event.h> +#include <linux/spinlock_types.h> +#include <linux/spinlock.h> + +#include <asm/types.h> +#include <asm/kvm_ppc.h> +#include <asm/kvm_book3s.h> +#include <asm/mmu.h> +#include <asm/pgalloc.h> +#include <asm/pte-walk.h> +#include <asm/reg.h> +#include <asm/plpar_wrappers.h> +#include <asm/firmware.h> + +#include "asm/guest-state-buffer.h" + +enum kvmppc_pmu_eventid { + KVMPPC_EVENT_HOST_HEAP, + KVMPPC_EVENT_HOST_HEAP_MAX, + KVMPPC_EVENT_HOST_PGTABLE, + KVMPPC_EVENT_HOST_PGTABLE_MAX, + KVMPPC_EVENT_HOST_PGTABLE_RECLAIM, + KVMPPC_EVENT_MAX, +}; + +#define KVMPPC_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, kvmppc_events_sysfs_show, _id) + +static ssize_t kvmppc_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +/* Holds the hostwide stats */ +static struct kvmppc_hostwide_stats { + u64 guest_heap; + u64 guest_heap_max; + u64 guest_pgtable_size; + u64 guest_pgtable_size_max; + u64 guest_pgtable_reclaim; +} l0_stats; + +/* Protect access to l0_stats */ +static DEFINE_SPINLOCK(lock_l0_stats); + +/* GSB related structs needed to talk to L0 */ +static struct kvmppc_gs_msg *gsm_l0_stats; +static struct kvmppc_gs_buff *gsb_l0_stats; +static struct kvmppc_gs_parser gsp_l0_stats; + +static struct attribute *kvmppc_pmu_events_attr[] = { + KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP), + KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX), + KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE), + KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX), + KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM), + NULL, +}; + +static const struct attribute_group kvmppc_pmu_events_group = { + .name = "events", + .attrs = kvmppc_pmu_events_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-5"); +static struct attribute *kvmppc_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group kvmppc_pmu_format_group = { + .name = "format", + .attrs = kvmppc_pmu_format_attr, +}; + +static const struct attribute_group *kvmppc_pmu_attr_groups[] = { + &kvmppc_pmu_events_group, + &kvmppc_pmu_format_group, + NULL, +}; + +/* + * Issue the hcall to get the L0-host stats. 
+ * Should be called with the l0_stats lock held + */ +static int kvmppc_update_l0_stats(void) +{ + int rc; + + /* With the HOST_WIDE flag, guestid and vcpuid will be ignored */ + rc = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE); + if (rc) + goto out; + + /* Parse the received guest state buffer */ + rc = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats); + if (rc) + goto out; + + /* Update with the stats returned by the L0 */ + memset(&l0_stats, 0, sizeof(l0_stats)); + rc = kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats); + +out: + return rc; +} + +/* Update the value of the given perf_event */ +static int kvmppc_pmu_event_update(struct perf_event *event) +{ + int rc; + u64 curr_val, prev_val; + unsigned long flags; + unsigned int config = event->attr.config; + + /* Ensure no one else is modifying the l0_stats */ + spin_lock_irqsave(&lock_l0_stats, flags); + + rc = kvmppc_update_l0_stats(); + if (!rc) { + switch (config) { + case KVMPPC_EVENT_HOST_HEAP: + curr_val = l0_stats.guest_heap; + break; + case KVMPPC_EVENT_HOST_HEAP_MAX: + curr_val = l0_stats.guest_heap_max; + break; + case KVMPPC_EVENT_HOST_PGTABLE: + curr_val = l0_stats.guest_pgtable_size; + break; + case KVMPPC_EVENT_HOST_PGTABLE_MAX: + curr_val = l0_stats.guest_pgtable_size_max; + break; + case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM: + curr_val = l0_stats.guest_pgtable_reclaim; + break; + default: + rc = -ENOENT; + break; + } + } + + spin_unlock_irqrestore(&lock_l0_stats, flags); + + /* If there was no error, update the perf event */ + if (!rc) { + prev_val = local64_xchg(&event->hw.prev_count, curr_val); + if (curr_val > prev_val) + local64_add(curr_val - prev_val, &event->count); + } + + return rc; +} + +static int kvmppc_pmu_event_init(struct perf_event *event) +{ + unsigned int config = event->attr.config; + + pr_debug("%s: Event(%p) id=%llu cpu=%x on_cpu=%x config=%u", + __func__, event, event->id, event->cpu, + event->oncpu, config); + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (config >= KVMPPC_EVENT_MAX) + return -EINVAL; + + local64_set(&event->hw.prev_count, 0); + local64_set(&event->count, 0); + + return 0; +} + +static void kvmppc_pmu_del(struct perf_event *event, int flags) +{ + kvmppc_pmu_event_update(event); +} + +static int kvmppc_pmu_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + return kvmppc_pmu_event_update(event); + return 0; +} + +static void kvmppc_pmu_read(struct perf_event *event) +{ + kvmppc_pmu_event_update(event); +} + +/* Return the size of the needed guest state buffer */ +static size_t hostwide_get_size(struct kvmppc_gs_msg *gsm) + +{ + size_t size = 0; + const u16 ids[] = { + KVMPPC_GSID_L0_GUEST_HEAP, + KVMPPC_GSID_L0_GUEST_HEAP_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM + }; + + for (int i = 0; i < ARRAY_SIZE(ids); i++) + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); + return size; +} + +/* Populate the request guest state buffer */ +static int hostwide_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + int rc = 0; + struct kvmppc_hostwide_stats *stats = gsm->data; + + /* + * It doesn't matter what values are put into the request buffer as + * they are going to be overwritten anyway. But for the sake of + * test code and symmetry, the contents of the existing stats are + * populated into the request guest state buffer. 
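The three callbacks wired up below in gsb_ops_l0_stats combine into one refresh cycle; schematically (a sketch of the flow, error handling elided):

        gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
        kvmppc_gsm_fill_info(gsm, gsb);         /* -> hostwide_fill_info() */
        kvmppc_gsb_recv(gsb, KVMPPC_GS_FLAGS_HOST_WIDE); /* hcall to the L0 */
        kvmppc_gsm_refresh_info(gsm, gsb);      /* -> hostwide_refresh_info() */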
+ */ + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_HEAP, + stats->guest_heap); + + if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_HEAP_MAX, + stats->guest_heap_max); + + if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + stats->guest_pgtable_size); + if (!rc && + kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + stats->guest_pgtable_size_max); + if (!rc && + kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM, + stats->guest_pgtable_reclaim); + + return rc; +} + +/* Parse and update the host wide stats from returned gsb */ +static int hostwide_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_hostwide_stats *stats = gsm->data; + struct kvmppc_gs_elem *gse; + int rc; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) + return rc; + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); + if (gse) + stats->guest_heap = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + if (gse) + stats->guest_heap_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + if (gse) + stats->guest_pgtable_size = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + if (gse) + stats->guest_pgtable_size_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + if (gse) + stats->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse); + + return 0; +} + +/* gsb-message ops for setting up/parsing */ +static struct kvmppc_gs_msg_ops gsb_ops_l0_stats = { + .get_size = hostwide_get_size, + .fill_info = hostwide_fill_info, + .refresh_info = hostwide_refresh_info, +}; + +static int kvmppc_init_hostwide(void) +{ + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&lock_l0_stats, flags); + + /* already registered ? */ + if (gsm_l0_stats) { + rc = 0; + goto out; + } + + /* setup the Guest state message/buffer to talk to L0 */ + gsm_l0_stats = kvmppc_gsm_new(&gsb_ops_l0_stats, &l0_stats, + GSM_SEND, GFP_KERNEL); + if (!gsm_l0_stats) { + rc = -ENOMEM; + goto out; + } + + /* Populate the Idents */ + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + + /* allocate GSB. 
Guest/Vcpu Id is ignored */ + gsb_l0_stats = kvmppc_gsb_new(kvmppc_gsm_size(gsm_l0_stats), 0, 0, + GFP_KERNEL); + if (!gsb_l0_stats) { + rc = -ENOMEM; + goto out; + } + + /* ask the ops to fill in the info */ + rc = kvmppc_gsm_fill_info(gsm_l0_stats, gsb_l0_stats); + +out: + if (rc) { + if (gsm_l0_stats) + kvmppc_gsm_free(gsm_l0_stats); + if (gsb_l0_stats) + kvmppc_gsb_free(gsb_l0_stats); + gsm_l0_stats = NULL; + gsb_l0_stats = NULL; + } + spin_unlock_irqrestore(&lock_l0_stats, flags); + return rc; +} + +static void kvmppc_cleanup_hostwide(void) +{ + unsigned long flags; + + spin_lock_irqsave(&lock_l0_stats, flags); + + if (gsm_l0_stats) + kvmppc_gsm_free(gsm_l0_stats); + if (gsb_l0_stats) + kvmppc_gsb_free(gsb_l0_stats); + gsm_l0_stats = NULL; + gsb_l0_stats = NULL; + + spin_unlock_irqrestore(&lock_l0_stats, flags); +} + +/* L1 wide counters PMU */ +static struct pmu kvmppc_pmu = { + .module = THIS_MODULE, + .task_ctx_nr = perf_sw_context, + .name = "kvm-hv", + .event_init = kvmppc_pmu_event_init, + .add = kvmppc_pmu_add, + .del = kvmppc_pmu_del, + .read = kvmppc_pmu_read, + .attr_groups = kvmppc_pmu_attr_groups, + .type = -1, + .scope = PERF_PMU_SCOPE_SYS_WIDE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, +}; + +static int __init kvmppc_register_pmu(void) +{ + int rc = -EOPNOTSUPP; + + /* only support events for nestedv2 right now */ + if (kvmhv_is_nestedv2()) { + rc = kvmppc_init_hostwide(); + if (rc) + goto out; + + /* Register the pmu */ + rc = perf_pmu_register(&kvmppc_pmu, kvmppc_pmu.name, -1); + if (rc) + goto out; + + pr_info("Registered kvm-hv pmu"); + } + +out: + return rc; +} + +static void __exit kvmppc_unregister_pmu(void) +{ + if (kvmhv_is_nestedv2()) { + kvmppc_cleanup_hostwide(); + + if (kvmppc_pmu.type != -1) + perf_pmu_unregister(&kvmppc_pmu); + + pr_info("kvmhv_pmu unregistered.\n"); + } +} + +module_init(kvmppc_register_pmu); +module_exit(kvmppc_unregister_pmu); +MODULE_DESCRIPTION("KVM PPC Book3s-hv PMU"); +MODULE_AUTHOR("Vaibhav Jain <vaibhav@linux.ibm.com>"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index 62a68b6b2d4b..bb57b7cfe640 100644 --- a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -593,7 +593,8 @@ static struct power_pmu power10_pmu = { .get_mem_weight = isa207_get_mem_weight, .disable_pmc = isa207_disable_pmc, .flags = PPMU_HAS_SIER | PPMU_ARCH_207S | - PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1, + PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1 | + PPMU_P10, .n_generic = ARRAY_SIZE(power10_generic_events), .generic_events = power10_generic_events, .cache_events = &power10_cache_events, diff --git a/arch/powerpc/perf/vpa-pmu.c b/arch/powerpc/perf/vpa-pmu.c new file mode 100644 index 000000000000..840733468959 --- /dev/null +++ b/arch/powerpc/perf/vpa-pmu.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Performance monitoring support for Virtual Processor Area(VPA) based counters + * + * Copyright (C) 2024 IBM Corporation + */ +#define pr_fmt(fmt) "vpa_pmu: " fmt + +#include <linux/module.h> +#include <linux/perf_event.h> +#include <asm/kvm_ppc.h> +#include <asm/kvm_book3s_64.h> + +#define MODULE_VERS "1.0" +#define MODULE_NAME "pseries_vpa_pmu" + +#define EVENT(_name, _code) enum{_name = _code} + +#define VPA_PMU_EVENT_VAR(_id) event_attr_##_id +#define VPA_PMU_EVENT_PTR(_id) (&event_attr_##_id.attr.attr) + +static ssize_t vpa_pmu_events_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct 
perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define VPA_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR(_name, VPA_PMU_EVENT_VAR(_id), _id, \ + vpa_pmu_events_sysfs_show) + +EVENT(L1_TO_L2_CS_LAT, 0x1); +EVENT(L2_TO_L1_CS_LAT, 0x2); +EVENT(L2_RUNTIME_AGG, 0x3); + +VPA_PMU_EVENT_ATTR(l1_to_l2_lat, L1_TO_L2_CS_LAT); +VPA_PMU_EVENT_ATTR(l2_to_l1_lat, L2_TO_L1_CS_LAT); +VPA_PMU_EVENT_ATTR(l2_runtime_agg, L2_RUNTIME_AGG); + +static struct attribute *vpa_pmu_events_attr[] = { + VPA_PMU_EVENT_PTR(L1_TO_L2_CS_LAT), + VPA_PMU_EVENT_PTR(L2_TO_L1_CS_LAT), + VPA_PMU_EVENT_PTR(L2_RUNTIME_AGG), + NULL +}; + +static const struct attribute_group vpa_pmu_events_group = { + .name = "events", + .attrs = vpa_pmu_events_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-31"); +static struct attribute *vpa_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group vpa_pmu_format_group = { + .name = "format", + .attrs = vpa_pmu_format_attr, +}; + +static const struct attribute_group *vpa_pmu_attr_groups[] = { + &vpa_pmu_events_group, + &vpa_pmu_format_group, + NULL +}; + +static int vpa_pmu_event_init(struct perf_event *event) +{ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* it does not support event sampling mode */ + if (is_sampling_event(event)) + return -EOPNOTSUPP; + + /* no branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + /* Invalid event code */ + if ((event->attr.config <= 0) || (event->attr.config > 3)) + return -EINVAL; + + return 0; +} + +static unsigned long get_counter_data(struct perf_event *event) +{ + unsigned int config = event->attr.config; + u64 data; + + switch (config) { + case L1_TO_L2_CS_LAT: + if (event->attach_state & PERF_ATTACH_TASK) + data = kvmhv_get_l1_to_l2_cs_time_vcpu(); + else + data = kvmhv_get_l1_to_l2_cs_time(); + break; + case L2_TO_L1_CS_LAT: + if (event->attach_state & PERF_ATTACH_TASK) + data = kvmhv_get_l2_to_l1_cs_time_vcpu(); + else + data = kvmhv_get_l2_to_l1_cs_time(); + break; + case L2_RUNTIME_AGG: + if (event->attach_state & PERF_ATTACH_TASK) + data = kvmhv_get_l2_runtime_agg_vcpu(); + else + data = kvmhv_get_l2_runtime_agg(); + break; + default: + data = 0; + break; + } + + return data; +} + +static int vpa_pmu_add(struct perf_event *event, int flags) +{ + u64 data; + + kvmhv_set_l2_counters_status(smp_processor_id(), true); + + data = get_counter_data(event); + local64_set(&event->hw.prev_count, data); + + return 0; +} + +static void vpa_pmu_read(struct perf_event *event) +{ + u64 prev_data, new_data, final_data; + + prev_data = local64_read(&event->hw.prev_count); + new_data = get_counter_data(event); + final_data = new_data - prev_data; + + local64_add(final_data, &event->count); +} + +static void vpa_pmu_del(struct perf_event *event, int flags) +{ + vpa_pmu_read(event); + + /* + * Disable vpa counter accumulation + */ + kvmhv_set_l2_counters_status(smp_processor_id(), false); +} + +static struct pmu vpa_pmu = { + .module = THIS_MODULE, + .task_ctx_nr = perf_sw_context, + .name = "vpa_pmu", + .event_init = vpa_pmu_event_init, + .add = vpa_pmu_add, + .del = vpa_pmu_del, + .read = vpa_pmu_read, + .attr_groups = vpa_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, +}; + +static int __init pseries_vpa_pmu_init(void) +{ + /* + * List of current Linux on Power platforms and + * this driver is supported only in PowerVM LPAR + * 
(L1) platform. + * + * Enabled Linux on Power Platforms + * ---------------------------------------- + * [X] PowerVM LPAR (L1) + * [ ] KVM Guest On PowerVM KoP(L2) + * [ ] Baremetal(PowerNV) + * [ ] KVM Guest On PowerNV + */ + if (!firmware_has_feature(FW_FEATURE_LPAR) || is_kvm_guest()) + return -ENODEV; + + perf_pmu_register(&vpa_pmu, vpa_pmu.name, -1); + pr_info("Virtual Processor Area PMU registered.\n"); + + return 0; +} + +static void __exit pseries_vpa_pmu_cleanup(void) +{ + perf_pmu_unregister(&vpa_pmu); + pr_info("Virtual Processor Area PMU unregistered.\n"); +} + +module_init(pseries_vpa_pmu_init); +module_exit(pseries_vpa_pmu_cleanup); +MODULE_DESCRIPTION("Perf Driver for pSeries VPA pmu counter"); +MODULE_AUTHOR("Kajol Jain <kjain@linux.ibm.com>"); +MODULE_AUTHOR("Madhavan Srinivasan <maddy@linux.ibm.com>"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig deleted file mode 100644 index b3c466c50535..000000000000 --- a/arch/powerpc/platforms/40x/Kconfig +++ /dev/null @@ -1,78 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config ACADIA - bool "Acadia" - depends on 40x - select PPC40x_SIMPLE - select 405EZ - help - This option enables support for the AMCC 405EZ Acadia evaluation board. - -config HOTFOOT - bool "Hotfoot" - depends on 40x - select PPC40x_SIMPLE - select FORCE_PCI - help - This option enables support for the ESTEEM 195E Hotfoot board. - -config KILAUEA - bool "Kilauea" - depends on 40x - select 405EX - select PPC40x_SIMPLE - select PPC4xx_PCI_EXPRESS - select FORCE_PCI - select PCI_MSI - help - This option enables support for the AMCC PPC405EX evaluation board. - -config MAKALU - bool "Makalu" - depends on 40x - select 405EX - select FORCE_PCI - select PPC4xx_PCI_EXPRESS - select PPC40x_SIMPLE - help - This option enables support for the AMCC PPC405EX board. - -config OBS600 - bool "OpenBlockS 600" - depends on 40x - select 405EX - select PPC40x_SIMPLE - help - This option enables support for PlatHome OpenBlockS 600 server - -config PPC40x_SIMPLE - bool "Simple PowerPC 40x board support" - depends on 40x - help - This option enables the simple PowerPC 40x platform support. - -config 405EX - bool - select IBM_EMAC_EMAC4 if IBM_EMAC - select IBM_EMAC_RGMII if IBM_EMAC - -config 405EZ - bool - select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC - select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC - select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC - -config PPC4xx_GPIO - bool "PPC4xx GPIO support" - depends on 40x - select GPIOLIB - select OF_GPIO_MM_GPIOCHIP - help - Enable gpiolib support for ppc40x based boards - -config APM8018X - bool "APM8018X" - depends on 40x - select PPC40x_SIMPLE - help - This option enables support for the AppliedMicro APM8018X evaluation - board. diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile deleted file mode 100644 index 122de98527c4..000000000000 --- a/arch/powerpc/platforms/40x/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c deleted file mode 100644 index 294ab2728588..000000000000 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ /dev/null @@ -1,74 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic PowerPC 40x platform support - * - * Copyright 2008 IBM Corporation - * - * This implements simple platform support for PowerPC 44x chips. 
This is - * mostly used for eval boards or other simple and "generic" 44x boards. If - * your board has custom functions or hardware, then you will likely want to - * implement your own board.c file to accommodate it. - */ - -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/ppc4xx.h> -#include <asm/time.h> -#include <asm/udbg.h> -#include <asm/uic.h> - -#include <linux/init.h> -#include <linux/of_platform.h> - -static const struct of_device_id ppc40x_of_bus[] __initconst = { - { .compatible = "ibm,plb3", }, - { .compatible = "ibm,plb4", }, - { .compatible = "ibm,opb", }, - { .compatible = "ibm,ebc", }, - { .compatible = "simple-bus", }, - {}, -}; - -static int __init ppc40x_device_probe(void) -{ - of_platform_bus_probe(NULL, ppc40x_of_bus, NULL); - - return 0; -} -machine_device_initcall(ppc40x_simple, ppc40x_device_probe); - -/* This is the list of boards that can be supported by this simple - * platform code. This does _not_ mean the boards are compatible, - * as they most certainly are not from a device tree perspective. - * However, their differences are handled by the device tree and the - * drivers and therefore they don't need custom board support files. - * - * Again, if your board needs to do things differently then create a - * board.c file for it rather than adding it to this list. - */ -static const char * const board[] __initconst = { - "amcc,acadia", - "amcc,haleakala", - "amcc,kilauea", - "amcc,makalu", - "apm,klondike", - "est,hotfoot", - "plathome,obs600", - NULL -}; - -static int __init ppc40x_probe(void) -{ - pci_set_flags(PCI_REASSIGN_ALL_RSRC); - return 1; -} - -define_machine(ppc40x_simple) { - .name = "PowerPC 40x Platform", - .compatibles = board, - .probe = ppc40x_probe, - .progress = udbg_progress, - .init_IRQ = uic_init_tree, - .get_irq = uic_get_irq, - .restart = ppc4xx_reset_system, -}; diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index 5ba031f57652..ca7b1bb442d9 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += misc_44x.o machine_check.o +obj-y += misc_44x.o machine_check.o uic.o soc.o ifneq ($(CONFIG_PPC4xx_CPM),y) obj-y += idle.o endif @@ -12,3 +12,7 @@ obj-$(CONFIG_CANYONLANDS)+= canyonlands.o obj-$(CONFIG_CURRITUCK) += ppc476.o obj-$(CONFIG_AKEBONO) += ppc476.o obj-$(CONFIG_FSP2) += fsp2.o +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o +obj-$(CONFIG_PPC4xx_CPM) += cpm.o +obj-$(CONFIG_PPC4xx_GPIO) += gpio.o diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/44x/cpm.c index 670f8ad4465b..670f8ad4465b 100644 --- a/arch/powerpc/platforms/4xx/cpm.c +++ b/arch/powerpc/platforms/44x/cpm.c diff --git a/arch/powerpc/platforms/4xx/gpio.c b/arch/powerpc/platforms/44x/gpio.c index e5f2319e5cbe..d540e261d85a 100644 --- a/arch/powerpc/platforms/4xx/gpio.c +++ b/arch/powerpc/platforms/44x/gpio.c @@ -75,8 +75,7 @@ __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) clrbits32(®s->or, GPIO_MASK(gpio)); } -static void -ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); unsigned long flags; @@ -88,6 +87,8 @@ ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) spin_unlock_irqrestore(&chip->lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; } static int 
ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -179,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void) gc->direction_input = ppc4xx_gpio_dir_in; gc->direction_output = ppc4xx_gpio_dir_out; gc->get = ppc4xx_gpio_get; - gc->set = ppc4xx_gpio_set; + gc->set_rv = ppc4xx_gpio_set; ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc); if (ret) diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/44x/hsta_msi.c index c6bd846b0d65..c6bd846b0d65 100644 --- a/arch/powerpc/platforms/4xx/hsta_msi.c +++ b/arch/powerpc/platforms/44x/hsta_msi.c diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c index 5d19daacd78a..85ff33a8d9b6 100644 --- a/arch/powerpc/platforms/44x/machine_check.c +++ b/arch/powerpc/platforms/44x/machine_check.c @@ -9,6 +9,21 @@ #include <asm/reg.h> #include <asm/cacheflush.h> +int machine_check_4xx(struct pt_regs *regs) +{ + unsigned long reason = regs->esr; + + if (reason & ESR_IMCP) { + printk("Instruction"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + } else + printk("Data"); + + printk(" machine check in kernel mode.\n"); + + return 0; +} + int machine_check_440A(struct pt_regs *regs) { unsigned long reason = regs->esr; diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/44x/pci.c index 48626615b18b..364aeb86ab64 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/44x/pci.c @@ -94,10 +94,8 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, struct resource *res) { u64 size; - const u32 *ranges; - int rlen; - int pna = of_n_addr_cells(hose->dn); - int np = pna + 5; + struct of_range_parser parser; + struct of_range range; /* Default */ res->start = 0; @@ -105,18 +103,15 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, res->end = size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; - /* Get dma-ranges property */ - ranges = of_get_property(hose->dn, "dma-ranges", &rlen); - if (ranges == NULL) + if (of_pci_dma_range_parser_init(&parser, hose->dn)) goto out; - /* Walk it */ - while ((rlen -= np * 4) >= 0) { - u32 pci_space = ranges[0]; - u64 pci_addr = of_read_number(ranges + 1, 2); - u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3); - size = of_read_number(ranges + pna + 3, 2); - ranges += np; + for_each_of_range(&parser, &range) { + u32 pci_space = range.flags; + u64 pci_addr = range.bus_addr; + u64 cpu_addr = range.cpu_addr; + size = range.size; + if (cpu_addr == OF_BAD_ADDR || size == 0) continue; @@ -1263,102 +1258,6 @@ static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { #endif /* CONFIG_44x */ -#ifdef CONFIG_40x - -static int __init ppc405ex_pciex_core_init(struct device_node *np) -{ - /* Nothing to do, return 2 ports */ - return 2; -} - -static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port) -{ - /* Assert the PE0_PHY reset */ - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000); - msleep(1); - - /* deassert the PE0_hotreset */ - if (port->endpoint) - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000); - else - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000); - - /* poll for phy !reset */ - /* XXX FIXME add timeout */ - while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000)) - ; - - /* deassert the PE0_gpl_utl_reset */ - mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000); -} - -static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port) -{ - u32 val; - - if (port->endpoint) - val = 
PTYPE_LEGACY_ENDPOINT; - else - val = PTYPE_ROOT_PORT; - - mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, - 1 << 24 | val << 20 | LNKW_X1 << 12); - - mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000); - mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000); - mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000); - mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003); - - /* - * Only reset the PHY when no link is currently established. - * This is for the Atheros PCIe board which has problems to establish - * the link (again) after this PHY reset. All other currently tested - * PCIe boards don't show this problem. - * This has to be re-tested and fixed in a later release! - */ - val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP); - if (!(val & 0x00001000)) - ppc405ex_pcie_phy_reset(port); - - dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */ - - port->has_ibpre = 1; - - return ppc4xx_pciex_port_reset_sdr(port); -} - -static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) -{ - dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0); - - /* - * Set buffer allocations and then assert VRB and TXE. - */ - out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000); - out_be32(port->utl_base + PEUTL_INTR, 0x02000000); - out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000); - out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000); - out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000); - out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000); - out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000); - out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); - - out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000); - - return 0; -} - -static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata = -{ - .want_sdr = true, - .core_init = ppc405ex_pciex_core_init, - .port_init_hw = ppc405ex_pciex_init_port_hw, - .setup_utl = ppc405ex_pciex_init_utl, - .check_link = ppc4xx_pciex_check_link_sdr, -}; - -#endif /* CONFIG_40x */ - #ifdef CONFIG_476FPE static int __init ppc_476fpe_pciex_core_init(struct device_node *np) { @@ -1427,10 +1326,6 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx")) ppc4xx_pciex_hwops = &apm821xx_pcie_hwops; #endif /* CONFIG_44x */ -#ifdef CONFIG_40x - if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) - ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops; -#endif #ifdef CONFIG_476FPE if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe") || of_device_is_compatible(np, "ibm,plb-pciex-476gtr")) diff --git a/arch/powerpc/platforms/4xx/pci.h b/arch/powerpc/platforms/44x/pci.h index bb4821938ab1..bb4821938ab1 100644 --- a/arch/powerpc/platforms/4xx/pci.h +++ b/arch/powerpc/platforms/44x/pci.h diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index 164cbcd4588e..e7b7bdaad341 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -95,7 +95,7 @@ static int avr_probe(struct i2c_client *client) } static const struct i2c_device_id avr_id[] = { - { "akebono-avr", 0 }, + { "akebono-avr" }, { } }; diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/44x/soc.c index 5412e6b21e10..5412e6b21e10 100644 --- a/arch/powerpc/platforms/4xx/soc.c +++ b/arch/powerpc/platforms/44x/soc.c diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/44x/uic.c index e3e148b9dd18..85daf841fd3f 100644 --- a/arch/powerpc/platforms/4xx/uic.c +++ b/arch/powerpc/platforms/44x/uic.c @@ -37,7 +37,7 @@ #define UIC_VR 
0x7 #define UIC_VCR 0x8 -struct uic *primary_uic; +static struct uic *primary_uic; struct uic { int index; @@ -254,8 +254,9 @@ static struct uic * __init uic_init_one(struct device_node *node) } uic->dcrbase = *dcrreg; - uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops, - uic); + uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), + NR_UIC_INTS, &uic_host_ops, + uic); if (! uic->irqhost) return NULL; /* FIXME: panic? */ @@ -291,7 +292,7 @@ void __init uic_init_tree(void) if (!primary_uic) panic("Unable to initialize primary UIC %pOF\n", np); - irq_set_default_host(primary_uic->irqhost); + irq_set_default_domain(primary_uic->irqhost); of_node_put(np); /* The scan again for cascaded UICs */ @@ -327,5 +328,5 @@ unsigned int uic_get_irq(void) msr = mfdcr(primary_uic->dcrbase + UIC_MSR); src = 32 - ffs(msr); - return irq_linear_revmap(primary_uic->irqhost, src); + return irq_find_mapping(primary_uic->irqhost, src); } diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile deleted file mode 100644 index 2071a0abe09b..000000000000 --- a/arch/powerpc/platforms/4xx/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-y += uic.o machine_check.o -obj-$(CONFIG_4xx_SOC) += soc.o -obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o -obj-$(CONFIG_PPC4xx_CPM) += cpm.o -obj-$(CONFIG_PPC4xx_GPIO) += gpio.o diff --git a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c deleted file mode 100644 index a905da1d6f41..000000000000 --- a/arch/powerpc/platforms/4xx/machine_check.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include <linux/kernel.h> -#include <linux/printk.h> -#include <linux/ptrace.h> - -#include <asm/reg.h> - -int machine_check_4xx(struct pt_regs *regs) -{ - unsigned long reason = regs->esr; - - if (reason & ESR_IMCP) { - printk("Instruction"); - mtspr(SPRN_ESR, reason & ~ESR_IMCP); - } else - printk("Data"); - printk(" machine check in kernel mode.\n"); - - return 0; -} diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index e995eb30bf09..2cf3c6237337 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -188,7 +188,8 @@ mpc5121_ads_cpld_pic_init(void) cpld_pic_node = of_node_get(np); - cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL); + cpld_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 16, + &cpld_pic_host_ops, NULL); if (!cpld_pic_host) { printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); goto end; diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c index 4a25b6b48615..9668b052cd4b 100644 --- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c +++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c @@ -504,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match); static struct platform_driver mpc512x_lpbfifo_driver = { .probe = mpc512x_lpbfifo_probe, - .remove_new = mpc512x_lpbfifo_remove, + .remove = mpc512x_lpbfifo_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc512x_lpbfifo_match, diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index 8f75e9574c27..8c1f3b629fc7 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -279,7 +279,7 @@ static void __init 
mpc512x_setup_diu(void) * and so negatively affect boot time. Instead we reserve the * already configured frame buffer area so that it won't be * destroyed. The starting address of the area to reserve and - * also it's length is passed to memblock_reserve(). It will be + * also its length is passed to memblock_reserve(). It will be * freed later on first open of fbdev, when splash image is not * needed any more. */ diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 37a67120f257..a7172f9ebaad 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c @@ -13,6 +13,7 @@ #include <generated/utsrelease.h> #include <linux/pci.h> #include <linux/of.h> +#include <linux/seq_file.h> #include <asm/dma.h> #include <asm/time.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index 0b12647e7b42..0ec2522ee4ad 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -203,7 +203,8 @@ lite5200_wakeup: /* HIDs, MSR */ LOAD_SPRN(HID1, 0x19) - LOAD_SPRN(HID2, 0x1a) + /* FIXME: Should this use HID2_G2_LE? */ + LOAD_SPRN(HID2_750FX, 0x1a) /* address translation is tricky (see turn_on_mmu) */ @@ -283,7 +284,8 @@ SYM_FUNC_START_LOCAL(save_regs) SAVE_SPRN(HID0, 0x18) SAVE_SPRN(HID1, 0x19) - SAVE_SPRN(HID2, 0x1a) + /* FIXME: Should this use HID2_G2_LE? */ + SAVE_SPRN(HID2_750FX, 0x1a) mfmsr r10 stw r10, (4*0x1b)(r4) /*SAVE_SPRN(LR, 0x1c) have to save it before the call */ diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 19626cd42406..bc7f83cfec1d 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -168,7 +168,7 @@ static void __init media5200_init_irq(void) spin_lock_init(&media5200_irq.lock); - media5200_irq.irqhost = irq_domain_add_linear(fpga_np, + media5200_irq.irqhost = irq_domain_create_linear(of_fwnode_handle(fpga_np), MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq); if (!media5200_irq.irqhost) goto out; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index b4938e344f71..253421ffb4e5 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c @@ -12,12 +12,10 @@ #undef DEBUG -#include <linux/gpio.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/of_gpio.h> #include <linux/export.h> #include <asm/io.h> #include <asm/mpc52xx.h> diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 581059527c36..bda707d848a6 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -48,6 +48,7 @@ * the output mode. This driver does not change the output mode setting. 
*/ +#include <linux/gpio/driver.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -56,7 +57,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/property.h> @@ -247,9 +247,9 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) if (!cascade_virq) return; - gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt); + gpt->irqhost = irq_domain_create_linear(of_fwnode_handle(node), 1, &mpc52xx_gpt_irq_ops, gpt); if (!gpt->irqhost) { - dev_err(gpt->dev, "irq_domain_add_linear() failed\n"); + dev_err(gpt->dev, "irq_domain_create_linear() failed\n"); return; } @@ -280,7 +280,7 @@ static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio) return (in_be32(&gpt->regs->status) >> 8) & 1; } -static void +static int mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v) { struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc); @@ -293,6 +293,8 @@ mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v) raw_spin_lock_irqsave(&gpt->lock, flags); clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK, r); raw_spin_unlock_irqrestore(&gpt->lock, flags); + + return 0; } static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -334,7 +336,7 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in; gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out; gpt->gc.get = mpc52xx_gpt_gpio_get; - gpt->gc.set = mpc52xx_gpt_gpio_set; + gpt->gc.set_rv = mpc52xx_gpt_gpio_set; gpt->gc.base = -1; gpt->gc.parent = gpt->dev; @@ -369,7 +371,7 @@ struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq) mutex_lock(&mpc52xx_gpt_list_mutex); list_for_each(pos, &mpc52xx_gpt_list) { gpt = container_of(pos, struct mpc52xx_gpt_priv, list); - if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) { + if (gpt->irqhost && irq == irq_find_mapping(gpt->irqhost, 0)) { mutex_unlock(&mpc52xx_gpt_list_mutex); return gpt; } @@ -644,7 +646,6 @@ static int mpc52xx_wdt_release(struct inode *inode, struct file *file) static const struct file_operations mpc52xx_wdt_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .write = mpc52xx_wdt_write, .unlocked_ioctl = mpc52xx_wdt_ioctl, .compat_ioctl = compat_ptr_ioctl, diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 1e0a5e9644dc..eb6a4e745c08 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -446,14 +446,14 @@ void __init mpc52xx_init_irq(void) * As last step, add an irq host to translate the real * hw irq information provided by the ofw to linux virq */ - mpc52xx_irqhost = irq_domain_add_linear(picnode, + mpc52xx_irqhost = irq_domain_create_linear(of_fwnode_handle(picnode), MPC52xx_IRQ_HIGHTESTHWIRQ, &mpc52xx_irqhost_ops, NULL); if (!mpc52xx_irqhost) panic(__FILE__ ": Cannot allocate the IRQ host\n"); - irq_set_default_host(mpc52xx_irqhost); + irq_set_default_domain(mpc52xx_irqhost); pr_info("MPC52xx PIC is up and running!\n"); } @@ -515,5 +515,5 @@ unsigned int mpc52xx_get_irq(void) return 0; } - return irq_linear_revmap(mpc52xx_irqhost, irq); + return irq_find_mapping(mpc52xx_irqhost, irq); } diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 3dc65ce1f175..8f918916e631 100644 --- 
a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -128,7 +128,7 @@ static int ep8248e_mdio_probe(struct platform_device *ofdev) bus->name = "ep8248e-mdio-bitbang"; bus->parent = &ofdev->dev; - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); ret = of_mdiobus_register(bus, ofdev->dev.of_node); if (ret) diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c index c86da3f2b74b..99f0f0f41876 100644 --- a/arch/powerpc/platforms/82xx/km82xx.c +++ b/arch/powerpc/platforms/82xx/km82xx.c @@ -27,15 +27,15 @@ static void __init km82xx_pic_init(void) { - struct device_node *np = of_find_compatible_node(NULL, NULL, - "fsl,pq2-pic"); + struct device_node *np __free(device_node); + np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic"); + if (!np) { pr_err("PIC init: can not find cpm-pic node\n"); return; } cpm2_pic_init(np); - of_node_put(np); } struct cpm_pin { diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 4d8fa9ed1a67..6e37dfc6c5c9 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -92,10 +92,11 @@ static void mcu_power_off(void) mutex_unlock(&mcu->lock); } -static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct mcu *mcu = gpiochip_get_data(gc); u8 bit = 1 << (4 + gpio); + int ret; mutex_lock(&mcu->lock); if (val) @@ -103,14 +104,16 @@ static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) else mcu->reg_ctrl |= bit; - i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl); + ret = i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, + mcu->reg_ctrl); mutex_unlock(&mcu->lock); + + return ret; } static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - mcu_gpio_set(gc, gpio, val); - return 0; + return mcu_gpio_set(gc, gpio, val); } static int mcu_gpiochip_add(struct mcu *mcu) @@ -123,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu) gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; - gc->set = mcu_gpio_set; + gc->set_rv = mcu_gpio_set; gc->direction_output = mcu_gpio_dir_out; gc->parent = dev; diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index bc6bd4d0ae96..6a62ed6082c9 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S @@ -68,7 +68,8 @@ _GLOBAL(mpc83xx_enter_deep_sleep) mfspr r5, SPRN_HID0 mfspr r6, SPRN_HID1 - mfspr r7, SPRN_HID2 + /* FIXME: Should this use SPRN_HID2_G2_LE? */ + mfspr r7, SPRN_HID2_750FX stw r5, SS_HID+0(r3) stw r6, SS_HID+4(r3) @@ -396,7 +397,8 @@ mpc83xx_deep_resume: mtspr SPRN_HID0, r5 mtspr SPRN_HID1, r6 - mtspr SPRN_HID2, r7 + /* FIXME: Should this use SPRN_HID2_G2_LE? */ + mtspr SPRN_HID2_750FX, r7 lwz r4, SS_IABR+0(r3) lwz r5, SS_IABR+4(r3) diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 9315a3b69d6d..604c1b4b6d45 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -40,27 +40,6 @@ config BSC9132_QDS and dual StarCore SC3850 DSP cores. 
Manufacturer : Freescale Semiconductor, Inc -config MPC8540_ADS - bool "Freescale MPC8540 ADS" - select DEFAULT_UIMAGE - help - This option enables support for the MPC 8540 ADS board - -config MPC8560_ADS - bool "Freescale MPC8560 ADS" - select DEFAULT_UIMAGE - select CPM2 - help - This option enables support for the MPC 8560 ADS board - -config MPC85xx_CDS - bool "Freescale MPC85xx CDS" - select DEFAULT_UIMAGE - select PPC_I8259 - select HAVE_RAPIDIO - help - This option enables support for the MPC85xx CDS board - config MPC85xx_MDS bool "Freescale MPC8568 MDS / MPC8569 MDS / P1021 MDS" select DEFAULT_UIMAGE diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 34ce21f42623..e635b27ee718 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -143,7 +143,7 @@ static struct platform_driver gpio_halt_driver = { .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, - .remove_new = gpio_halt_remove, + .remove = gpio_halt_remove, }; module_platform_driver(gpio_halt_driver); diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 40aa58206888..32fa5fb557c0 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -23,7 +23,7 @@ #include <asm/mpic.h> #include <asm/cacheflush.h> #include <asm/dbell.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/cputhreads.h> #include <asm/fsl_pm.h> @@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) hard_irq_disable(); mpic_teardown_this_cpu(secondary); +#ifdef CONFIG_CRASH_DUMP if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) { /* * We enter the crash kernel on whatever cpu crashed, @@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) */ disable_threadbit = 1; disable_cpu = cpu_first_thread_sibling(cpu); - } else if (sibling != crashing_cpu && - cpu_thread_in_core(cpu) == 0 && - cpu_thread_in_core(sibling) != 0) { + } else if (sibling == crashing_cpu) { + return; + } +#endif + if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) { disable_threadbit = 2; disable_cpu = sibling; } diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 60e0b8947ce6..4b69fb321a68 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -83,7 +83,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) if (cause >> (i + 16)) break; } - return irq_linear_revmap(socrates_fpga_pic_irq_host, + return irq_find_mapping(socrates_fpga_pic_irq_host, (irq_hw_number_t)i); } @@ -278,7 +278,7 @@ void __init socrates_fpga_pic_init(struct device_node *pic) int i; /* Setup an irq_domain structure */ - socrates_fpga_pic_irq_host = irq_domain_add_linear(pic, + socrates_fpga_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(pic), SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL); if (socrates_fpga_pic_irq_host == NULL) { pr_err("FPGA PIC: Unable to allocate host\n"); diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c index 767eed98a0a8..d4fbb6eff38a 100644 --- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c +++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c @@ -149,4 +149,5 @@ static int __init t1042rdb_diu_init(void) early_initcall(t1042rdb_diu_init); +MODULE_DESCRIPTION("Freescale T1042 
DIU driver"); MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index 8a7e55acf090..9be33e41af6d 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -12,7 +12,7 @@ #include <linux/delay.h> #include <linux/pgtable.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/page.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index a14d9d8997a4..8623aebfac48 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig @@ -195,6 +195,13 @@ config PIN_TLB_IMMR CONFIG_PIN_TLB_DATA is also selected, it will reduce CONFIG_PIN_TLB_DATA to 24 Mbytes. +config PIN_TLB_TEXT + bool "Pinned TLB for TEXT" + depends on PIN_TLB + default y + help + This pins kernel text with 8M pages. + endmenu endmenu diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c index a18fc7c99f83..a49d4a9ab3bc 100644 --- a/arch/powerpc/platforms/8xx/cpm1-ic.c +++ b/arch/powerpc/platforms/8xx/cpm1-ic.c @@ -59,7 +59,7 @@ static int cpm_get_irq(struct irq_desc *desc) cpm_vec = in_be16(&data->reg->cpic_civr); cpm_vec >>= 11; - return irq_linear_revmap(data->host, cpm_vec); + return irq_find_mapping(data->host, cpm_vec); } static void cpm_cascade(struct irq_desc *desc) @@ -110,7 +110,8 @@ static int cpm_pic_probe(struct platform_device *pdev) out_be32(&data->reg->cpic_cimr, 0); - data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data); + data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node), + 64, &cpm_pic_host_ops, data); if (!data->host) return -ENODEV; diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index b24d4102fbf6..7462c221115c 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -45,7 +45,7 @@ #include <sysdev/fsl_soc.h> #ifdef CONFIG_8xx_GPIO -#include <linux/gpio/legacy-of-mm-gpiochip.h> +#include <linux/gpio/driver.h> #endif #define CPM_MAP_SIZE (0x4000) @@ -376,7 +376,8 @@ int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode) #ifdef CONFIG_8xx_GPIO struct cpm1_gpio16_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ @@ -386,19 +387,17 @@ struct cpm1_gpio16_chip { int irq[16]; }; -static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm1_gpio16_save_regs(struct cpm1_gpio16_chip *cpm1_gc) { - struct cpm1_gpio16_chip *cpm1_gc = - container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; cpm1_gc->cpdata = in_be16(&iop->dat); } static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; u16 pin_mask; pin_mask = 1 << (15 - gpio); @@ -406,11 +405,9 @@ static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) return !!(in_be16(&iop->dat) & pin_mask); } -static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, - int value) +static void __cpm1_gpio16_set(struct cpm1_gpio16_chip *cpm1_gc, u16 pin_mask, int value) { - struct 
cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; if (value) cpm1_gc->cpdata |= pin_mask; @@ -420,40 +417,39 @@ static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, out_be16(&iop->dat, cpm1_gc->cpdata); } -static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; u16 pin_mask = 1 << (15 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); - __cpm1_gpio16_set(mm_gc, pin_mask, value); + __cpm1_gpio16_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; } static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); return cpm1_gc->irq[gpio] ? : -ENXIO; } static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; unsigned long flags; u16 pin_mask = 1 << (15 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); setbits16(&iop->dir, pin_mask); - __cpm1_gpio16_set(mm_gc, pin_mask, val); + __cpm1_gpio16_set(cpm1_gc, pin_mask, val); spin_unlock_irqrestore(&cpm1_gc->lock, flags); @@ -462,9 +458,8 @@ static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; unsigned long flags; u16 pin_mask = 1 << (15 - gpio); @@ -481,11 +476,10 @@ int cpm1_gpiochip_add16(struct device *dev) { struct device_node *np = dev->of_node; struct cpm1_gpio16_chip *cpm1_gc; - struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; u16 mask; - cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) return -ENOMEM; @@ -499,43 +493,50 @@ int cpm1_gpiochip_add16(struct device *dev) cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++); } - mm_gc = &cpm1_gc->mm_gc; - gc = &mm_gc->gc; - - mm_gc->save_regs = cpm1_gpio16_save_regs; + gc = &cpm1_gc->gc; + gc->base = -1; gc->ngpio = 16; gc->direction_input = cpm1_gpio16_dir_in; gc->direction_output = cpm1_gpio16_dir_out; gc->get = cpm1_gpio16_get; - gc->set = cpm1_gpio16_set; + gc->set_rv = cpm1_gpio16_set; gc->to_irq = cpm1_gpio16_to_irq; gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm1_gc->regs)) + return PTR_ERR(cpm1_gc->regs); + + cpm1_gpio16_save_regs(cpm1_gc); + + 
return devm_gpiochip_add_data(dev, gc, cpm1_gc); } struct cpm1_gpio32_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ u32 cpdata; }; -static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm1_gpio32_save_regs(struct cpm1_gpio32_chip *cpm1_gc) { - struct cpm1_gpio32_chip *cpm1_gc = - container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; cpm1_gc->cpdata = in_be32(&iop->dat); } static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; u32 pin_mask; pin_mask = 1 << (31 - gpio); @@ -543,11 +544,9 @@ static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio) return !!(in_be32(&iop->dat) & pin_mask); } -static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, - int value) +static void __cpm1_gpio32_set(struct cpm1_gpio32_chip *cpm1_gc, u32 pin_mask, int value) { - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; if (value) cpm1_gc->cpdata |= pin_mask; @@ -557,32 +556,32 @@ static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, out_be32(&iop->dat, cpm1_gc->cpdata); } -static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); - __cpm1_gpio32_set(mm_gc, pin_mask, value); + __cpm1_gpio32_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; } static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); setbits32(&iop->dir, pin_mask); - __cpm1_gpio32_set(mm_gc, pin_mask, val); + __cpm1_gpio32_set(cpm1_gc, pin_mask, val); spin_unlock_irqrestore(&cpm1_gc->lock, flags); @@ -591,9 +590,8 @@ static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); @@ -610,28 +608,35 @@ int cpm1_gpiochip_add32(struct device *dev) { struct device_node *np = dev->of_node; struct cpm1_gpio32_chip *cpm1_gc; - struct of_mm_gpio_chip *mm_gc; struct 
gpio_chip *gc; - cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) return -ENOMEM; spin_lock_init(&cpm1_gc->lock); - mm_gc = &cpm1_gc->mm_gc; - gc = &mm_gc->gc; - - mm_gc->save_regs = cpm1_gpio32_save_regs; + gc = &cpm1_gc->gc; + gc->base = -1; gc->ngpio = 32; gc->direction_input = cpm1_gpio32_dir_in; gc->direction_output = cpm1_gpio32_dir_out; gc->get = cpm1_gpio32_get; - gc->set = cpm1_gpio32_set; + gc->set_rv = cpm1_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm1_gc->regs)) + return PTR_ERR(cpm1_gc->regs); + + cpm1_gpio32_save_regs(cpm1_gc); + + return devm_gpiochip_add_data(dev, gc, cpm1_gc); } #endif /* CONFIG_8xx_GPIO */ diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c index ea6b0e523c60..933d6ab7f512 100644 --- a/arch/powerpc/platforms/8xx/pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -80,7 +80,7 @@ unsigned int mpc8xx_get_irq(void) if (irq == PIC_VEC_SPURRIOUS) return 0; - return irq_linear_revmap(mpc8xx_pic_host, irq); + return irq_find_mapping(mpc8xx_pic_host, irq); } @@ -146,7 +146,8 @@ void __init mpc8xx_pic_init(void) if (!siu_reg) goto out; - mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL); + mpc8xx_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 64, + &mpc8xx_pic_host_ops, NULL); if (!mpc8xx_pic_host) printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 1fd253f92a77..fea3766eac0f 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -7,7 +7,6 @@ source "arch/powerpc/platforms/chrp/Kconfig" source "arch/powerpc/platforms/512x/Kconfig" source "arch/powerpc/platforms/52xx/Kconfig" source "arch/powerpc/platforms/powermac/Kconfig" -source "arch/powerpc/platforms/maple/Kconfig" source "arch/powerpc/platforms/pasemi/Kconfig" source "arch/powerpc/platforms/ps3/Kconfig" source "arch/powerpc/platforms/cell/Kconfig" @@ -18,7 +17,6 @@ source "arch/powerpc/platforms/85xx/Kconfig" source "arch/powerpc/platforms/86xx/Kconfig" source "arch/powerpc/platforms/embedded6xx/Kconfig" source "arch/powerpc/platforms/44x/Kconfig" -source "arch/powerpc/platforms/40x/Kconfig" source "arch/powerpc/platforms/amigaone/Kconfig" source "arch/powerpc/platforms/book3s/Kconfig" source "arch/powerpc/platforms/microwatt/Kconfig" @@ -72,10 +70,6 @@ config PPC_DT_CPU_FTRS firmware provides this binding. If you're not sure say Y. 
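A recurring change in the GPIO hunks above (ppc4xx, mpc52xx_gpt, mcu_mpc8349emitx, cpm1) is the switch from the legacy void-returning gpio_chip .set callback to the int-returning .set_rv, which lets a failed register or bus write propagate back to gpiolib. Below is a minimal sketch of that pattern only; the names my_chip, my_gpio_set and my_chip_setup are hypothetical and not part of this diff:

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical driver state; stands in for cpm1_gpio16_chip and friends. */
struct my_chip {
	struct gpio_chip gc;
	void __iomem *regs;
	spinlock_t lock;
};

/* New-style setter: returns 0 on success or a negative errno. */
static int my_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct my_chip *chip = gpiochip_get_data(gc);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	/* ... update the shadowed data register, as __cpm1_gpio16_set() does ... */
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;	/* an I2C-backed chip would instead return the write status */
}

static void my_chip_setup(struct my_chip *chip)
{
	chip->gc.set_rv = my_gpio_set;	/* was: chip->gc.set = my_gpio_set */
}

Drivers whose writes can fail, such as the mcu_mpc8349emitx I2C expander above, now return the i2c_smbus_write_byte_data() status instead of silently dropping it.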
-config UDBG_RTAS_CONSOLE - bool "RTAS based debug console" - depends on PPC_RTAS - config PPC_SMP_MUXED_IPI bool help @@ -188,12 +182,6 @@ config PPC_INDIRECT_PIO bool select GENERIC_IOMAP -config PPC_INDIRECT_MMIO - bool - -config PPC_IO_WORKAROUNDS - bool - source "drivers/cpufreq/Kconfig" menu "CPUIdle driver" diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index b2d8c0da2ad9..613b383ed8b3 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -43,19 +43,10 @@ config PPC_8xx select HAVE_ARCH_VMAP_STACK select HUGETLBFS -config 40x - bool "AMCC 40x" - select PPC_DCR_NATIVE - select PPC_UDBG_16550 - select 4xx_SOC - select HAVE_PCI - select PPC_KUEP if PPC_KUAP - config 44x bool "AMCC 44x, 46x or 47x" select PPC_DCR_NATIVE select PPC_UDBG_16550 - select 4xx_SOC select HAVE_PCI select PHYS_64BIT select PPC_KUEP @@ -93,11 +84,8 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_SPLIT_PMD_PTLOCK - select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select HAVE_MOVE_PMD @@ -117,6 +105,14 @@ config PPC_BOOK3E_64 endchoice +config PPC_THP + def_bool y + depends on PPC_BOOK3S_64 + depends on PPC_RADIX_MMU || (PPC_64S_HASH_MMU && PAGE_SIZE_64KB) + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + choice prompt "CPU selection" help @@ -194,11 +190,6 @@ config E6500_CPU depends on !CC_IS_CLANG select PPC_HAS_LBARX_LHARX -config 405_CPU - bool "40x family" - depends on 40x - depends on !CC_IS_CLANG - config 440_CPU bool "440 (44x family)" depends on 44x @@ -264,7 +255,6 @@ config TARGET_CPU default "e6500" if E6500_CPU default "power4" if POWERPC64_CPU && !CPU_LITTLE_ENDIAN default "power8" if POWERPC64_CPU && CPU_LITTLE_ENDIAN - default "405" if 405_CPU default "440" if 440_CPU default "464" if 464_CPU default "476" if 476_CPU @@ -340,7 +330,7 @@ config FSL_EMB_PERF_EVENT_E500 config 4xx bool - depends on 40x || 44x + depends on 44x default y config BOOKE @@ -348,11 +338,6 @@ config BOOKE depends on PPC_E500 || 44x default y -config BOOKE_OR_40x - bool - depends on BOOKE || 40x - default y - config PTE_64BIT bool depends on 44x || PPC_E500 || PPC_86xx @@ -464,6 +449,19 @@ config PPC_RADIX_MMU_DEFAULT If you're unsure, say Y. +config PPC_RADIX_BROADCAST_TLBIE + bool + depends on PPC_RADIX_MMU + help + Power ISA v3.0 and later implementations in the Linux Compliancy Subset + and lower are not required to implement broadcast TLBIE instructions. + Platforms with CPUs that do implement TLBIE broadcast, that is, where + a TLB invalidation instruction performed on one CPU operates on the + TLBs of all CPUs in the system, should select this option. If this + option is selected, the disable_tlbie kernel command line option can + be used to cause global TLB invalidations to be done via IPIs; without + it, IPIs will be used unconditionally. + config PPC_KERNEL_PREFIXED depends on PPC_HAVE_PREFIXED_SUPPORT depends on CC_HAS_PREFIXED @@ -495,8 +493,8 @@ config PPC_KERNEL_PCREL This option builds the kernel with the pc relative ABI model. 
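The interrupt-controller hunks throughout this diff apply one mechanical migration: irq_domain_add_linear() becomes irq_domain_create_linear() taking a struct fwnode_handle obtained via of_fwnode_handle(), irq_linear_revmap() becomes irq_find_mapping(), and irq_set_default_host() becomes irq_set_default_domain(). A condensed before/after sketch, assuming a hypothetical my_pic_host_ops table and a device_node pointer supplied by the caller:

#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical ops table; a real PIC driver supplies .map and .xlate here. */
static const struct irq_domain_ops my_pic_host_ops;

static struct irq_domain *my_pic_init(struct device_node *np)
{
	struct irq_domain *host;

	/* Old: host = irq_domain_add_linear(np, 64, &my_pic_host_ops, NULL); */
	host = irq_domain_create_linear(of_fwnode_handle(np), 64,
					&my_pic_host_ops, NULL);
	if (!host)
		return NULL;

	/* Old: irq_set_default_host(host); */
	irq_set_default_domain(host);
	return host;
}

static unsigned int my_pic_get_irq(struct irq_domain *host, irq_hw_number_t hw)
{
	/* Old: irq_linear_revmap(host, hw); treated as a drop-in rename here. */
	return irq_find_mapping(host, hw);
}

The lookup side is unchanged in behaviour; these hunks (uic, mpc52xx_pic, socrates_fpga_pic, cpm1-ic, mpc8xx pic) substitute the new names without altering hwirq numbering.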
config PPC_KUEP - bool "Kernel Userspace Execution Prevention" if !40x - default y if !40x + bool "Kernel Userspace Execution Prevention" + default y help Enable support for Kernel Userspace Execution Prevention (KUEP) @@ -582,7 +580,7 @@ config NR_CPUS config NOT_COHERENT_CACHE bool - depends on 4xx || PPC_8xx || PPC_MPC512x || \ + depends on 44x || PPC_8xx || PPC_MPC512x || \ GAMECUBE_COMMON || AMIGAONE select ARCH_HAS_DMA_PREP_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 94470fb27c99..3cee4a842736 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -4,8 +4,6 @@ obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o obj-$(CONFIG_PPC_PMAC) += powermac/ obj-$(CONFIG_PPC_CHRP) += chrp/ -obj-$(CONFIG_4xx) += 4xx/ -obj-$(CONFIG_40x) += 40x/ obj-$(CONFIG_44x) += 44x/ obj-$(CONFIG_PPC_MPC512x) += 512x/ obj-$(CONFIG_PPC_MPC52xx) += 52xx/ @@ -16,7 +14,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/ obj-$(CONFIG_PPC_86xx) += 86xx/ obj-$(CONFIG_PPC_POWERNV) += powernv/ obj-$(CONFIG_PPC_PSERIES) += pseries/ -obj-$(CONFIG_PPC_MAPLE) += maple/ obj-$(CONFIG_PPC_PASEMI) += pasemi/ obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_PPC_PS3) += ps3/ diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c index 2c8dc0886912..33f852a7625f 100644 --- a/arch/powerpc/platforms/amigaone/setup.c +++ b/arch/powerpc/platforms/amigaone/setup.c @@ -109,7 +109,7 @@ static void __init amigaone_init_IRQ(void) i8259_init(pic, int_ack); ppc_md.get_irq = i8259_irq; - irq_set_default_host(i8259_get_host()); + irq_set_default_domain(i8259_get_host()); } static int __init request_isa_regions(void) diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index f381b177ea06..dc6f75d3ac6e 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +/* + * During mmap() paste address, mapping VMA is saved in VAS window + * struct which is used to unmap during migration if the window is + * still open. But the user space can remove this mapping with + * munmap() before closing the window and the VMA address will + * be invalid. Set VAS window VMA to NULL in this function which + * is called before VMA free. + */ +static void vas_mmap_close(struct vm_area_struct *vma) +{ + struct file *fp = vma->vm_file; + struct coproc_instance *cp_inst = fp->private_data; + struct vas_window *txwin; + + /* Should not happen */ + if (!cp_inst || !cp_inst->txwin) { + pr_err("No attached VAS window for the paste address mmap\n"); + return; + } + + txwin = cp_inst->txwin; + /* + * task_ref.vma is set in coproc_mmap() during mmap paste + * address. So it has to be the same VMA that is getting freed. + */ + if (WARN_ON(txwin->task_ref.vma != vma)) { + pr_err("Invalid paste address mmaping\n"); + return; + } + + mutex_lock(&txwin->task_ref.mmap_mutex); + txwin->task_ref.vma = NULL; + mutex_unlock(&txwin->task_ref.mmap_mutex); +} + static const struct vm_operations_struct vas_vm_ops = { + .close = vas_mmap_close, .fault = vas_mmap_fault, }; @@ -485,6 +521,15 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) return -EINVAL; } + /* + * Map complete page to the paste address. So the user + * space should pass 0ULL to the offset parameter. 
+ */ + if (vma->vm_pgoff) { + pr_debug("Page offset unsupported to map paste address\n"); + return -EINVAL; + } + /* Ensure instance has an open send window */ if (!txwin) { pr_err("No send window open?\n"); diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 34669b060f36..db65bfcd1e74 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -3,42 +3,6 @@ config PPC_CELL select PPC_64S_HASH_MMU if PPC64 bool -config PPC_CELL_COMMON - bool - select PPC_CELL - select PPC_DCR_MMIO - select PPC_INDIRECT_PIO - select PPC_INDIRECT_MMIO - select PPC_HASH_MMU_NATIVE - select PPC_RTAS - select IRQ_EDGE_EOI_HANDLER - -config PPC_CELL_NATIVE - bool - select PPC_CELL_COMMON - select MPIC - select PPC_IO_WORKAROUNDS - select IBM_EMAC_EMAC4 if IBM_EMAC - select IBM_EMAC_RGMII if IBM_EMAC - select IBM_EMAC_ZMII if IBM_EMAC #test only - select IBM_EMAC_TAH if IBM_EMAC #test only - -config PPC_IBM_CELL_BLADE - bool "IBM Cell Blade" - depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN - select PPC_CELL_NATIVE - select PPC_OF_PLATFORM_PCI - select FORCE_PCI - select MMIO_NVRAM - select PPC_UDBG_16550 - select UDBG_RTAS_CONSOLE - -config AXON_MSI - bool - depends on PPC_IBM_CELL_BLADE && PCI_MSI - select IRQ_DOMAIN_NOMAP - default y - menu "Cell Broadband Engine options" depends on PPC_CELL @@ -57,48 +21,4 @@ config SPU_BASE bool select PPC_COPRO_BASE -config CBE_RAS - bool "RAS features for bare metal Cell BE" - depends on PPC_CELL_NATIVE - default y - -config PPC_IBM_CELL_RESETBUTTON - bool "IBM Cell Blade Pinhole reset button" - depends on CBE_RAS && PPC_IBM_CELL_BLADE - default y - help - Support Pinhole Resetbutton on IBM Cell blades. - This adds a method to trigger system reset via front panel pinhole button. - -config PPC_IBM_CELL_POWERBUTTON - tristate "IBM Cell Blade power button" - depends on PPC_IBM_CELL_BLADE && INPUT_EVDEV - default y - help - Support Powerbutton on IBM Cell blades. - This will enable the powerbutton as an input device. - -config CBE_THERM - tristate "CBE thermal support" - default m - depends on CBE_RAS && SPU_BASE - -config PPC_PMI - tristate - default y - depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON - help - PMI (Platform Management Interrupt) is a way to - communicate with the BMC (Baseboard Management Controller). - It is used in some IBM Cell blades. - -config CBE_CPUFREQ_SPU_GOVERNOR - tristate "CBE frequency scaling based on SPU usage" - depends on SPU_FS && CPU_FREQ - default m - help - This governor checks for spu usage to adjust the cpu frequency. - If no spu is running on a given cpu, that cpu will be throttled to - the minimal possible frequency. 
- endmenu diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 7ea6692f67e2..7e5ff239c376 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -1,27 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o - -obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ - pmu.o spider-pci.o -obj-$(CONFIG_CBE_RAS) += ras.o - -obj-$(CONFIG_CBE_THERM) += cbe_thermal.o -obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o - -obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o - -ifdef CONFIG_SMP -obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o -endif - -# needed only when building loadable spufs.ko -spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o -spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o - obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ spu_syscalls.o \ - $(spu-priv1-y) \ - $(spu-manage-y) \ spufs/ - -obj-$(CONFIG_AXON_MSI) += axon_msi.o diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c deleted file mode 100644 index 28dc86744cac..000000000000 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ /dev/null @@ -1,481 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2007, Michael Ellerman, IBM Corporation. - */ - - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/msi.h> -#include <linux/export.h> -#include <linux/slab.h> -#include <linux/debugfs.h> -#include <linux/of.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> - -#include <asm/dcr.h> -#include <asm/machdep.h> - -#include "cell.h" - -/* - * MSIC registers, specified as offsets from dcr_base - */ -#define MSIC_CTRL_REG 0x0 - -/* Base Address registers specify FIFO location in BE memory */ -#define MSIC_BASE_ADDR_HI_REG 0x3 -#define MSIC_BASE_ADDR_LO_REG 0x4 - -/* Hold the read/write offsets into the FIFO */ -#define MSIC_READ_OFFSET_REG 0x5 -#define MSIC_WRITE_OFFSET_REG 0x6 - - -/* MSIC control register flags */ -#define MSIC_CTRL_ENABLE 0x0001 -#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002 -#define MSIC_CTRL_IRQ_ENABLE 0x0008 -#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010 - -/* - * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB. - * Currently we're using a 64KB FIFO size. - */ -#define MSIC_FIFO_SIZE_SHIFT 16 -#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT) - -/* - * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits - * 8-9 of the MSIC control reg. - */ -#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300) - -/* - * We need to mask the read/write offsets to make sure they stay within - * the bounds of the FIFO. Also they should always be 16-byte aligned. 
- */ -#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu) - -/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */ -#define MSIC_FIFO_ENTRY_SIZE 0x10 - - -struct axon_msic { - struct irq_domain *irq_domain; - __le32 *fifo_virt; - dma_addr_t fifo_phys; - dcr_host_t dcr_host; - u32 read_offset; -#ifdef DEBUG - u32 __iomem *trigger; -#endif -}; - -#ifdef DEBUG -void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic); -#else -static inline void axon_msi_debug_setup(struct device_node *dn, - struct axon_msic *msic) { } -#endif - - -static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) -{ - pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); - - dcr_write(msic->dcr_host, dcr_n, val); -} - -static void axon_msi_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct axon_msic *msic = irq_desc_get_handler_data(desc); - u32 write_offset, msi; - int idx; - int retry = 0; - - write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG); - pr_devel("axon_msi: original write_offset 0x%x\n", write_offset); - - /* write_offset doesn't wrap properly, so we have to mask it */ - write_offset &= MSIC_FIFO_SIZE_MASK; - - while (msic->read_offset != write_offset && retry < 100) { - idx = msic->read_offset / sizeof(__le32); - msi = le32_to_cpu(msic->fifo_virt[idx]); - msi &= 0xFFFF; - - pr_devel("axon_msi: woff %x roff %x msi %x\n", - write_offset, msic->read_offset, msi); - - if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { - generic_handle_irq(msi); - msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); - } else { - /* - * Reading the MSIC_WRITE_OFFSET_REG does not - * reliably flush the outstanding DMA to the - * FIFO buffer. Here we were reading stale - * data, so we need to retry. 
- */ - udelay(1); - retry++; - pr_devel("axon_msi: invalid irq 0x%x!\n", msi); - continue; - } - - if (retry) { - pr_devel("axon_msi: late irq 0x%x, retry %d\n", - msi, retry); - retry = 0; - } - - msic->read_offset += MSIC_FIFO_ENTRY_SIZE; - msic->read_offset &= MSIC_FIFO_SIZE_MASK; - } - - if (retry) { - printk(KERN_WARNING "axon_msi: irq timed out\n"); - - msic->read_offset += MSIC_FIFO_ENTRY_SIZE; - msic->read_offset &= MSIC_FIFO_SIZE_MASK; - } - - chip->irq_eoi(&desc->irq_data); -} - -static struct axon_msic *find_msi_translator(struct pci_dev *dev) -{ - struct irq_domain *irq_domain; - struct device_node *dn, *tmp; - const phandle *ph; - struct axon_msic *msic = NULL; - - dn = of_node_get(pci_device_to_OF_node(dev)); - if (!dn) { - dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); - return NULL; - } - - for (; dn; dn = of_get_next_parent(dn)) { - ph = of_get_property(dn, "msi-translator", NULL); - if (ph) - break; - } - - if (!ph) { - dev_dbg(&dev->dev, - "axon_msi: no msi-translator property found\n"); - goto out_error; - } - - tmp = dn; - dn = of_find_node_by_phandle(*ph); - of_node_put(tmp); - if (!dn) { - dev_dbg(&dev->dev, - "axon_msi: msi-translator doesn't point to a node\n"); - goto out_error; - } - - irq_domain = irq_find_host(dn); - if (!irq_domain) { - dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n", - dn); - goto out_error; - } - - msic = irq_domain->host_data; - -out_error: - of_node_put(dn); - - return msic; -} - -static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) -{ - struct device_node *dn; - int len; - const u32 *prop; - - dn = of_node_get(pci_device_to_OF_node(dev)); - if (!dn) { - dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); - return -ENODEV; - } - - for (; dn; dn = of_get_next_parent(dn)) { - if (!dev->no_64bit_msi) { - prop = of_get_property(dn, "msi-address-64", &len); - if (prop) - break; - } - - prop = of_get_property(dn, "msi-address-32", &len); - if (prop) - break; - } - - if (!prop) { - dev_dbg(&dev->dev, - "axon_msi: no msi-address-(32|64) properties found\n"); - of_node_put(dn); - return -ENOENT; - } - - switch (len) { - case 8: - msg->address_hi = prop[0]; - msg->address_lo = prop[1]; - break; - case 4: - msg->address_hi = 0; - msg->address_lo = prop[0]; - break; - default: - dev_dbg(&dev->dev, - "axon_msi: malformed msi-address-(32|64) property\n"); - of_node_put(dn); - return -EINVAL; - } - - of_node_put(dn); - - return 0; -} - -static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - unsigned int virq, rc; - struct msi_desc *entry; - struct msi_msg msg; - struct axon_msic *msic; - - msic = find_msi_translator(dev); - if (!msic) - return -ENODEV; - - rc = setup_msi_msg_address(dev, &msg); - if (rc) - return rc; - - msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { - virq = irq_create_direct_mapping(msic->irq_domain); - if (!virq) { - dev_warn(&dev->dev, - "axon_msi: virq allocation failed!\n"); - return -1; - } - dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); - - irq_set_msi_desc(virq, entry); - msg.data = virq; - pci_write_msi_msg(virq, &msg); - } - - return 0; -} - -static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) -{ - struct msi_desc *entry; - - dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); - - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) { - irq_set_msi_desc(entry->irq, NULL); - irq_dispose_mapping(entry->irq); - entry->irq = 0; - } -} - -static struct irq_chip msic_irq_chip = { - .irq_mask = pci_msi_mask_irq, - 
.irq_unmask = pci_msi_unmask_irq, - .irq_shutdown = pci_msi_mask_irq, - .name = "AXON-MSI", -}; - -static int msic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - irq_set_chip_data(virq, h->host_data); - irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); - - return 0; -} - -static const struct irq_domain_ops msic_host_ops = { - .map = msic_host_map, -}; - -static void axon_msi_shutdown(struct platform_device *device) -{ - struct axon_msic *msic = dev_get_drvdata(&device->dev); - u32 tmp; - - pr_devel("axon_msi: disabling %pOF\n", - irq_domain_get_of_node(msic->irq_domain)); - tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); - tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; - msic_dcr_write(msic, MSIC_CTRL_REG, tmp); -} - -static int axon_msi_probe(struct platform_device *device) -{ - struct device_node *dn = device->dev.of_node; - struct axon_msic *msic; - unsigned int virq; - int dcr_base, dcr_len; - - pr_devel("axon_msi: setting up dn %pOF\n", dn); - - msic = kzalloc(sizeof(*msic), GFP_KERNEL); - if (!msic) { - printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n", - dn); - goto out; - } - - dcr_base = dcr_resource_start(dn, 0); - dcr_len = dcr_resource_len(dn, 0); - - if (dcr_base == 0 || dcr_len == 0) { - printk(KERN_ERR - "axon_msi: couldn't parse dcr properties on %pOF\n", - dn); - goto out_free_msic; - } - - msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); - if (!DCR_MAP_OK(msic->dcr_host)) { - printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n", - dn); - goto out_free_msic; - } - - msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, - &msic->fifo_phys, GFP_KERNEL); - if (!msic->fifo_virt) { - printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n", - dn); - goto out_free_msic; - } - - virq = irq_of_parse_and_map(dn, 0); - if (!virq) { - printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n", - dn); - goto out_free_fifo; - } - memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - - /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ - msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); - if (!msic->irq_domain) { - printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n", - dn); - goto out_free_fifo; - } - - irq_set_handler_data(virq, msic); - irq_set_chained_handler(virq, axon_msi_cascade); - pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); - - /* Enable the MSIC hardware */ - msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); - msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, - msic->fifo_phys & 0xFFFFFFFF); - msic_dcr_write(msic, MSIC_CTRL_REG, - MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | - MSIC_CTRL_FIFO_SIZE); - - msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG) - & MSIC_FIFO_SIZE_MASK; - - dev_set_drvdata(&device->dev, msic); - - cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs; - cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs; - - axon_msi_debug_setup(dn, msic); - - printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn); - - return 0; - -out_free_fifo: - dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, - msic->fifo_phys); -out_free_msic: - kfree(msic); -out: - - return -1; -} - -static const struct of_device_id axon_msi_device_id[] = { - { - .compatible = "ibm,axon-msic" - }, - {} -}; - -static struct platform_driver axon_msi_driver = { - .probe = axon_msi_probe, - .shutdown = axon_msi_shutdown, - .driver = { - .name = 
"axon-msi", - .of_match_table = axon_msi_device_id, - }, -}; - -static int __init axon_msi_init(void) -{ - return platform_driver_register(&axon_msi_driver); -} -subsys_initcall(axon_msi_init); - - -#ifdef DEBUG -static int msic_set(void *data, u64 val) -{ - struct axon_msic *msic = data; - out_le32(msic->trigger, val); - return 0; -} - -static int msic_get(void *data, u64 *val) -{ - *val = 0; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n"); - -void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) -{ - char name[8]; - struct resource res; - - if (of_address_to_resource(dn, 0, &res)) { - pr_devel("axon_msi: couldn't get reg property\n"); - return; - } - - msic->trigger = ioremap(res.start, 0x4); - if (!msic->trigger) { - pr_devel("axon_msi: ioremap failed\n"); - return; - } - - snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn)); - - debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic); -} -#endif /* DEBUG */ diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c deleted file mode 100644 index a3ee397486f6..000000000000 --- a/arch/powerpc/platforms/cell/cbe_powerbutton.c +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * driver for powerbutton on IBM cell blades - * - * (C) Copyright IBM Corp. 2005-2008 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/input.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/platform_device.h> -#include <asm/pmi.h> - -static struct input_dev *button_dev; -static struct platform_device *button_pdev; - -static void cbe_powerbutton_handle_pmi(pmi_message_t pmi_msg) -{ - BUG_ON(pmi_msg.type != PMI_TYPE_POWER_BUTTON); - - input_report_key(button_dev, KEY_POWER, 1); - input_sync(button_dev); - input_report_key(button_dev, KEY_POWER, 0); - input_sync(button_dev); -} - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_POWER_BUTTON, - .handle_pmi_message = cbe_powerbutton_handle_pmi, -}; - -static int __init cbe_powerbutton_init(void) -{ - int ret = 0; - struct input_dev *dev; - - if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) { - printk(KERN_ERR "%s: Not a cell blade.\n", __func__); - ret = -ENODEV; - goto out; - } - - dev = input_allocate_device(); - if (!dev) { - ret = -ENOMEM; - printk(KERN_ERR "%s: Not enough memory.\n", __func__); - goto out; - } - - set_bit(EV_KEY, dev->evbit); - set_bit(KEY_POWER, dev->keybit); - - dev->name = "Power Button"; - dev->id.bustype = BUS_HOST; - - /* this makes the button look like an acpi power button - * no clue whether anyone relies on that though */ - dev->id.product = 0x02; - dev->phys = "LNXPWRBN/button/input0"; - - button_pdev = platform_device_register_simple("power_button", 0, NULL, 0); - if (IS_ERR(button_pdev)) { - ret = PTR_ERR(button_pdev); - goto out_free_input; - } - - dev->dev.parent = &button_pdev->dev; - ret = input_register_device(dev); - if (ret) { - printk(KERN_ERR "%s: Failed to register device\n", __func__); - goto out_free_pdev; - } - - button_dev = dev; - - ret = pmi_register_handler(&cbe_pmi_handler); - if (ret) { - printk(KERN_ERR "%s: Failed to register with pmi.\n", __func__); - goto out_free_pdev; - } - - goto out; - -out_free_pdev: - platform_device_unregister(button_pdev); -out_free_input: - input_free_device(dev); -out: - return ret; -} - -static void __exit cbe_powerbutton_exit(void) -{ - pmi_unregister_handler(&cbe_pmi_handler); - platform_device_unregister(button_pdev); 
- input_free_device(button_dev); -} - -module_init(cbe_powerbutton_init); -module_exit(cbe_powerbutton_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c deleted file mode 100644 index 99b3558753e9..000000000000 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ /dev/null @@ -1,298 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * cbe_regs.c - * - * Accessor routines for the various MMIO register blocks of the CBE - * - * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. - */ - -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/export.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/cell-regs.h> - -/* - * Current implementation uses "cpu" nodes. We build our own mapping - * array of cpu numbers to cpu nodes locally for now to allow interrupt - * time code to have a fast path rather than call of_get_cpu_node(). If - * we implement cpu hotplug, we'll have to install an appropriate notifier - * in order to release references to the cpu going away - */ -static struct cbe_regs_map -{ - struct device_node *cpu_node; - struct device_node *be_node; - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_iic_regs __iomem *iic_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - struct cbe_pmd_shadow_regs pmd_shadow_regs; -} cbe_regs_maps[MAX_CBE]; -static int cbe_regs_map_count; - -static struct cbe_thread_map -{ - struct device_node *cpu_node; - struct device_node *be_node; - struct cbe_regs_map *regs; - unsigned int thread_id; - unsigned int cbe_id; -} cbe_thread_map[NR_CPUS]; - -static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... 
MAX_CBE-1] = {CPU_BITS_NONE} }; -static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; - -static struct cbe_regs_map *cbe_find_map(struct device_node *np) -{ - int i; - struct device_node *tmp_np; - - if (!of_node_is_type(np, "spe")) { - for (i = 0; i < cbe_regs_map_count; i++) - if (cbe_regs_maps[i].cpu_node == np || - cbe_regs_maps[i].be_node == np) - return &cbe_regs_maps[i]; - return NULL; - } - - if (np->data) - return np->data; - - /* walk up the path until a cpu or be node is found */ - tmp_np = np; - do { - tmp_np = tmp_np->parent; - /* on a correct devicetree we won't get up to root */ - BUG_ON(!tmp_np); - } while (!of_node_is_type(tmp_np, "cpu") || - !of_node_is_type(tmp_np, "be")); - - np->data = cbe_find_map(tmp_np); - - return np->data; -} - -struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->pmd_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); - -struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->pmd_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); - -struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return &map->pmd_shadow_regs; -} - -struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return &map->pmd_shadow_regs; -} - -struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->iic_regs; -} - -struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->iic_regs; -} - -struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) -{ - struct cbe_regs_map *map = cbe_find_map(np); - if (map == NULL) - return NULL; - return map->mic_tm_regs; -} - -struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) -{ - struct cbe_regs_map *map = cbe_thread_map[cpu].regs; - if (map == NULL) - return NULL; - return map->mic_tm_regs; -} -EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); - -u32 cbe_get_hw_thread_id(int cpu) -{ - return cbe_thread_map[cpu].thread_id; -} -EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); - -u32 cbe_cpu_to_node(int cpu) -{ - return cbe_thread_map[cpu].cbe_id; -} -EXPORT_SYMBOL_GPL(cbe_cpu_to_node); - -u32 cbe_node_to_cpu(int node) -{ - return cpumask_first(&cbe_local_mask[node]); - -} -EXPORT_SYMBOL_GPL(cbe_node_to_cpu); - -static struct device_node *__init cbe_get_be_node(int cpu_id) -{ - struct device_node *np; - - for_each_node_by_type (np, "be") { - int len,i; - const phandle *cpu_handle; - - cpu_handle = of_get_property(np, "cpus", &len); - - /* - * the CAB SLOF tree is non-compliant, so we just assume - * there is only one node - */ - if (WARN_ON_ONCE(!cpu_handle)) - return np; - - for (i = 0; i < len; i++) { - struct device_node *ch_np = of_find_node_by_phandle(cpu_handle[i]); - struct device_node *ci_np = of_get_cpu_node(cpu_id, NULL); - - of_node_put(ch_np); - of_node_put(ci_np); - - if (ch_np == ci_np) - return np; - } - } - - return NULL; -} - -static void __init cbe_fill_regs_map(struct cbe_regs_map *map) -{ - if(map->be_node) { - struct device_node *be, *np, *parent_np; - - be = map->be_node;
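 /* the three MMIO blocks live in child nodes of this BE node; each is matched to it by parent below */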
- - for_each_node_by_type(np, "pervasive") { - parent_np = of_get_parent(np); - if (parent_np == be) - map->pmd_regs = of_iomap(np, 0); - of_node_put(parent_np); - } - - for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") { - parent_np = of_get_parent(np); - if (parent_np == be) - map->iic_regs = of_iomap(np, 2); - of_node_put(parent_np); - } - - for_each_node_by_type(np, "mic-tm") { - parent_np = of_get_parent(np); - if (parent_np == be) - map->mic_tm_regs = of_iomap(np, 0); - of_node_put(parent_np); - } - } else { - struct device_node *cpu; - /* That hack must die die die ! */ - const struct address_prop { - unsigned long address; - unsigned int len; - } __attribute__((packed)) *prop; - - cpu = map->cpu_node; - - prop = of_get_property(cpu, "pervasive", NULL); - if (prop != NULL) - map->pmd_regs = ioremap(prop->address, prop->len); - - prop = of_get_property(cpu, "iic", NULL); - if (prop != NULL) - map->iic_regs = ioremap(prop->address, prop->len); - - prop = of_get_property(cpu, "mic-tm", NULL); - if (prop != NULL) - map->mic_tm_regs = ioremap(prop->address, prop->len); - } -} - - -void __init cbe_regs_init(void) -{ - int i; - unsigned int thread_id; - struct device_node *cpu; - - /* Build local fast map of CPUs */ - for_each_possible_cpu(i) { - cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); - cbe_thread_map[i].be_node = cbe_get_be_node(i); - cbe_thread_map[i].thread_id = thread_id; - } - - /* Find maps for each device tree CPU */ - for_each_node_by_type(cpu, "cpu") { - struct cbe_regs_map *map; - unsigned int cbe_id; - - cbe_id = cbe_regs_map_count++; - map = &cbe_regs_maps[cbe_id]; - - if (cbe_regs_map_count > MAX_CBE) { - printk(KERN_ERR "cbe_regs: More BE chips than supported" - "!\n"); - cbe_regs_map_count--; - of_node_put(cpu); - return; - } - of_node_put(map->cpu_node); - map->cpu_node = of_node_get(cpu); - - for_each_possible_cpu(i) { - struct cbe_thread_map *thread = &cbe_thread_map[i]; - - if (thread->cpu_node == cpu) { - thread->regs = map; - thread->cbe_id = cbe_id; - map->be_node = thread->be_node; - cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); - if(thread->thread_id == 0) - cpumask_set_cpu(i, &cbe_first_online_cpu); - } - } - - cbe_fill_regs_map(map); - } -} - diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c deleted file mode 100644 index 2f45428e32c8..000000000000 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ /dev/null @@ -1,386 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * thermal support for the cell processor - * - * This module adds some sysfs attributes to cpu and spu nodes. - * Base for measurements are the digital thermal sensors (DTS) - * located on the chip. - * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius - * The attributes can be found under - * /sys/devices/system/cpu/cpuX/thermal - * /sys/devices/system/spu/spuX/thermal - * - * The following attributes are added for each node: - * temperature: - * contains the current temperature measured by the DTS - * throttle_begin: - * throttling begins when temperature is greater or equal to - * throttle_begin. Setting this value to 125 prevents throttling. - * throttle_end: - * throttling is being ceased, if the temperature is lower than - * throttle_end. Due to a delay between applying throttling and - * a reduced temperature this value should be less than throttle_begin. - * A value equal to throttle_begin provides only a very little hysteresis. 
- * throttle_full_stop: - * If the temperature is greater or equal to throttle_full_stop, - * full throttling is applied to the cpu or spu. This value should be - * greater than throttle_begin and throttle_end. Setting this value to - * 65 prevents the unit from running code at all. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/module.h> -#include <linux/device.h> -#include <linux/kernel.h> -#include <linux/cpu.h> -#include <linux/stringify.h> -#include <asm/spu.h> -#include <asm/io.h> -#include <asm/cell-regs.h> - -#include "spu_priv1_mmio.h" - -#define TEMP_MIN 65 -#define TEMP_MAX 125 - -#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \ -struct device_attribute attr_ ## _prefix ## _ ## _name = { \ - .attr = { .name = __stringify(_name), .mode = _mode }, \ - .show = _prefix ## _show_ ## _name, \ - .store = _prefix ## _store_ ## _name, \ -}; - -static inline u8 reg_to_temp(u8 reg_value) -{ - return ((reg_value & 0x3f) << 1) + TEMP_MIN; -} - -static inline u8 temp_to_reg(u8 temp) -{ - return ((temp - TEMP_MIN) >> 1) & 0x3f; -} - -static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev) -{ - struct spu *spu; - - spu = container_of(dev, struct spu, dev); - - return cbe_get_pmd_regs(spu_devnode(spu)); -} - -/* returns the value for a given spu in a given register */ -static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg) -{ - union spe_reg value; - struct spu *spu; - - spu = container_of(dev, struct spu, dev); - value.val = in_be64(&reg->val); - - return value.spe[spu->spe_id]; -} - -static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr, - char *buf) -{ - u8 value; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = get_pmd_regs(dev); - - value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1); - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - -static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos) -{ - u64 value; - - value = in_be64(&pmd_regs->tm_tpr.val); - /* access the corresponding byte */ - value >>= pos; - value &= 0x3F; - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - -static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) -{ - u64 reg_value; - unsigned int temp; - u64 new_value; - int ret; - - ret = sscanf(buf, "%u", &temp); - - if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX) - return -EINVAL; - - new_value = temp_to_reg(temp); - - reg_value = in_be64(&pmd_regs->tm_tpr.val); - - /* zero out bits for new value */ - reg_value &= ~(0xffull << pos); - /* set bits to new value */ - reg_value |= new_value << pos; - - out_be64(&pmd_regs->tm_tpr.val, reg_value); - return size; -} - -static ssize_t spu_show_throttle_end(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 0); -} - -static ssize_t spu_show_throttle_begin(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 8); -} - -static ssize_t spu_show_throttle_full_stop(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(get_pmd_regs(dev), buf, 16); -} - -static ssize_t spu_store_throttle_end(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 0); -} - -static ssize_t spu_store_throttle_begin(struct device *dev, - struct
device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 8); -} - -static ssize_t spu_store_throttle_full_stop(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(get_pmd_regs(dev), buf, size, 16); -} - -static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - u64 value; - - pmd_regs = cbe_get_cpu_pmd_regs(dev->id); - value = in_be64(&pmd_regs->ts_ctsr2); - - value = (value >> pos) & 0x3f; - - return sprintf(buf, "%d\n", reg_to_temp(value)); -} - - -/* shows the temperature of the DTS on the PPE, - * located near the linear thermal sensor */ -static ssize_t ppe_show_temp0(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return ppe_show_temp(dev, buf, 32); -} - -/* shows the temperature of the second DTS on the PPE */ -static ssize_t ppe_show_temp1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return ppe_show_temp(dev, buf, 0); -} - -static ssize_t ppe_show_throttle_end(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32); -} - -static ssize_t ppe_show_throttle_begin(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40); -} - -static ssize_t ppe_show_throttle_full_stop(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48); -} - -static ssize_t ppe_store_throttle_end(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32); -} - -static ssize_t ppe_store_throttle_begin(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40); -} - -static ssize_t ppe_store_throttle_full_stop(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48); -} - - -static struct device_attribute attr_spu_temperature = { - .attr = {.name = "temperature", .mode = 0400 }, - .show = spu_show_temp, -}; - -static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600); -static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600); -static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600); - - -static struct attribute *spu_attributes[] = { - &attr_spu_temperature.attr, - &attr_spu_throttle_end.attr, - &attr_spu_throttle_begin.attr, - &attr_spu_throttle_full_stop.attr, - NULL, -}; - -static const struct attribute_group spu_attribute_group = { - .name = "thermal", - .attrs = spu_attributes, -}; - -static struct device_attribute attr_ppe_temperature0 = { - .attr = {.name = "temperature0", .mode = 0400 }, - .show = ppe_show_temp0, -}; - -static struct device_attribute attr_ppe_temperature1 = { - .attr = {.name = "temperature1", .mode = 0400 }, - .show = ppe_show_temp1, -}; - -static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600); -static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600); -static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600); - -static struct attribute *ppe_attributes[] = { - &attr_ppe_temperature0.attr, - &attr_ppe_temperature1.attr, - &attr_ppe_throttle_end.attr, - &attr_ppe_throttle_begin.attr, - &attr_ppe_throttle_full_stop.attr, - NULL, -}; - -static struct attribute_group ppe_attribute_group = { - .name 
= "thermal", - .attrs = ppe_attributes, -}; - -/* - * initialize throttling with default values - */ -static int __init init_default_values(void) -{ - int cpu; - struct cbe_pmd_regs __iomem *pmd_regs; - struct device *dev; - union ppe_spe_reg tpr; - union spe_reg str1; - u64 str2; - union spe_reg cr1; - u64 cr2; - - /* TPR defaults */ - /* ppe - * 1F - no full stop - * 08 - dynamic throttling starts if over 80 degrees - * 03 - dynamic throttling ceases if below 70 degrees */ - tpr.ppe = 0x1F0803; - /* spe - * 10 - full stopped when over 96 degrees - * 08 - dynamic throttling starts if over 80 degrees - * 03 - dynamic throttling ceases if below 70 degrees - */ - tpr.spe = 0x100803; - - /* STR defaults */ - /* str1 - * 10 - stop 16 of 32 cycles - */ - str1.val = 0x1010101010101010ull; - /* str2 - * 10 - stop 16 of 32 cycles - */ - str2 = 0x10; - - /* CR defaults */ - /* cr1 - * 4 - normal operation - */ - cr1.val = 0x0404040404040404ull; - /* cr2 - * 4 - normal operation - */ - cr2 = 0x04; - - for_each_possible_cpu (cpu) { - pr_debug("processing cpu %d\n", cpu); - dev = get_cpu_device(cpu); - - if (!dev) { - pr_info("invalid dev pointer for cbe_thermal\n"); - return -EINVAL; - } - - pmd_regs = cbe_get_cpu_pmd_regs(dev->id); - - if (!pmd_regs) { - pr_info("invalid CBE regs pointer for cbe_thermal\n"); - return -EINVAL; - } - - out_be64(&pmd_regs->tm_str2, str2); - out_be64(&pmd_regs->tm_str1.val, str1.val); - out_be64(&pmd_regs->tm_tpr.val, tpr.val); - out_be64(&pmd_regs->tm_cr1.val, cr1.val); - out_be64(&pmd_regs->tm_cr2, cr2); - } - - return 0; -} - - -static int __init thermal_init(void) -{ - int rc = init_default_values(); - - if (rc == 0) { - spu_add_dev_attr_group(&spu_attribute_group); - cpu_add_dev_attr_group(&ppe_attribute_group); - } - - return rc; -} -module_init(thermal_init); - -static void __exit thermal_exit(void) -{ - spu_remove_dev_attr_group(&spu_attribute_group); - cpu_remove_dev_attr_group(&ppe_attribute_group); -} -module_exit(thermal_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); - diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h deleted file mode 100644 index d5142e905ab3..000000000000 --- a/arch/powerpc/platforms/cell/cell.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Cell Platform common data structures - * - * Copyright 2015, Daniel Axtens, IBM Corporation - */ - -#ifndef CELL_H -#define CELL_H - -#include <asm/pci-bridge.h> - -extern struct pci_controller_ops cell_pci_controller_ops; - -#endif diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c deleted file mode 100644 index ca7849e113d7..000000000000 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * spu aware cpufreq governor for the cell processor - * - * © Copyright IBM Corporation 2006-2008 - * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/cpufreq.h> -#include <linux/sched.h> -#include <linux/sched/loadavg.h> -#include <linux/module.h> -#include <linux/timer.h> -#include <linux/workqueue.h> -#include <linux/atomic.h> -#include <asm/machdep.h> -#include <asm/spu.h> - -#define POLL_TIME 100000 /* in µs */ -#define EXP 753 /* exp(-1) in fixed-point */ - -struct spu_gov_info_struct { - unsigned long busy_spus; /* fixed-point */ - struct cpufreq_policy *policy; - struct delayed_work work; - unsigned int poll_int; /* µs */ 
-}; -static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); - -static int calc_freq(struct spu_gov_info_struct *info) -{ - int cpu; - int busy_spus; - - cpu = info->policy->cpu; - busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); - - info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); - pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", - cpu, busy_spus, info->busy_spus); - - return info->policy->max * info->busy_spus / FIXED_1; -} - -static void spu_gov_work(struct work_struct *work) -{ - struct spu_gov_info_struct *info; - int delay; - unsigned long target_freq; - - info = container_of(work, struct spu_gov_info_struct, work.work); - - /* after cancel_delayed_work_sync we unset info->policy */ - BUG_ON(info->policy == NULL); - - target_freq = calc_freq(info); - __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); - - delay = usecs_to_jiffies(info->poll_int); - schedule_delayed_work_on(info->policy->cpu, &info->work, delay); -} - -static void spu_gov_init_work(struct spu_gov_info_struct *info) -{ - int delay = usecs_to_jiffies(info->poll_int); - INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); - schedule_delayed_work_on(info->policy->cpu, &info->work, delay); -} - -static void spu_gov_cancel_work(struct spu_gov_info_struct *info) -{ - cancel_delayed_work_sync(&info->work); -} - -static int spu_gov_start(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); - struct spu_gov_info_struct *affected_info; - int i; - - if (!cpu_online(cpu)) { - printk(KERN_ERR "cpu %d is not online\n", cpu); - return -EINVAL; - } - - if (!policy->cur) { - printk(KERN_ERR "no cpu specified in policy\n"); - return -EINVAL; - } - - /* initialize spu_gov_info for all affected cpus */ - for_each_cpu(i, policy->cpus) { - affected_info = &per_cpu(spu_gov_info, i); - affected_info->policy = policy; - } - - info->poll_int = POLL_TIME; - - /* setup timer */ - spu_gov_init_work(info); - - return 0; -} - -static void spu_gov_stop(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); - int i; - - /* cancel timer */ - spu_gov_cancel_work(info); - - /* clean spu_gov_info for all affected cpus */ - for_each_cpu (i, policy->cpus) { - info = &per_cpu(spu_gov_info, i); - info->policy = NULL; - } -} - -static struct cpufreq_governor spu_governor = { - .name = "spudemand", - .start = spu_gov_start, - .stop = spu_gov_stop, - .owner = THIS_MODULE, -}; -cpufreq_governor_init(spu_governor); -cpufreq_governor_exit(spu_governor); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c deleted file mode 100644 index 03ee8152ee97..000000000000 --- a/arch/powerpc/platforms/cell/interrupt.c +++ /dev/null @@ -1,390 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cell Internal Interrupt Controller - * - * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) - * IBM, Corp. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - * - * TODO: - * - Fix various assumptions related to HW CPU numbers vs. 
linux CPU numbers - * vs node numbers in the setup code - * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from - * a non-active node to the active node) - */ - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> -#include <linux/export.h> -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/ioport.h> -#include <linux/kernel_stat.h> -#include <linux/pgtable.h> -#include <linux/of_address.h> - -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/machdep.h> -#include <asm/cell-regs.h> - -#include "interrupt.h" - -struct iic { - struct cbe_iic_thread_regs __iomem *regs; - u8 target_id; - u8 eoi_stack[16]; - int eoi_ptr; - struct device_node *node; -}; - -static DEFINE_PER_CPU(struct iic, cpu_iic); -#define IIC_NODE_COUNT 2 -static struct irq_domain *iic_host; - -/* Convert between "pending" bits and hw irq number */ -static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) -{ - unsigned char unit = bits.source & 0xf; - unsigned char node = bits.source >> 4; - unsigned char class = bits.class & 3; - - /* Decode IPIs */ - if (bits.flags & CBE_IIC_IRQ_IPI) - return IIC_IRQ_TYPE_IPI | (bits.prio >> 4); - else - return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; -} - -static void iic_mask(struct irq_data *d) -{ -} - -static void iic_unmask(struct irq_data *d) -{ -} - -static void iic_eoi(struct irq_data *d) -{ - struct iic *iic = this_cpu_ptr(&cpu_iic); - out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); - BUG_ON(iic->eoi_ptr < 0); -} - -static struct irq_chip iic_chip = { - .name = "CELL-IIC", - .irq_mask = iic_mask, - .irq_unmask = iic_unmask, - .irq_eoi = iic_eoi, -}; - - -static void iic_ioexc_eoi(struct irq_data *d) -{ -} - -static void iic_ioexc_cascade(struct irq_desc *desc) -{ - struct irq_chip *chip = irq_desc_get_chip(desc); - struct cbe_iic_regs __iomem *node_iic = - (void __iomem *)irq_desc_get_handler_data(desc); - unsigned int irq = irq_desc_get_irq(desc); - unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; - unsigned long bits, ack; - int cascade; - - for (;;) { - bits = in_be64(&node_iic->iic_is); - if (bits == 0) - break; - /* pre-ack edge interrupts */ - ack = bits & IIC_ISR_EDGE_MASK; - if (ack) - out_be64(&node_iic->iic_is, ack); - /* handle them */ - for (cascade = 63; cascade >= 0; cascade--) - if (bits & (0x8000000000000000UL >> cascade)) - generic_handle_domain_irq(iic_host, - base | cascade); - /* post-ack level interrupts */ - ack = bits & ~IIC_ISR_EDGE_MASK; - if (ack) - out_be64(&node_iic->iic_is, ack); - } - chip->irq_eoi(&desc->irq_data); -} - - -static struct irq_chip iic_ioexc_chip = { - .name = "CELL-IOEX", - .irq_mask = iic_mask, - .irq_unmask = iic_unmask, - .irq_eoi = iic_ioexc_eoi, -}; - -/* Get an IRQ number from the pending state register of the IIC */ -static unsigned int iic_get_irq(void) -{ - struct cbe_iic_pending_bits pending; - struct iic *iic; - unsigned int virq; - - iic = this_cpu_ptr(&cpu_iic); - *(unsigned long *) &pending = - in_be64((u64 __iomem *) &iic->regs->pending_destr); - if (!(pending.flags & CBE_IIC_IRQ_VALID)) - return 0; - virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending)); - if (!virq) - return 0; - iic->eoi_stack[++iic->eoi_ptr] = pending.prio; - BUG_ON(iic->eoi_ptr > 15); - return virq; -} - -void iic_setup_cpu(void) -{ - out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff); -} - -u8 iic_get_target_id(int cpu) -{ - return per_cpu(cpu_iic, cpu).target_id; -} - 
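/*
 * A minimal stand-alone sketch of the interrupt-number packing performed by
 * iic_pending_to_hwnum() above; demo_iic_hwirq is a hypothetical name, and
 * this compiles as a plain userspace program rather than driver code.
 */
#include <stdio.h>

static unsigned int demo_iic_hwirq(unsigned int node, unsigned int class,
                                   unsigned int unit)
{
        /* node from bit 8 up, class in bits 4-5, unit in the low nibble */
        return (node << 8) | ((class & 3) << 4) | (unit & 0xf);
}

int main(void)
{
        /* node 0, class 2, unit 0x0 (IOC 0): prints 0x20, IIC_IRQ_EXT_IOIF0 */
        printf("0x%x\n", demo_iic_hwirq(0, 2, 0x0));
        /* node 1, class 2, unit 0xa (SPU 7): prints 0x12a */
        printf("0x%x\n", demo_iic_hwirq(1, 2, 0xa));
        return 0;
}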
-EXPORT_SYMBOL_GPL(iic_get_target_id); - -#ifdef CONFIG_SMP - -/* Use the highest interrupt priorities for IPI */ -static inline int iic_msg_to_irq(int msg) -{ - return IIC_IRQ_TYPE_IPI + 0xf - msg; -} - -void iic_message_pass(int cpu, int msg) -{ - out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); -} - -static void iic_request_ipi(int msg) -{ - int virq; - - virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); - if (!virq) { - printk(KERN_ERR - "iic: failed to map IPI %s\n", smp_ipi_name[msg]); - return; - } - - /* - * If smp_request_message_ipi encounters an error it will notify - * the error. If a message is not needed it will return non-zero. - */ - if (smp_request_message_ipi(virq, msg)) - irq_dispose_mapping(virq); -} - -void iic_request_IPIs(void) -{ - iic_request_ipi(PPC_MSG_CALL_FUNCTION); - iic_request_ipi(PPC_MSG_RESCHEDULE); - iic_request_ipi(PPC_MSG_TICK_BROADCAST); - iic_request_ipi(PPC_MSG_NMI_IPI); -} - -#endif /* CONFIG_SMP */ - - -static int iic_host_match(struct irq_domain *h, struct device_node *node, - enum irq_domain_bus_token bus_token) -{ - return of_device_is_compatible(node, - "IBM,CBEA-Internal-Interrupt-Controller"); -} - -static int iic_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - switch (hw & IIC_IRQ_TYPE_MASK) { - case IIC_IRQ_TYPE_IPI: - irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); - break; - case IIC_IRQ_TYPE_IOEXC: - irq_set_chip_and_handler(virq, &iic_ioexc_chip, - handle_edge_eoi_irq); - break; - default: - irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); - } - return 0; -} - -static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - unsigned int node, ext, unit, class; - const u32 *val; - - if (!of_device_is_compatible(ct, - "IBM,CBEA-Internal-Interrupt-Controller")) - return -ENODEV; - if (intsize != 1) - return -ENODEV; - val = of_get_property(ct, "#interrupt-cells", NULL); - if (val == NULL || *val != 1) - return -ENODEV; - - node = intspec[0] >> 24; - ext = (intspec[0] >> 16) & 0xff; - class = (intspec[0] >> 8) & 0xff; - unit = intspec[0] & 0xff; - - /* Check if node is in supported range */ - if (node > 1) - return -EINVAL; - - /* Build up interrupt number, special case for IO exceptions */ - *out_hwirq = (node << IIC_IRQ_NODE_SHIFT); - if (unit == IIC_UNIT_IIC && class == 1) - *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext; - else - *out_hwirq |= IIC_IRQ_TYPE_NORMAL | - (class << IIC_IRQ_CLASS_SHIFT) | unit; - - /* Dummy flags, ignored by iic code */ - *out_flags = IRQ_TYPE_EDGE_RISING; - - return 0; -} - -static const struct irq_domain_ops iic_host_ops = { - .match = iic_host_match, - .map = iic_host_map, - .xlate = iic_host_xlate, -}; - -static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, - struct device_node *node) -{ - /* XXX FIXME: should locate the linux CPU number from the HW cpu - * number properly. We are lucky for now - */ - struct iic *iic = &per_cpu(cpu_iic, hw_cpu); - - iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); - BUG_ON(iic->regs == NULL); - - iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 
0xf : 0xe); - iic->eoi_stack[0] = 0xff; - iic->node = of_node_get(node); - out_be64(&iic->regs->prio, 0); - - printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n", - hw_cpu, iic->target_id, node); -} - -static int __init setup_iic(void) -{ - struct device_node *dn; - struct resource r0, r1; - unsigned int node, cascade, found = 0; - struct cbe_iic_regs __iomem *node_iic; - const u32 *np; - - for_each_node_by_name(dn, "interrupt-controller") { - if (!of_device_is_compatible(dn, - "IBM,CBEA-Internal-Interrupt-Controller")) - continue; - np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL); - if (np == NULL) { - printk(KERN_WARNING "IIC: CPU association not found\n"); - of_node_put(dn); - return -ENODEV; - } - if (of_address_to_resource(dn, 0, &r0) || - of_address_to_resource(dn, 1, &r1)) { - printk(KERN_WARNING "IIC: Can't resolve addresses\n"); - of_node_put(dn); - return -ENODEV; - } - found++; - init_one_iic(np[0], r0.start, dn); - init_one_iic(np[1], r1.start, dn); - - /* Setup cascade for IO exceptions. XXX cleanup tricks to get - * node vs CPU etc... - * Note that we configure the IIC_IRR here with a hard coded - * priority of 1. We might want to improve that later. - */ - node = np[0] >> 1; - node_iic = cbe_get_cpu_iic_regs(np[0]); - cascade = node << IIC_IRQ_NODE_SHIFT; - cascade |= 1 << IIC_IRQ_CLASS_SHIFT; - cascade |= IIC_UNIT_IIC; - cascade = irq_create_mapping(iic_host, cascade); - if (!cascade) - continue; - /* - * irq_data is a generic pointer that gets passed back - * to us later, so the forced cast is fine. - */ - irq_set_handler_data(cascade, (void __force *)node_iic); - irq_set_chained_handler(cascade, iic_ioexc_cascade); - out_be64(&node_iic->iic_ir, - (1 << 12) /* priority */ | - (node << 4) /* dest node */ | - IIC_UNIT_THREAD_0 /* route them to thread 0 */); - /* Flush pending (make sure it triggers if there is - * anything pending - */ - out_be64(&node_iic->iic_is, 0xfffffffffffffffful); - } - - if (found) - return 0; - else - return -ENODEV; -} - -void __init iic_init_IRQ(void) -{ - /* Setup an irq host data structure */ - iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, - NULL); - BUG_ON(iic_host == NULL); - irq_set_default_host(iic_host); - - /* Discover and initialize iics */ - if (setup_iic() < 0) - panic("IIC: Failed to initialize !\n"); - - /* Set master interrupt handling function */ - ppc_md.get_irq = iic_get_irq; - - /* Enable on current CPU */ - iic_setup_cpu(); -} - -void iic_set_interrupt_routing(int cpu, int thread, int priority) -{ - struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu); - u64 iic_ir = 0; - int node = cpu >> 1; - - /* Set which node and thread will handle the next interrupt */ - iic_ir |= CBE_IIC_IR_PRIO(priority) | - CBE_IIC_IR_DEST_NODE(node); - if (thread == 0) - iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0); - else - iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1); - out_be64(&iic_regs->iic_ir, iic_ir); -} diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h deleted file mode 100644 index a47902248541..000000000000 --- a/arch/powerpc/platforms/cell/interrupt.h +++ /dev/null @@ -1,90 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_CELL_PIC_H -#define ASM_CELL_PIC_H -#ifdef __KERNEL__ -/* - * Mapping of IIC pending bits into per-node interrupt numbers. - * - * Interrupt numbers are in the range 0...0x1ff where the top bit - * (0x100) represent the source node. 
Only 2 nodes are supported with - * the current code though it's trivial to extend that if necessary using - * higher level bits - * - * The bottom 8 bits are split into 2 type bits and 6 data bits that - * depend on the type: - * - * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source - * 01 (0x40 | data) : IO exception. data is the exception number as - * defined by bit numbers in IIC_SR - * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority) - * and node is always 0 (IPIs are per-cpu, their source is - * not relevant) - * 11 (0xc0 | data) : reserved - * - * In addition, interrupt number 0x80000000 is defined as always invalid - * (that is the node field is expected to never extend to more than 23 bits) - * - */ - -enum { - IIC_IRQ_INVALID = 0x80000000u, - IIC_IRQ_NODE_MASK = 0x100, - IIC_IRQ_NODE_SHIFT = 8, - IIC_IRQ_MAX = 0x1ff, - IIC_IRQ_TYPE_MASK = 0xc0, - IIC_IRQ_TYPE_NORMAL = 0x00, - IIC_IRQ_TYPE_IOEXC = 0x40, - IIC_IRQ_TYPE_IPI = 0x80, - IIC_IRQ_CLASS_SHIFT = 4, - IIC_IRQ_CLASS_0 = 0x00, - IIC_IRQ_CLASS_1 = 0x10, - IIC_IRQ_CLASS_2 = 0x20, - IIC_SOURCE_COUNT = 0x200, - - /* Here are defined the various source/dest units. Avoid using those - * definitions if you can, they are mostly here for reference - */ - IIC_UNIT_SPU_0 = 0x4, - IIC_UNIT_SPU_1 = 0x7, - IIC_UNIT_SPU_2 = 0x3, - IIC_UNIT_SPU_3 = 0x8, - IIC_UNIT_SPU_4 = 0x2, - IIC_UNIT_SPU_5 = 0x9, - IIC_UNIT_SPU_6 = 0x1, - IIC_UNIT_SPU_7 = 0xa, - IIC_UNIT_IOC_0 = 0x0, - IIC_UNIT_IOC_1 = 0xb, - IIC_UNIT_THREAD_0 = 0xe, /* target only */ - IIC_UNIT_THREAD_1 = 0xf, /* target only */ - IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */ - - /* Base numbers for the external interrupts */ - IIC_IRQ_EXT_IOIF0 = - IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0, - IIC_IRQ_EXT_IOIF1 = - IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1, - - /* Base numbers for the IIC_ISR interrupts */ - IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63, - IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62, - IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61, - IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60, - IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59, - - /* Which bits in IIC_ISR are edge sensitive */ - IIC_ISR_EDGE_MASK = 0x4ul, -}; - -extern void iic_init_IRQ(void); -extern void iic_message_pass(int cpu, int msg); -extern void iic_request_IPIs(void); -extern void iic_setup_cpu(void); - -extern u8 iic_get_target_id(int cpu); - -extern void spider_init_IRQ(void); - -extern void iic_set_interrupt_routing(int cpu, int thread, int priority); - -#endif -#endif /* ASM_CELL_PIC_H */ diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c deleted file mode 100644 index 1202a69b0a20..000000000000 --- a/arch/powerpc/platforms/cell/iommu.c +++ /dev/null @@ -1,1094 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * IOMMU implementation for Cell Broadband Processor Architecture - * - * (C) Copyright IBM Corporation 2006-2008 - * - * Author: Jeremy Kerr <jk@ozlabs.org> - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/notifier.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/memblock.h> - -#include <asm/prom.h> -#include <asm/iommu.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/udbg.h> -#include
<asm/firmware.h> -#include <asm/cell-regs.h> - -#include "cell.h" -#include "interrupt.h" - -/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages - * instead of leaving them mapped to some dummy page. This can be - * enabled once the appropriate workarounds for spider bugs have - * been enabled - */ -#define CELL_IOMMU_REAL_UNMAP - -/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of - * IO PTEs based on the transfer direction. That can be enabled - * once spider-net has been fixed to pass the correct direction - * to the DMA mapping functions - */ -#define CELL_IOMMU_STRICT_PROTECTION - - -#define NR_IOMMUS 2 - -/* IOC mmap registers */ -#define IOC_Reg_Size 0x2000 - -#define IOC_IOPT_CacheInvd 0x908 -#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul -#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul -#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul - -#define IOC_IOST_Origin 0x918 -#define IOC_IOST_Origin_E 0x8000000000000000ul -#define IOC_IOST_Origin_HW 0x0000000000000800ul -#define IOC_IOST_Origin_HL 0x0000000000000400ul - -#define IOC_IO_ExcpStat 0x920 -#define IOC_IO_ExcpStat_V 0x8000000000000000ul -#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul -#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul -#define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul -#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul -#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul -#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful - -#define IOC_IO_ExcpMask 0x928 -#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul -#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul - -#define IOC_IOCmd_Offset 0x1000 - -#define IOC_IOCmd_Cfg 0xc00 -#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul - - -/* Segment table entries */ -#define IOSTE_V 0x8000000000000000ul /* valid */ -#define IOSTE_H 0x4000000000000000ul /* cache hint */ -#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */ -#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. 
pages in IOPT */ -#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */ -#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */ -#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */ -#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */ -#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */ - - -/* IOMMU sizing */ -#define IO_SEGMENT_SHIFT 28 -#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) - -/* The high bit needs to be set on every DMA address */ -#define SPIDER_DMA_OFFSET 0x80000000ul - -struct iommu_window { - struct list_head list; - struct cbe_iommu *iommu; - unsigned long offset; - unsigned long size; - unsigned int ioid; - struct iommu_table table; -}; - -#define NAMESIZE 8 -struct cbe_iommu { - int nid; - char name[NAMESIZE]; - void __iomem *xlate_regs; - void __iomem *cmd_regs; - unsigned long *stab; - unsigned long *ptab; - void *pad_page; - struct list_head windows; -}; - -/* Static array of iommus, one per node - * each contains a list of windows, keyed from dma_window property - * - on bus setup, look for a matching window, or create one - * - on dev setup, assign iommu_table ptr - */ -static struct cbe_iommu iommus[NR_IOMMUS]; -static int cbe_nr_iommus; - -static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, - long n_ptes) -{ - u64 __iomem *reg; - u64 val; - long n; - - reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; - - while (n_ptes > 0) { - /* we can invalidate up to 1 << 11 PTEs at once */ - n = min(n_ptes, 1l << 11); - val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask) - | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask) - | IOC_IOPT_CacheInvd_Busy; - - out_be64(reg, val); - while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy) - ; - - n_ptes -= n; - pte += n; - } -} - -static int tce_build_cell(struct iommu_table *tbl, long index, long npages, - unsigned long uaddr, enum dma_data_direction direction, - unsigned long attrs) -{ - int i; - unsigned long *io_pte, base_pte; - struct iommu_window *window = - container_of(tbl, struct iommu_window, table); - - /* implementing proper protection causes problems with the spidernet - * driver - check mapping directions later, but allow read & write by - * default for now.*/ -#ifdef CELL_IOMMU_STRICT_PROTECTION - /* to avoid referencing a global, we use a trick here to setup the - * protection bit. "prot" is setup to be 3 fields of 4 bits appended - * together for each of the 3 supported direction values. It is then - * shifted left so that the fields matching the desired direction - * lands on the appropriate bits, and other bits are masked out. 
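 * (A worked example, assuming CBE_IOPTE_PP_W and CBE_IOPTE_PP_R are the two
 * top IOPTE bits, which is all the mask below lets through: prot = 0xc48
 * packs W|R, R and W for directions 0..2, so DMA_BIDIRECTIONAL (0) shifts
 * 0xc to the top nibble (both bits), DMA_TO_DEVICE (1) shifts 0x4 up
 * (device may only read) and DMA_FROM_DEVICE (2) shifts 0x8 up (device may
 * only write).)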
- */ - const unsigned long prot = 0xc48; - base_pte = - ((prot << (52 + 4 * direction)) & - (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) | - CBE_IOPTE_M | CBE_IOPTE_SO_RW | - (window->ioid & CBE_IOPTE_IOID_Mask); -#else - base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | - CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); -#endif - if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) - base_pte &= ~CBE_IOPTE_SO_RW; - - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - - for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) - io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); - - mb(); - - invalidate_tce_cache(window->iommu, io_pte, npages); - - pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", - index, npages, direction, base_pte); - return 0; -} - -static void tce_free_cell(struct iommu_table *tbl, long index, long npages) -{ - - int i; - unsigned long *io_pte, pte; - struct iommu_window *window = - container_of(tbl, struct iommu_window, table); - - pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); - -#ifdef CELL_IOMMU_REAL_UNMAP - pte = 0; -#else - /* spider bridge does PCI reads after freeing - insert a mapping - * to a scratch page instead of an invalid entry */ - pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | - __pa(window->iommu->pad_page) | - (window->ioid & CBE_IOPTE_IOID_Mask); -#endif - - io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - - for (i = 0; i < npages; i++) - io_pte[i] = pte; - - mb(); - - invalidate_tce_cache(window->iommu, io_pte, npages); -} - -static irqreturn_t ioc_interrupt(int irq, void *data) -{ - unsigned long stat, spf; - struct cbe_iommu *iommu = data; - - stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); - spf = stat & IOC_IO_ExcpStat_SPF_Mask; - - /* Might want to rate limit it */ - printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); - printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n", - !!(stat & IOC_IO_ExcpStat_V), - (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ', - (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ', - (stat & IOC_IO_ExcpStat_RW_Mask) ? 
"Read" : "Write", - (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask)); - printk(KERN_ERR " page=0x%016lx\n", - stat & IOC_IO_ExcpStat_ADDR_Mask); - - /* clear interrupt */ - stat &= ~IOC_IO_ExcpStat_V; - out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); - - return IRQ_HANDLED; -} - -static int __init cell_iommu_find_ioc(int nid, unsigned long *base) -{ - struct device_node *np; - struct resource r; - - *base = 0; - - /* First look for new style /be nodes */ - for_each_node_by_name(np, "ioc") { - if (of_node_to_nid(np) != nid) - continue; - if (of_address_to_resource(np, 0, &r)) { - printk(KERN_ERR "iommu: can't get address for %pOF\n", - np); - continue; - } - *base = r.start; - of_node_put(np); - return 0; - } - - /* Ok, let's try the old way */ - for_each_node_by_type(np, "cpu") { - const unsigned int *nidp; - const unsigned long *tmp; - - nidp = of_get_property(np, "node-id", NULL); - if (nidp && *nidp == nid) { - tmp = of_get_property(np, "ioc-translation", NULL); - if (tmp) { - *base = *tmp; - of_node_put(np); - return 0; - } - } - } - - return -ENODEV; -} - -static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu, - unsigned long dbase, unsigned long dsize, - unsigned long fbase, unsigned long fsize) -{ - struct page *page; - unsigned long segments, stab_size; - - segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; - - pr_debug("%s: iommu[%d]: segments: %lu\n", - __func__, iommu->nid, segments); - - /* set up the segment table */ - stab_size = segments * sizeof(unsigned long); - page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); - BUG_ON(!page); - iommu->stab = page_address(page); - memset(iommu->stab, 0, stab_size); -} - -static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu, - unsigned long base, unsigned long size, unsigned long gap_base, - unsigned long gap_size, unsigned long page_shift) -{ - struct page *page; - int i; - unsigned long reg, segments, pages_per_segment, ptab_size, - n_pte_pages, start_seg, *ptab; - - start_seg = base >> IO_SEGMENT_SHIFT; - segments = size >> IO_SEGMENT_SHIFT; - pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); - /* PTEs for each segment must start on a 4K boundary */ - pages_per_segment = max(pages_per_segment, - (1 << 12) / sizeof(unsigned long)); - - ptab_size = segments * pages_per_segment * sizeof(unsigned long); - pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, - iommu->nid, ptab_size, get_order(ptab_size)); - page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); - BUG_ON(!page); - - ptab = page_address(page); - memset(ptab, 0, ptab_size); - - /* number of 4K pages needed for a page table */ - n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; - - pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", - __func__, iommu->nid, iommu->stab, ptab, - n_pte_pages); - - /* initialise the STEs */ - reg = IOSTE_V | ((n_pte_pages - 1) << 5); - - switch (page_shift) { - case 12: reg |= IOSTE_PS_4K; break; - case 16: reg |= IOSTE_PS_64K; break; - case 20: reg |= IOSTE_PS_1M; break; - case 24: reg |= IOSTE_PS_16M; break; - default: BUG(); - } - - gap_base = gap_base >> IO_SEGMENT_SHIFT; - gap_size = gap_size >> IO_SEGMENT_SHIFT; - - pr_debug("Setting up IOMMU stab:\n"); - for (i = start_seg; i < (start_seg + segments); i++) { - if (i >= gap_base && i < (gap_base + gap_size)) { - pr_debug("\toverlap at %d, skipping\n", i); - continue; - } - iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * - (i - start_seg)); - 
pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); - } - - return ptab; -} - -static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu) -{ - int ret; - unsigned long reg, xlate_base; - unsigned int virq; - - if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) - panic("%s: missing IOC register mappings for node %d\n", - __func__, iommu->nid); - - iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); - iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; - - /* ensure that the STEs have updated */ - mb(); - - /* setup interrupts for the iommu. */ - reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); - out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, - reg & ~IOC_IO_ExcpStat_V); - out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, - IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE); - - virq = irq_create_mapping(NULL, - IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); - BUG_ON(!virq); - - ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); - BUG_ON(ret); - - /* set the IOC segment table origin register (and turn on the iommu) */ - reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; - out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); - in_be64(iommu->xlate_regs + IOC_IOST_Origin); - - /* turn on IO translation */ - reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; - out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); -} - -static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu, - unsigned long base, unsigned long size) -{ - cell_iommu_setup_stab(iommu, base, size, 0, 0); - iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, - IOMMU_PAGE_SHIFT_4K); - cell_iommu_enable_hardware(iommu); -} - -#if 0/* Unused for now */ -static struct iommu_window *find_window(struct cbe_iommu *iommu, - unsigned long offset, unsigned long size) -{ - struct iommu_window *window; - - /* todo: check for overlapping (but not equal) windows) */ - - list_for_each_entry(window, &(iommu->windows), list) { - if (window->offset == offset && window->size == size) - return window; - } - - return NULL; -} -#endif - -static inline u32 cell_iommu_get_ioid(struct device_node *np) -{ - const u32 *ioid; - - ioid = of_get_property(np, "ioid", NULL); - if (ioid == NULL) { - printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n", - np); - return 0; - } - - return *ioid; -} - -static struct iommu_table_ops cell_iommu_ops = { - .set = tce_build_cell, - .clear = tce_free_cell -}; - -static struct iommu_window * __init -cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, - unsigned long offset, unsigned long size, - unsigned long pte_offset) -{ - struct iommu_window *window; - struct page *page; - u32 ioid; - - ioid = cell_iommu_get_ioid(np); - - window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); - BUG_ON(window == NULL); - - window->offset = offset; - window->size = size; - window->ioid = ioid; - window->iommu = iommu; - - window->table.it_blocksize = 16; - window->table.it_base = (unsigned long)iommu->ptab; - window->table.it_index = iommu->nid; - window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; - window->table.it_offset = - (offset >> window->table.it_page_shift) + pte_offset; - window->table.it_size = size >> window->table.it_page_shift; - window->table.it_ops = &cell_iommu_ops; - - if (!iommu_init_table(&window->table, iommu->nid, 0, 0)) - panic("Failed to initialize iommu table"); - - pr_debug("\tioid %d\n", window->ioid); - pr_debug("\tblocksize %ld\n", window->table.it_blocksize); - pr_debug("\tbase 0x%016lx\n", 
window->table.it_base); - pr_debug("\toffset 0x%lx\n", window->table.it_offset); - pr_debug("\tsize %ld\n", window->table.it_size); - - list_add(&window->list, &iommu->windows); - - if (offset != 0) - return window; - - /* We need to map and reserve the first IOMMU page since it's used - * by the spider workaround. In theory, we only need to do that when - * running on spider but it doesn't really matter. - * - * This code also assumes that we have a window that starts at 0, - * which is the case on all spider based blades. - */ - page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); - BUG_ON(!page); - iommu->pad_page = page_address(page); - clear_page(iommu->pad_page); - - __set_bit(0, window->table.it_map); - tce_build_cell(&window->table, window->table.it_offset, 1, - (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); - - return window; -} - -static struct cbe_iommu *cell_iommu_for_node(int nid) -{ - int i; - - for (i = 0; i < cbe_nr_iommus; i++) - if (iommus[i].nid == nid) - return &iommus[i]; - return NULL; -} - -static unsigned long cell_dma_nommu_offset; - -static unsigned long dma_iommu_fixed_base; -static bool cell_iommu_enabled; - -/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ -bool iommu_fixed_is_weak; - -static struct iommu_table *cell_get_iommu_table(struct device *dev) -{ - struct iommu_window *window; - struct cbe_iommu *iommu; - - /* Current implementation uses the first window available in that - * node's iommu. We -might- do something smarter later though it may - * never be necessary - */ - iommu = cell_iommu_for_node(dev_to_node(dev)); - if (iommu == NULL || list_empty(&iommu->windows)) { - dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", - dev->of_node, dev_to_node(dev)); - return NULL; - } - window = list_entry(iommu->windows.next, struct iommu_window, list); - - return &window->table; -} - -static u64 cell_iommu_get_fixed_address(struct device *dev); - -static void cell_dma_dev_setup(struct device *dev) -{ - if (cell_iommu_enabled) { - u64 addr = cell_iommu_get_fixed_address(dev); - - if (addr != OF_BAD_ADDR) - dev->archdata.dma_offset = addr + dma_iommu_fixed_base; - set_iommu_table_base(dev, cell_get_iommu_table(dev)); - } else { - dev->archdata.dma_offset = cell_dma_nommu_offset; - } -} - -static void cell_pci_dma_dev_setup(struct pci_dev *dev) -{ - cell_dma_dev_setup(&dev->dev); -} - -static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct device *dev = data; - - /* We are only interested in device addition */ - if (action != BUS_NOTIFY_ADD_DEVICE) - return 0; - - if (cell_iommu_enabled) - dev->dma_ops = &dma_iommu_ops; - cell_dma_dev_setup(dev); - return 0; -} - -static struct notifier_block cell_of_bus_notifier = { - .notifier_call = cell_of_bus_notify -}; - -static int __init cell_iommu_get_window(struct device_node *np, - unsigned long *base, - unsigned long *size) -{ - const __be32 *dma_window; - unsigned long index; - - /* Use ibm,dma-window if available, else, hard code ! 
*/ - dma_window = of_get_property(np, "ibm,dma-window", NULL); - if (dma_window == NULL) { - *base = 0; - *size = 0x80000000u; - return -ENODEV; - } - - of_parse_dma_window(np, dma_window, &index, base, size); - return 0; -} - -static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) -{ - struct cbe_iommu *iommu; - int nid, i; - - /* Get node ID */ - nid = of_node_to_nid(np); - if (nid < 0) { - printk(KERN_ERR "iommu: failed to get node for %pOF\n", - np); - return NULL; - } - pr_debug("iommu: setting up iommu for node %d (%pOF)\n", - nid, np); - - /* XXX todo: If we can have multiple windows on the same IOMMU, which - * isn't the case today, we probably want here to check whether the - * iommu for that node is already setup. - * However, there might be issue with getting the size right so let's - * ignore that for now. We might want to completely get rid of the - * multiple window support since the cell iommu supports per-page ioids - */ - - if (cbe_nr_iommus >= NR_IOMMUS) { - printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n", - np); - return NULL; - } - - /* Init base fields */ - i = cbe_nr_iommus++; - iommu = &iommus[i]; - iommu->stab = NULL; - iommu->nid = nid; - snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); - INIT_LIST_HEAD(&iommu->windows); - - return iommu; -} - -static void __init cell_iommu_init_one(struct device_node *np, - unsigned long offset) -{ - struct cbe_iommu *iommu; - unsigned long base, size; - - iommu = cell_iommu_alloc(np); - if (!iommu) - return; - - /* Obtain a window for it */ - cell_iommu_get_window(np, &base, &size); - - pr_debug("\ttranslating window 0x%lx...0x%lx\n", - base, base + size - 1); - - /* Initialize the hardware */ - cell_iommu_setup_hardware(iommu, base, size); - - /* Setup the iommu_table */ - cell_iommu_setup_window(iommu, np, base, size, - offset >> IOMMU_PAGE_SHIFT_4K); -} - -static void __init cell_disable_iommus(void) -{ - int node; - unsigned long base, val; - void __iomem *xregs, *cregs; - - /* Make sure IOC translation is disabled on all nodes */ - for_each_online_node(node) { - if (cell_iommu_find_ioc(node, &base)) - continue; - xregs = ioremap(base, IOC_Reg_Size); - if (xregs == NULL) - continue; - cregs = xregs + IOC_IOCmd_Offset; - - pr_debug("iommu: cleaning up iommu on node %d\n", node); - - out_be64(xregs + IOC_IOST_Origin, 0); - (void)in_be64(xregs + IOC_IOST_Origin); - val = in_be64(cregs + IOC_IOCmd_Cfg); - val &= ~IOC_IOCmd_Cfg_TE; - out_be64(cregs + IOC_IOCmd_Cfg, val); - (void)in_be64(cregs + IOC_IOCmd_Cfg); - - iounmap(xregs); - } -} - -static int __init cell_iommu_init_disabled(void) -{ - struct device_node *np = NULL; - unsigned long base = 0, size; - - /* When no iommu is present, we use direct DMA ops */ - - /* First make sure all IOC translation is turned off */ - cell_disable_iommus(); - - /* If we have no Axon, we set up the spider DMA magic offset */ - np = of_find_node_by_name(NULL, "axon"); - if (!np) - cell_dma_nommu_offset = SPIDER_DMA_OFFSET; - of_node_put(np); - - /* Now we need to check to see where the memory is mapped - * in PCI space. We assume that all busses use the same dma - * window which is always the case so far on Cell, thus we - * pick up the first pci-internal node we can find and check - * the DMA window from there. 
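 *
 * [Editor's note: illustrative sketch, not part of the original patch.]
 * What the nommu path boils down to: with translation off, a DMA address
 * is simply the CPU physical address plus a constant bus offset
 * (SPIDER_DMA_OFFSET on axon-less spider blades, plus the PCI window
 * base probed below). A hypothetical one-line helper would read:
 *
 *	static inline dma_addr_t cell_direct_dma_addr(unsigned long paddr)
 *	{
 *		return paddr + cell_dma_nommu_offset;
 *	}
 *
 * which is exactly what cell_dma_dev_setup() arranges by storing the
 * offset in dev->archdata.dma_offset.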
- */ - for_each_node_by_name(np, "axon") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - if (cell_iommu_get_window(np, &base, &size) == 0) - break; - } - if (np == NULL) { - for_each_node_by_name(np, "pci-internal") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - if (cell_iommu_get_window(np, &base, &size) == 0) - break; - } - } - of_node_put(np); - - /* If we found a DMA window, we check if it's big enough to enclose - * all of physical memory. If not, we force enable IOMMU - */ - if (np && size < memblock_end_of_DRAM()) { - printk(KERN_WARNING "iommu: force-enabled, dma window" - " (%ldMB) smaller than total memory (%lldMB)\n", - size >> 20, memblock_end_of_DRAM() >> 20); - return -ENODEV; - } - - cell_dma_nommu_offset += base; - - if (cell_dma_nommu_offset != 0) - cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - - printk("iommu: disabled, direct DMA offset is 0x%lx\n", - cell_dma_nommu_offset); - - return 0; -} - -/* - * Fixed IOMMU mapping support - * - * This code adds support for setting up a fixed IOMMU mapping on certain - * cell machines. For 64-bit devices this avoids the performance overhead of - * mapping and unmapping pages at runtime. 32-bit devices are unable to use - * the fixed mapping. - * - * The fixed mapping is established at boot, and maps all of physical memory - * 1:1 into device space at some offset. On machines with < 30 GB of memory - * we setup the fixed mapping immediately above the normal IOMMU window. - * - * For example a machine with 4GB of memory would end up with the normal - * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In - * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to - * 3GB, plus any offset required by firmware. The firmware offset is encoded - * in the "dma-ranges" property. - * - * On machines with 30GB or more of memory, we are unable to place the fixed - * mapping above the normal IOMMU window as we would run out of address space. - * Instead we move the normal IOMMU window to coincide with the hash page - * table, this region does not need to be part of the fixed mapping as no - * device should ever be DMA'ing to it. We then setup the fixed mapping - * from 0 to 32GB. 
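 *
 * [Editor's worked example, not part of the original patch.] Plugging
 * the numbers from the paragraph above into code, for a 4GB machine
 * with its dynamic window at 0-2GB:
 *
 *	u64 fixed_base = 0x080000000ull;	2GB: top of dynamic window
 *	u64 paddr      = 0x040000000ull;	1GB: CPU physical address
 *	u64 fw_offset  = 0;			from the "dma-ranges" property
 *	u64 bus_addr   = fixed_base + paddr + fw_offset;	== 3GB
 *
 * cell_dma_dev_setup() above implements this by setting
 * dev->archdata.dma_offset to the dma-ranges address plus
 * dma_iommu_fixed_base.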
- */ - -static u64 cell_iommu_get_fixed_address(struct device *dev) -{ - u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR; - struct device_node *np; - const u32 *ranges = NULL; - int i, len, best, naddr, nsize, pna, range_size; - - /* We can be called for platform devices that have no of_node */ - np = of_node_get(dev->of_node); - if (!np) - goto out; - - while (1) { - naddr = of_n_addr_cells(np); - nsize = of_n_size_cells(np); - np = of_get_next_parent(np); - if (!np) - break; - - ranges = of_get_property(np, "dma-ranges", &len); - - /* Ignore empty ranges, they imply no translation required */ - if (ranges && len > 0) - break; - } - - if (!ranges) { - dev_dbg(dev, "iommu: no dma-ranges found\n"); - goto out; - } - - len /= sizeof(u32); - - pna = of_n_addr_cells(np); - range_size = naddr + nsize + pna; - - /* dma-ranges format: - * child addr : naddr cells - * parent addr : pna cells - * size : nsize cells - */ - for (i = 0, best = -1, best_size = 0; i < len; i += range_size) { - cpu_addr = of_translate_dma_address(np, ranges + i + naddr); - size = of_read_number(ranges + i + naddr + pna, nsize); - - if (cpu_addr == 0 && size > best_size) { - best = i; - best_size = size; - } - } - - if (best >= 0) { - dev_addr = of_read_number(ranges + best, naddr); - } else - dev_dbg(dev, "iommu: no suitable range found!\n"); - -out: - of_node_put(np); - - return dev_addr; -} - -static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask) -{ - return mask == DMA_BIT_MASK(64) && - cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR; -} - -static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab, - unsigned long base_pte) -{ - unsigned long segment, offset; - - segment = addr >> IO_SEGMENT_SHIFT; - offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); - ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); - - pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", - addr, ptab, segment, offset); - - ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask); -} - -static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, - struct device_node *np, unsigned long dbase, unsigned long dsize, - unsigned long fbase, unsigned long fsize) -{ - unsigned long base_pte, uaddr, ioaddr, *ptab; - - ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); - - dma_iommu_fixed_base = fbase; - - pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); - - base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | - (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask); - - if (iommu_fixed_is_weak) - pr_info("IOMMU: Using weak ordering for fixed mapping\n"); - else { - pr_info("IOMMU: Using strong ordering for fixed mapping\n"); - base_pte |= CBE_IOPTE_SO_RW; - } - - for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { - /* Don't touch the dynamic region */ - ioaddr = uaddr + fbase; - if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { - pr_debug("iommu: fixed/dynamic overlap, skipping\n"); - continue; - } - - insert_16M_pte(uaddr, ptab, base_pte); - } - - mb(); -} - -static int __init cell_iommu_fixed_mapping_init(void) -{ - unsigned long dbase, dsize, fbase, fsize, hbase, hend; - struct cbe_iommu *iommu; - struct device_node *np; - - /* The fixed mapping is only supported on axon machines */ - np = of_find_node_by_name(NULL, "axon"); - of_node_put(np); - - if (!np) { - pr_debug("iommu: fixed mapping disabled, no axons found\n"); - return -1; - } - - /* We must have dma-ranges properties for fixed mapping to work */ - np = 
of_find_node_with_property(NULL, "dma-ranges"); - of_node_put(np); - - if (!np) { - pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); - return -1; - } - - /* The default setup is to have the fixed mapping sit after the - * dynamic region, so find the top of the largest IOMMU window - * on any axon, then add the size of RAM and that's our max value. - * If that is > 32GB we have to do other shenanigans. - */ - fbase = 0; - for_each_node_by_name(np, "axon") { - cell_iommu_get_window(np, &dbase, &dsize); - fbase = max(fbase, dbase + dsize); - } - - fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT); - fsize = memblock_phys_mem_size(); - - if ((fbase + fsize) <= 0x800000000ul) - hbase = 0; /* use the device tree window */ - else { - /* If we're over 32 GB we need to cheat. We can't map all of - * RAM with the fixed mapping, and also fit the dynamic - * region. So try to place the dynamic region where the hash - * table sits, drivers never need to DMA to it, we don't - * need a fixed mapping for that area. - */ - if (!htab_address) { - pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); - return -1; - } - hbase = __pa(htab_address); - hend = hbase + htab_size_bytes; - - /* The window must start and end on a segment boundary */ - if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) || - (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) { - pr_debug("iommu: hash window not segment aligned\n"); - return -1; - } - - /* Check the hash window fits inside the real DMA window */ - for_each_node_by_name(np, "axon") { - cell_iommu_get_window(np, &dbase, &dsize); - - if (hbase < dbase || (hend > (dbase + dsize))) { - pr_debug("iommu: hash window doesn't fit in " - "real DMA window\n"); - of_node_put(np); - return -1; - } - } - - fbase = 0; - } - - /* Setup the dynamic regions */ - for_each_node_by_name(np, "axon") { - iommu = cell_iommu_alloc(np); - BUG_ON(!iommu); - - if (hbase == 0) - cell_iommu_get_window(np, &dbase, &dsize); - else { - dbase = hbase; - dsize = htab_size_bytes; - } - - printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " - "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, - dbase + dsize, fbase, fbase + fsize); - - cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); - iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, - IOMMU_PAGE_SHIFT_4K); - cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, - fbase, fsize); - cell_iommu_enable_hardware(iommu); - cell_iommu_setup_window(iommu, np, dbase, dsize, 0); - } - - cell_pci_controller_ops.iommu_bypass_supported = - cell_pci_iommu_bypass_supported; - return 0; -} - -static int iommu_fixed_disabled; - -static int __init setup_iommu_fixed(char *str) -{ - struct device_node *pciep; - - if (strcmp(str, "off") == 0) - iommu_fixed_disabled = 1; - - /* If we can find a pcie-endpoint in the device tree assume that - * we're on a triblade or a CAB so by default the fixed mapping - * should be set to be weakly ordered; but only if the boot - * option WASN'T set for strong ordering - */ - pciep = of_find_node_by_type(NULL, "pcie-endpoint"); - - if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) - iommu_fixed_is_weak = true; - - of_node_put(pciep); - - return 1; -} -__setup("iommu_fixed=", setup_iommu_fixed); - -static int __init cell_iommu_init(void) -{ - struct device_node *np; - - /* If IOMMU is disabled or we have little enough RAM to not need - * to enable it, we set up a direct mapping. - * - * Note: should we make sure we have the IOMMU actually disabled ? 
- */ - if (iommu_is_off || - (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull)) - if (cell_iommu_init_disabled() == 0) - goto bail; - - /* Setup various callbacks */ - cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - - if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) - goto done; - - /* Create an iommu for each /axon node. */ - for_each_node_by_name(np, "axon") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - cell_iommu_init_one(np, 0); - } - - /* Create an iommu for each toplevel /pci-internal node for - * old hardware/firmware - */ - for_each_node_by_name(np, "pci-internal") { - if (np->parent == NULL || np->parent->parent != NULL) - continue; - cell_iommu_init_one(np, SPIDER_DMA_OFFSET); - } - done: - /* Setup default PCI iommu ops */ - set_pci_dma_ops(&dma_iommu_ops); - cell_iommu_enabled = true; - bail: - /* Register callbacks on OF platform device addition/removal - * to handle linking them to the right DMA operations - */ - bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier); - - return 0; -} -machine_arch_initcall(cell, cell_iommu_init); diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c deleted file mode 100644 index 58d967ee38b3..000000000000 --- a/arch/powerpc/platforms/cell/pervasive.c +++ /dev/null @@ -1,125 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * CBE Pervasive Monitor and Debug - * - * (C) Copyright IBM Corporation 2005 - * - * Authors: Maximino Aguilar (maguilar@us.ibm.com) - * Michael N. Day (mnday@us.ibm.com) - */ - -#undef DEBUG - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/percpu.h> -#include <linux/types.h> -#include <linux/kallsyms.h> -#include <linux/pgtable.h> - -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/reg.h> -#include <asm/cell-regs.h> -#include <asm/cpu_has_feature.h> - -#include "pervasive.h" -#include "ras.h" - -static void cbe_power_save(void) -{ - unsigned long ctrl, thread_switch_control; - - /* Ensure our interrupt state is properly tracked */ - if (!prep_irq_for_idle()) - return; - - ctrl = mfspr(SPRN_CTRLF); - - /* Enable DEC and EE interrupt request */ - thread_switch_control = mfspr(SPRN_TSC_CELL); - thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; - - switch (ctrl & CTRL_CT) { - case CTRL_CT0: - thread_switch_control |= TSC_CELL_DEC_ENABLE_0; - break; - case CTRL_CT1: - thread_switch_control |= TSC_CELL_DEC_ENABLE_1; - break; - default: - printk(KERN_WARNING "%s: unknown configuration\n", - __func__); - break; - } - mtspr(SPRN_TSC_CELL, thread_switch_control); - - /* - * go into low thread priority, medium priority will be - * restored for us after wake-up. - */ - HMT_low(); - - /* - * atomically disable thread execution and runlatch. - * External and Decrementer exceptions are still handled when the - * thread is disabled but now enter in cbe_system_reset_exception() - */ - ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); - mtspr(SPRN_CTRLT, ctrl); - - /* Re-enable interrupts in MSR */ - __hard_irq_enable(); -} - -static int cbe_system_reset_exception(struct pt_regs *regs) -{ - switch (regs->msr & SRR1_WAKEMASK) { - case SRR1_WAKEDEC: - set_dec(1); - break; - case SRR1_WAKEEE: - /* - * Handle these when interrupts get re-enabled and we take - * them as regular exceptions. We are in an NMI context - * and can't handle these here. 
- */ - break; - case SRR1_WAKEMT: - return cbe_sysreset_hack(); -#ifdef CONFIG_CBE_RAS - case SRR1_WAKESYSERR: - cbe_system_error_exception(regs); - break; - case SRR1_WAKETHERM: - cbe_thermal_exception(regs); - break; -#endif /* CONFIG_CBE_RAS */ - default: - /* do system reset */ - return 0; - } - /* everything handled */ - return 1; -} - -void __init cbe_pervasive_init(void) -{ - int cpu; - - if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) - return; - - for_each_possible_cpu(cpu) { - struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu); - if (!regs) - continue; - - /* Enable Pause(0) control bit */ - out_be64(®s->pmcr, in_be64(®s->pmcr) | - CBE_PMD_PAUSE_ZERO_CONTROL); - } - - ppc_md.power_save = cbe_power_save; - ppc_md.system_reset_exception = cbe_system_reset_exception; -} diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h deleted file mode 100644 index 0da74ab10716..000000000000 --- a/arch/powerpc/platforms/cell/pervasive.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Cell Pervasive Monitor and Debug interface and HW structures - * - * (C) Copyright IBM Corporation 2005 - * - * Authors: Maximino Aguilar (maguilar@us.ibm.com) - * David J. Erb (djerb@us.ibm.com) - */ - - -#ifndef PERVASIVE_H -#define PERVASIVE_H - -extern void cbe_pervasive_init(void); - -#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON -extern int cbe_sysreset_hack(void); -#else -static inline int cbe_sysreset_hack(void) -{ - return 1; -} -#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ - -#endif diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c deleted file mode 100644 index b207a7f99be5..000000000000 --- a/arch/powerpc/platforms/cell/pmu.c +++ /dev/null @@ -1,412 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cell Broadband Engine Performance Monitor - * - * (C) Copyright IBM Corporation 2001,2006 - * - * Author: - * David Erb (djerb@us.ibm.com) - * Kevin Corry (kevcorry@us.ibm.com) - */ - -#include <linux/interrupt.h> -#include <linux/irqdomain.h> -#include <linux/types.h> -#include <linux/export.h> -#include <asm/io.h> -#include <asm/irq_regs.h> -#include <asm/machdep.h> -#include <asm/pmc.h> -#include <asm/reg.h> -#include <asm/spu.h> -#include <asm/cell-regs.h> - -#include "interrupt.h" - -/* - * When writing to write-only mmio addresses, save a shadow copy. All of the - * registers are 32-bit, but stored in the upper-half of a 64-bit field in - * pmd_regs. - */ - -#define WRITE_WO_MMIO(reg, x) \ - do { \ - u32 _x = (x); \ - struct cbe_pmd_regs __iomem *pmd_regs; \ - struct cbe_pmd_shadow_regs *shadow_regs; \ - pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ - out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \ - shadow_regs->reg = _x; \ - } while (0) - -#define READ_SHADOW_REG(val, reg) \ - do { \ - struct cbe_pmd_shadow_regs *shadow_regs; \ - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ - (val) = shadow_regs->reg; \ - } while (0) - -#define READ_MMIO_UPPER32(val, reg) \ - do { \ - struct cbe_pmd_regs __iomem *pmd_regs; \ - pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ - (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \ - } while (0) - -/* - * Physical counter registers. - * Each physical counter can act as one 32-bit counter or two 16-bit counters. 
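 *
 * [Editor's sketch, not part of the original patch.] The
 * WRITE_WO_MMIO/READ_SHADOW_REG pattern above, stripped of the per-cpu
 * plumbing: every store to a write-only 32-bit register goes both to
 * the hardware (in the upper half of a 64-bit doubleword) and to an
 * ordinary memory shadow, and later "reads" consult the shadow, since
 * reading the register itself is not possible:
 *
 *	struct wo_reg {
 *		u64 __iomem *mmio;	hardware register, write-only
 *		u32 shadow;		last value written
 *	};
 *
 *	static void wo_write(struct wo_reg *r, u32 val)
 *	{
 *		out_be64(r->mmio, (u64)val << 32);
 *		r->shadow = val;
 *	}
 *
 *	static u32 wo_read(const struct wo_reg *r)
 *	{
 *		return r->shadow;
 *	}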
- */ - -u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) -{ - u32 val_in_latch, val = 0; - - if (phys_ctr < NR_PHYS_CTRS) { - READ_SHADOW_REG(val_in_latch, counter_value_in_latch); - - /* Read the latch or the actual counter, whichever is newer. */ - if (val_in_latch & (1 << phys_ctr)) { - READ_SHADOW_REG(val, pm_ctr[phys_ctr]); - } else { - READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]); - } - } - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_phys_ctr); - -void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) -{ - struct cbe_pmd_shadow_regs *shadow_regs; - u32 pm_ctrl; - - if (phys_ctr < NR_PHYS_CTRS) { - /* Writing to a counter only writes to a hardware latch. - * The new value is not propagated to the actual counter - * until the performance monitor is enabled. - */ - WRITE_WO_MMIO(pm_ctr[phys_ctr], val); - - pm_ctrl = cbe_read_pm(cpu, pm_control); - if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) { - /* The counters are already active, so we need to - * rewrite the pm_control register to "re-enable" - * the PMU. - */ - cbe_write_pm(cpu, pm_control, pm_ctrl); - } else { - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); - shadow_regs->counter_value_in_latch |= (1 << phys_ctr); - } - } -} -EXPORT_SYMBOL_GPL(cbe_write_phys_ctr); - -/* - * "Logical" counter registers. - * These will read/write 16-bits or 32-bits depending on the - * current size of the counter. Counters 4 - 7 are always 16-bit. - */ - -u32 cbe_read_ctr(u32 cpu, u32 ctr) -{ - u32 val; - u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1); - - val = cbe_read_phys_ctr(cpu, phys_ctr); - - if (cbe_get_ctr_size(cpu, phys_ctr) == 16) - val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff); - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_ctr); - -void cbe_write_ctr(u32 cpu, u32 ctr, u32 val) -{ - u32 phys_ctr; - u32 phys_val; - - phys_ctr = ctr & (NR_PHYS_CTRS - 1); - - if (cbe_get_ctr_size(cpu, phys_ctr) == 16) { - phys_val = cbe_read_phys_ctr(cpu, phys_ctr); - - if (ctr < NR_PHYS_CTRS) - val = (val << 16) | (phys_val & 0xffff); - else - val = (val & 0xffff) | (phys_val & 0xffff0000); - } - - cbe_write_phys_ctr(cpu, phys_ctr, val); -} -EXPORT_SYMBOL_GPL(cbe_write_ctr); - -/* - * Counter-control registers. - * Each "logical" counter has a corresponding control register. - */ - -u32 cbe_read_pm07_control(u32 cpu, u32 ctr) -{ - u32 pm07_control = 0; - - if (ctr < NR_CTRS) - READ_SHADOW_REG(pm07_control, pm07_control[ctr]); - - return pm07_control; -} -EXPORT_SYMBOL_GPL(cbe_read_pm07_control); - -void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val) -{ - if (ctr < NR_CTRS) - WRITE_WO_MMIO(pm07_control[ctr], val); -} -EXPORT_SYMBOL_GPL(cbe_write_pm07_control); - -/* - * Other PMU control registers. Most of these are write-only. 
- */ - -u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg) -{ - u32 val = 0; - - switch (reg) { - case group_control: - READ_SHADOW_REG(val, group_control); - break; - - case debug_bus_control: - READ_SHADOW_REG(val, debug_bus_control); - break; - - case trace_address: - READ_MMIO_UPPER32(val, trace_address); - break; - - case ext_tr_timer: - READ_SHADOW_REG(val, ext_tr_timer); - break; - - case pm_status: - READ_MMIO_UPPER32(val, pm_status); - break; - - case pm_control: - READ_SHADOW_REG(val, pm_control); - break; - - case pm_interval: - READ_MMIO_UPPER32(val, pm_interval); - break; - - case pm_start_stop: - READ_SHADOW_REG(val, pm_start_stop); - break; - } - - return val; -} -EXPORT_SYMBOL_GPL(cbe_read_pm); - -void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val) -{ - switch (reg) { - case group_control: - WRITE_WO_MMIO(group_control, val); - break; - - case debug_bus_control: - WRITE_WO_MMIO(debug_bus_control, val); - break; - - case trace_address: - WRITE_WO_MMIO(trace_address, val); - break; - - case ext_tr_timer: - WRITE_WO_MMIO(ext_tr_timer, val); - break; - - case pm_status: - WRITE_WO_MMIO(pm_status, val); - break; - - case pm_control: - WRITE_WO_MMIO(pm_control, val); - break; - - case pm_interval: - WRITE_WO_MMIO(pm_interval, val); - break; - - case pm_start_stop: - WRITE_WO_MMIO(pm_start_stop, val); - break; - } -} -EXPORT_SYMBOL_GPL(cbe_write_pm); - -/* - * Get/set the size of a physical counter to either 16 or 32 bits. - */ - -u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr) -{ - u32 pm_ctrl, size = 0; - - if (phys_ctr < NR_PHYS_CTRS) { - pm_ctrl = cbe_read_pm(cpu, pm_control); - size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32; - } - - return size; -} -EXPORT_SYMBOL_GPL(cbe_get_ctr_size); - -void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size) -{ - u32 pm_ctrl; - - if (phys_ctr < NR_PHYS_CTRS) { - pm_ctrl = cbe_read_pm(cpu, pm_control); - switch (ctr_size) { - case 16: - pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr); - break; - - case 32: - pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr); - break; - } - cbe_write_pm(cpu, pm_control, pm_ctrl); - } -} -EXPORT_SYMBOL_GPL(cbe_set_ctr_size); - -/* - * Enable/disable the entire performance monitoring unit. - * When we enable the PMU, all pending writes to counters get committed. - */ - -void cbe_enable_pm(u32 cpu) -{ - struct cbe_pmd_shadow_regs *shadow_regs; - u32 pm_ctrl; - - shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); - shadow_regs->counter_value_in_latch = 0; - - pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON; - cbe_write_pm(cpu, pm_control, pm_ctrl); -} -EXPORT_SYMBOL_GPL(cbe_enable_pm); - -void cbe_disable_pm(u32 cpu) -{ - u32 pm_ctrl; - pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON; - cbe_write_pm(cpu, pm_control, pm_ctrl); -} -EXPORT_SYMBOL_GPL(cbe_disable_pm); - -/* - * Reading from the trace_buffer. - * The trace buffer is two 64-bit registers. Reading from - * the second half automatically increments the trace_address. - */ - -void cbe_read_trace_buffer(u32 cpu, u64 *buf) -{ - struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu); - - *buf++ = in_be64(&pmd_regs->trace_buffer_0_63); - *buf++ = in_be64(&pmd_regs->trace_buffer_64_127); -} -EXPORT_SYMBOL_GPL(cbe_read_trace_buffer); - -/* - * Enabling/disabling interrupts for the entire performance monitoring unit. - */ - -u32 cbe_get_and_clear_pm_interrupts(u32 cpu) -{ - /* Reading pm_status clears the interrupt bits. 
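 *
 * [Editor's usage sketch: a hypothetical caller, not from the patch.]
 * A minimal counting session with the API above:
 *
 *	cbe_write_pm07_control(cpu, ctr, event);	select the event
 *	cbe_write_ctr(cpu, ctr, 0);			sits in the latch
 *	cbe_enable_pm(cpu);				commits latched writes
 *	... run the workload ...
 *	cbe_disable_pm(cpu);
 *	count = cbe_read_ctr(cpu, ctr);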
*/ - return cbe_read_pm(cpu, pm_status); -} -EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts); - -void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) -{ - /* Set which node and thread will handle the next interrupt. */ - iic_set_interrupt_routing(cpu, thread, 0); - - /* Enable the interrupt bits in the pm_status register. */ - if (mask) - cbe_write_pm(cpu, pm_status, mask); -} -EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts); - -void cbe_disable_pm_interrupts(u32 cpu) -{ - cbe_get_and_clear_pm_interrupts(cpu); - cbe_write_pm(cpu, pm_status, 0); -} -EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts); - -static irqreturn_t cbe_pm_irq(int irq, void *dev_id) -{ - perf_irq(get_irq_regs()); - return IRQ_HANDLED; -} - -static int __init cbe_init_pm_irq(void) -{ - unsigned int irq; - int rc, node; - - for_each_online_node(node) { - irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI | - (node << IIC_IRQ_NODE_SHIFT)); - if (!irq) { - printk("ERROR: Unable to allocate irq for node %d\n", - node); - return -EINVAL; - } - - rc = request_irq(irq, cbe_pm_irq, - 0, "cbe-pmu-0", NULL); - if (rc) { - printk("ERROR: Request for irq on node %d failed\n", - node); - return rc; - } - } - - return 0; -} -machine_arch_initcall(cell, cbe_init_pm_irq); - -void cbe_sync_irq(int node) -{ - unsigned int irq; - - irq = irq_find_mapping(NULL, - IIC_IRQ_IOEX_PMI - | (node << IIC_IRQ_NODE_SHIFT)); - - if (!irq) { - printk(KERN_WARNING "ERROR, unable to get existing irq %d " \ - "for node %d\n", irq, node); - return; - } - - synchronize_irq(irq); -} -EXPORT_SYMBOL_GPL(cbe_sync_irq); - diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c deleted file mode 100644 index f6b87926530c..000000000000 --- a/arch/powerpc/platforms/cell/ras.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2006-2008, IBM Corporation. 
- */ - -#undef DEBUG - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/smp.h> -#include <linux/reboot.h> -#include <linux/kexec.h> -#include <linux/crash_dump.h> -#include <linux/of.h> - -#include <asm/kexec.h> -#include <asm/reg.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/rtas.h> -#include <asm/cell-regs.h> - -#include "ras.h" -#include "pervasive.h" - -static void dump_fir(int cpu) -{ - struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu); - struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu); - - if (pregs == NULL) - return; - - /* Todo: do some nicer parsing of the bits and, based on them, go down - * to the other sub-units' FIRs, not only the IIC - */ - printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n", - in_be64(&pregs->checkstop_fir)); - printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n", - in_be64(&pregs->recov_fir)); - printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n", - in_be64(&pregs->spec_att_mchk_fir)); - - if (iregs == NULL) - return; - printk(KERN_ERR "IOC FIR : 0x%016llx\n", - in_be64(&iregs->ioc_fir)); - -} - -DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception) -{ - int cpu = smp_processor_id(); - - printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu); - dump_fir(cpu); - dump_stack(); -} - -DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception) -{ - int cpu = smp_processor_id(); - - /* - * Nothing implemented for the maintenance interrupt at this point - */ - - printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu); - dump_stack(); -} - -DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception) -{ - int cpu = smp_processor_id(); - - /* - * Nothing implemented for the thermal interrupt at this point - */ - - printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu); - dump_stack(); -} - -static int cbe_machine_check_handler(struct pt_regs *regs) -{ - int cpu = smp_processor_id(); - - printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu); - dump_fir(cpu); - - /* No recovery from this code now, let's continue */ - return 0; -} - -struct ptcal_area { - struct list_head list; - int nid; - int order; - struct page *pages; -}; - -static LIST_HEAD(ptcal_list); - -static int ptcal_start_tok, ptcal_stop_tok; - -static int __init cbe_ptcal_enable_on_node(int nid, int order) -{ - struct ptcal_area *area; - int ret = -ENOMEM; - unsigned long addr; - - if (is_kdump_kernel()) - rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); - - area = kmalloc(sizeof(*area), GFP_KERNEL); - if (!area) - goto out_err; - - area->nid = nid; - area->order = order; - area->pages = __alloc_pages_node(area->nid, - GFP_KERNEL|__GFP_THISNODE, - area->order); - - if (!area->pages) { - printk(KERN_WARNING "%s: no page on node %d\n", - __func__, area->nid); - goto out_free_area; - } - - /* - * We move the ptcal area to the middle of the allocated - * page, in order to avoid prefetches in memcpy and similar - * functions stepping on it. 
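 *
 * [Editor's note, not part of the original patch.] Concretely, with 64K
 * pages the scrubbing area starts at page_base + 32K (PAGE_SIZE >> 1),
 * leaving half a page of guard on either side so a memcpy() that
 * prefetches a few cache lines past a neighbouring buffer cannot touch
 * it. The resulting 64-bit physical address is then split into two u32
 * RTAS arguments just below:
 *
 *	hi = addr >> 32;
 *	lo = addr & 0xffffffff;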
- */ - addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); - printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n", - __func__, area->nid, addr); - - ret = -EIO; - if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid, - (unsigned int)(addr >> 32), - (unsigned int)(addr & 0xffffffff))) { - printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n", - __func__, nid); - goto out_free_pages; - } - - list_add(&area->list, &ptcal_list); - - return 0; - -out_free_pages: - __free_pages(area->pages, area->order); -out_free_area: - kfree(area); -out_err: - return ret; -} - -static int __init cbe_ptcal_enable(void) -{ - const u32 *size; - struct device_node *np; - int order, found_mic = 0; - - np = of_find_node_by_path("/rtas"); - if (!np) - return -ENODEV; - - size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); - if (!size) { - of_node_put(np); - return -ENODEV; - } - - pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); - order = get_order(*size); - of_node_put(np); - - /* support for malta device trees, with be@/mic@ nodes */ - for_each_node_by_type(np, "mic-tm") { - cbe_ptcal_enable_on_node(of_node_to_nid(np), order); - found_mic = 1; - } - - if (found_mic) - return 0; - - /* support for older device tree - use cpu nodes */ - for_each_node_by_type(np, "cpu") { - const u32 *nid = of_get_property(np, "node-id", NULL); - if (!nid) { - printk(KERN_ERR "%s: node %pOF is missing node-id?\n", - __func__, np); - continue; - } - cbe_ptcal_enable_on_node(*nid, order); - found_mic = 1; - } - - return found_mic ? 0 : -ENODEV; -} - -static int cbe_ptcal_disable(void) -{ - struct ptcal_area *area, *tmp; - int ret = 0; - - pr_debug("%s: disabling PTCAL\n", __func__); - - list_for_each_entry_safe(area, tmp, &ptcal_list, list) { - /* disable ptcal on this node */ - if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) { - printk(KERN_ERR "%s: error disabling PTCAL " - "on node %d!\n", __func__, - area->nid); - ret = -EIO; - continue; - } - - /* ensure we can access the PTCAL area */ - memset(page_address(area->pages), 0, - 1 << (area->order + PAGE_SHIFT)); - - /* clean up */ - list_del(&area->list); - __free_pages(area->pages, area->order); - kfree(area); - } - - return ret; -} - -static int cbe_ptcal_notify_reboot(struct notifier_block *nb, - unsigned long code, void *data) -{ - return cbe_ptcal_disable(); -} - -static void cbe_ptcal_crash_shutdown(void) -{ - cbe_ptcal_disable(); -} - -static struct notifier_block cbe_ptcal_reboot_notifier = { - .notifier_call = cbe_ptcal_notify_reboot -}; - -#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON -static int sysreset_hack; - -static int __init cbe_sysreset_init(void) -{ - struct cbe_pmd_regs __iomem *regs; - - sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0"); - if (!sysreset_hack) - return 0; - - regs = cbe_get_cpu_pmd_regs(0); - if (!regs) - return 0; - - /* Enable JTAG system-reset hack */ - out_be32(®s->fir_mode_reg, - in_be32(®s->fir_mode_reg) | - CBE_PMD_FIR_MODE_M8); - - return 0; -} -device_initcall(cbe_sysreset_init); - -int cbe_sysreset_hack(void) -{ - struct cbe_pmd_regs __iomem *regs; - - /* - * The BMC can inject user triggered system reset exceptions, - * but cannot set the system reset reason in srr1, - * so check an extra register here. 
- */ - if (sysreset_hack && (smp_processor_id() == 0)) { - regs = cbe_get_cpu_pmd_regs(0); - if (!regs) - return 0; - if (in_be64(®s->ras_esc_0) & 0x0000ffff) { - out_be64(®s->ras_esc_0, 0); - return 0; - } - } - return 1; -} -#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ - -static int __init cbe_ptcal_init(void) -{ - int ret; - ptcal_start_tok = rtas_function_token(RTAS_FN_IBM_CBE_START_PTCAL); - ptcal_stop_tok = rtas_function_token(RTAS_FN_IBM_CBE_STOP_PTCAL); - - if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE - || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE) - return -ENODEV; - - ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier); - if (ret) - goto out1; - - ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown); - if (ret) - goto out2; - - return cbe_ptcal_enable(); - -out2: - unregister_reboot_notifier(&cbe_ptcal_reboot_notifier); -out1: - printk(KERN_ERR "Can't disable PTCAL, so not enabling\n"); - return ret; -} - -arch_initcall(cbe_ptcal_init); - -void __init cbe_ras_init(void) -{ - unsigned long hid0; - - /* - * Enable System Error & thermal interrupts and wakeup conditions - */ - - hid0 = mfspr(SPRN_HID0); - hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP | - HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP; - mtspr(SPRN_HID0, hid0); - mb(); - - /* - * Install machine check handler. Leave setting of precise mode to - * what the firmware did for now - */ - ppc_md.machine_check_exception = cbe_machine_check_handler; - mb(); - - /* - * For now, we assume that IOC_FIR is already set to forward some - * error conditions to the System Error handler. If that is not true - * then it will have to be fixed up here. - */ -} diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h deleted file mode 100644 index 226dbd48efad..000000000000 --- a/arch/powerpc/platforms/cell/ras.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef RAS_H -#define RAS_H - -#include <asm/interrupt.h> - -DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception); -DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception); -DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception); - -extern void cbe_ras_init(void); - -#endif /* RAS_H */ diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c deleted file mode 100644 index f64a1ef98aa8..000000000000 --- a/arch/powerpc/platforms/cell/setup.c +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * linux/arch/powerpc/platforms/cell/cell_setup.c - * - * Copyright (C) 1995 Linus Torvalds - * Adapted from 'alpha' version by Gary Thomas - * Modified by Cort Dougan (cort@cs.nmt.edu) - * Modified by PPC64 Team, IBM Corp - * Modified by Cell Team, IBM Deutschland Entwicklung GmbH - */ -#undef DEBUG - -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/export.h> -#include <linux/unistd.h> -#include <linux/user.h> -#include <linux/reboot.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/console.h> -#include <linux/mutex.h> -#include <linux/memory_hotplug.h> -#include <linux/of_platform.h> -#include <linux/platform_device.h> - -#include <asm/mmu.h> -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/rtas.h> -#include <asm/pci-bridge.h> -#include <asm/iommu.h> -#include <asm/dma.h> -#include <asm/machdep.h> -#include <asm/time.h> -#include <asm/nvram.h> -#include <asm/cputable.h> -#include 
<asm/ppc-pci.h> -#include <asm/irq.h> -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/udbg.h> -#include <asm/mpic.h> -#include <asm/cell-regs.h> -#include <asm/io-workarounds.h> - -#include "cell.h" -#include "interrupt.h" -#include "pervasive.h" -#include "ras.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -static void cell_show_cpuinfo(struct seq_file *m) -{ - struct device_node *root; - const char *model = ""; - - root = of_find_node_by_path("/"); - if (root) - model = of_get_property(root, "model", NULL); - seq_printf(m, "machine\t\t: CHRP %s\n", model); - of_node_put(root); -} - -static void cell_progress(char *s, unsigned short hex) -{ - printk("*** %04x : %s\n", hex, s ? s : ""); -} - -static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev) -{ - struct pci_controller *hose; - const char *s; - int i; - - if (!machine_is(cell)) - return; - - /* We're searching for a direct child of the PHB */ - if (dev->bus->self != NULL || dev->devfn != 0) - return; - - hose = pci_bus_to_host(dev->bus); - if (hose == NULL) - return; - - /* Only on PCIE */ - if (!of_device_is_compatible(hose->dn, "pciex")) - return; - - /* And only on axon */ - s = of_get_property(hose->dn, "model", NULL); - if (!s || strcmp(s, "Axon") != 0) - return; - - for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { - dev->resource[i].start = dev->resource[i].end = 0; - dev->resource[i].flags = 0; - } - - printk(KERN_DEBUG "PCI: Hiding resources on Axon PCIE RC %s\n", - pci_name(dev)); -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex); - -static int cell_setup_phb(struct pci_controller *phb) -{ - const char *model; - struct device_node *np; - - int rc = rtas_setup_phb(phb); - if (rc) - return rc; - - phb->controller_ops = cell_pci_controller_ops; - - np = phb->dn; - model = of_get_property(np, "model", NULL); - if (model == NULL || !of_node_name_eq(np, "pci")) - return 0; - - /* Setup workarounds for spider */ - if (strcmp(model, "Spider")) - return 0; - - iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, - (void *)SPIDER_PCI_REG_BASE); - return 0; -} - -static const struct of_device_id cell_bus_ids[] __initconst = { - { .type = "soc", }, - { .compatible = "soc", }, - { .type = "spider", }, - { .type = "axon", }, - { .type = "plb5", }, - { .type = "plb4", }, - { .type = "opb", }, - { .type = "ebc", }, - {}, -}; - -static int __init cell_publish_devices(void) -{ - struct device_node *root = of_find_node_by_path("/"); - struct device_node *np; - int node; - - /* Publish OF platform devices for southbridge IOs */ - of_platform_bus_probe(NULL, cell_bus_ids, NULL); - - /* On spider based blades, we need to manually create the OF - * platform devices for the PCI host bridges - */ - for_each_child_of_node(root, np) { - if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex")) - continue; - of_platform_device_create(np, NULL, NULL); - } - - of_node_put(root); - - /* There is no device for the MIC memory controller, thus we create - * a platform device for it to attach the EDAC driver to. 
- */ - for_each_online_node(node) { - if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) - continue; - platform_device_register_simple("cbe-mic", node, NULL, 0); - } - - return 0; -} -machine_subsys_initcall(cell, cell_publish_devices); - -static void __init mpic_init_IRQ(void) -{ - struct device_node *dn; - struct mpic *mpic; - - for_each_node_by_name(dn, "interrupt-controller") { - if (!of_device_is_compatible(dn, "CBEA,platform-open-pic")) - continue; - - /* The MPIC driver will get everything it needs from the - * device-tree, just pass 0 to all arguments - */ - mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET, - 0, 0, " MPIC "); - if (mpic == NULL) - continue; - mpic_init(mpic); - } -} - - -static void __init cell_init_irq(void) -{ - iic_init_IRQ(); - spider_init_IRQ(); - mpic_init_IRQ(); -} - -static void __init cell_set_dabrx(void) -{ - mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); -} - -static void __init cell_setup_arch(void) -{ -#ifdef CONFIG_SPU_BASE - spu_priv1_ops = &spu_priv1_mmio_ops; - spu_management_ops = &spu_management_of_ops; -#endif - - cbe_regs_init(); - - cell_set_dabrx(); - -#ifdef CONFIG_CBE_RAS - cbe_ras_init(); -#endif - -#ifdef CONFIG_SMP - smp_init_cell(); -#endif - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - /* Find and initialize PCI host bridges */ - init_pci_config_tokens(); - - cbe_pervasive_init(); - - mmio_nvram_init(); -} - -static int __init cell_probe(void) -{ - if (!of_machine_is_compatible("IBM,CBEA") && - !of_machine_is_compatible("IBM,CPBW-1.0")) - return 0; - - pm_power_off = rtas_power_off; - - return 1; -} - -define_machine(cell) { - .name = "Cell", - .probe = cell_probe, - .setup_arch = cell_setup_arch, - .show_cpuinfo = cell_show_cpuinfo, - .restart = rtas_restart, - .halt = rtas_halt, - .get_boot_time = rtas_get_boot_time, - .get_rtc_time = rtas_get_rtc_time, - .set_rtc_time = rtas_set_rtc_time, - .progress = cell_progress, - .init_IRQ = cell_init_irq, - .pci_setup_phb = cell_setup_phb, -}; - -struct pci_controller_ops cell_pci_controller_ops; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c deleted file mode 100644 index 30394c6f8894..000000000000 --- a/arch/powerpc/platforms/cell/smp.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SMP support for BPA machines. - * - * Dave Engebretsen, Peter Bergner, and - * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com - * - * Plus various changes from other IBM teams... - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/cache.h> -#include <linux/err.h> -#include <linux/device.h> -#include <linux/cpu.h> -#include <linux/pgtable.h> - -#include <asm/ptrace.h> -#include <linux/atomic.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/io.h> -#include <asm/smp.h> -#include <asm/paca.h> -#include <asm/machdep.h> -#include <asm/cputable.h> -#include <asm/firmware.h> -#include <asm/rtas.h> -#include <asm/cputhreads.h> -#include <asm/code-patching.h> - -#include "interrupt.h" -#include <asm/udbg.h> - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -/* - * The Primary thread of each non-boot processor was started from the OF client - * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. 
- */ -static cpumask_t of_spin_map; - -/** - * smp_startup_cpu() - start the given cpu - * - * At boot time, there is nothing to do for primary threads which were - * started from Open Firmware. For anything else, call RTAS with the - * appropriate start location. - * - * Returns: - * 0 - failure - * 1 - success - */ -static inline int smp_startup_cpu(unsigned int lcpu) -{ - int status; - unsigned long start_here = - __pa(ppc_function_entry(generic_secondary_smp_init)); - unsigned int pcpu; - int start_cpu; - - if (cpumask_test_cpu(lcpu, &of_spin_map)) - /* Already started by OF and sitting in spin loop */ - return 1; - - pcpu = get_hard_smp_processor_id(lcpu); - - /* - * If the RTAS start-cpu token does not exist then presume the - * cpu is already spinning. - */ - start_cpu = rtas_function_token(RTAS_FN_START_CPU); - if (start_cpu == RTAS_UNKNOWN_SERVICE) - return 1; - - status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); - if (status != 0) { - printk(KERN_ERR "start-cpu failed: %i\n", status); - return 0; - } - - return 1; -} - -static void smp_cell_setup_cpu(int cpu) -{ - if (cpu != boot_cpuid) - iic_setup_cpu(); - - /* - * change default DABRX to allow user watchpoints - */ - mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); -} - -static int smp_cell_kick_cpu(int nr) -{ - if (nr < 0 || nr >= nr_cpu_ids) - return -EINVAL; - - if (!smp_startup_cpu(nr)) - return -ENOENT; - - /* - * The processor is currently spinning, waiting for the - * cpu_start field to become non-zero. After we set cpu_start, - * the processor will continue on to secondary_start. - */ - paca_ptrs[nr]->cpu_start = 1; - - return 0; -} - -static struct smp_ops_t bpa_iic_smp_ops = { - .message_pass = iic_message_pass, - .probe = iic_request_IPIs, - .kick_cpu = smp_cell_kick_cpu, - .setup_cpu = smp_cell_setup_cpu, - .cpu_bootable = smp_generic_cpu_bootable, -}; - -/* This is called very early */ -void __init smp_init_cell(void) -{ - int i; - - DBG(" -> smp_init_cell()\n"); - - smp_ops = &bpa_iic_smp_ops; - - /* Mark threads which are still spinning in hold loops. 
*/ - if (cpu_has_feature(CPU_FTR_SMT)) { - for_each_present_cpu(i) { - if (cpu_thread_in_core(i) == 0) - cpumask_set_cpu(i, &of_spin_map); - } - } else - cpumask_copy(&of_spin_map, cpu_present_mask); - - cpumask_clear_cpu(boot_cpuid, &of_spin_map); - - /* Non-lpar has additional take/give timebase */ - if (rtas_function_token(RTAS_FN_FREEZE_TIME_BASE) != RTAS_UNKNOWN_SERVICE) { - smp_ops->give_timebase = rtas_give_timebase; - smp_ops->take_timebase = rtas_take_timebase; - } - - DBG(" <- smp_init_cell()\n"); -} diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c deleted file mode 100644 index 68439445b1c3..000000000000 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * IO workarounds for PCI on Celleb/Cell platform - * - * (C) Copyright 2006-2007 TOSHIBA CORPORATION - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/of_address.h> -#include <linux/slab.h> -#include <linux/io.h> - -#include <asm/ppc-pci.h> -#include <asm/pci-bridge.h> -#include <asm/io-workarounds.h> - -#define SPIDER_PCI_DISABLE_PREFETCH - -struct spiderpci_iowa_private { - void __iomem *regs; -}; - -static void spiderpci_io_flush(struct iowa_bus *bus) -{ - struct spiderpci_iowa_private *priv; - - priv = bus->private; - in_be32(priv->regs + SPIDER_PCI_DUMMY_READ); - iosync(); -} - -#define SPIDER_PCI_MMIO_READ(name, ret) \ -static ret spiderpci_##name(const PCI_IO_ADDR addr) \ -{ \ - ret val = __do_##name(addr); \ - spiderpci_io_flush(iowa_mem_find_bus(addr)); \ - return val; \ -} - -#define SPIDER_PCI_MMIO_READ_STR(name) \ -static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \ - unsigned long count) \ -{ \ - __do_##name(addr, buf, count); \ - spiderpci_io_flush(iowa_mem_find_bus(addr)); \ -} - -SPIDER_PCI_MMIO_READ(readb, u8) -SPIDER_PCI_MMIO_READ(readw, u16) -SPIDER_PCI_MMIO_READ(readl, u32) -SPIDER_PCI_MMIO_READ(readq, u64) -SPIDER_PCI_MMIO_READ(readw_be, u16) -SPIDER_PCI_MMIO_READ(readl_be, u32) -SPIDER_PCI_MMIO_READ(readq_be, u64) -SPIDER_PCI_MMIO_READ_STR(readsb) -SPIDER_PCI_MMIO_READ_STR(readsw) -SPIDER_PCI_MMIO_READ_STR(readsl) - -static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src, - unsigned long n) -{ - __do_memcpy_fromio(dest, src, n); - spiderpci_io_flush(iowa_mem_find_bus(src)); -} - -static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, - void __iomem *regs) -{ - void *dummy_page_va; - dma_addr_t dummy_page_da; - -#ifdef SPIDER_PCI_DISABLE_PREFETCH - u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT); - pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val); - out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8); -#endif /* SPIDER_PCI_DISABLE_PREFETCH */ - - /* setup dummy read */ - /* - * On CellBlade, we can't know which XDR memory kmalloc() will use - * to allocate dummy_page_va. - * To improve performance, the XDR used to allocate dummy_page_va - * should be the one nearest the spider-pci. - * We would have to select the CBE nearest the spider-pci to - * allocate memory from the best XDR, but there is no obvious way - * to do that. - * - * Celleb does not have this problem, because it has only one XDR. 
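 *
 * [Editor's sketch, not part of the original patch.] What the
 * SPIDER_PCI_MMIO_READ macros above expand to, written long-hand for
 * readl: every MMIO read through this PHB is chased by a read of the
 * spider's dummy-read register so stale prefetched data is flushed
 * before the value is used:
 *
 *	static u32 spiderpci_readl(const PCI_IO_ADDR addr)
 *	{
 *		u32 val = __do_readl(addr);
 *
 *		spiderpci_io_flush(iowa_mem_find_bus(addr));
 *		return val;
 *	}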
- */ - dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!dummy_page_va) { - pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n"); - return -1; - } - - dummy_page_da = dma_map_single(phb->parent, dummy_page_va, - PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(phb->parent, dummy_page_da)) { - pr_err("SPIDERPCI-IOWA:Map dummy page failed.\n"); - kfree(dummy_page_va); - return -1; - } - - out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da); - - return 0; -} - -int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) -{ - void __iomem *regs = NULL; - struct spiderpci_iowa_private *priv; - struct device_node *np = bus->phb->dn; - struct resource r; - unsigned long offset = (unsigned long)data; - - pr_debug("SPIDERPCI-IOWA:Bus initialization for spider(%pOF)\n", - np); - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - pr_err("SPIDERPCI-IOWA:" - "Can't allocate struct spiderpci_iowa_private\n"); - return -1; - } - - if (of_address_to_resource(np, 0, &r)) { - pr_err("SPIDERPCI-IOWA:Can't get resource.\n"); - goto error; - } - - regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE); - if (!regs) { - pr_err("SPIDERPCI-IOWA:ioremap failed.\n"); - goto error; - } - priv->regs = regs; - bus->private = priv; - - if (spiderpci_pci_setup_chip(bus->phb, regs)) - goto error; - - return 0; - -error: - kfree(priv); - bus->private = NULL; - - if (regs) - iounmap(regs); - - return -1; -} - -struct ppc_pci_io spiderpci_ops = { - .readb = spiderpci_readb, - .readw = spiderpci_readw, - .readl = spiderpci_readl, - .readq = spiderpci_readq, - .readw_be = spiderpci_readw_be, - .readl_be = spiderpci_readl_be, - .readq_be = spiderpci_readq_be, - .readsb = spiderpci_readsb, - .readsw = spiderpci_readsw, - .readsl = spiderpci_readsl, - .memcpy_fromio = spiderpci_memcpy_fromio, -}; - diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c deleted file mode 100644 index 11df737c8c6a..000000000000 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ /dev/null @@ -1,344 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * External Interrupt Controller on Spider South Bridge - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - */ - -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/ioport.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/pgtable.h> - -#include <asm/io.h> - -#include "interrupt.h" - -/* register layout taken from Spider spec, table 7.4-4 */ -enum { - TIR_DEN = 0x004, /* Detection Enable Register */ - TIR_MSK = 0x084, /* Mask Level Register */ - TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ - TIR_PNDA = 0x100, /* Pending Register A */ - TIR_PNDB = 0x104, /* Pending Register B */ - TIR_CS = 0x144, /* Current Status Register */ - TIR_LCSA = 0x150, /* Level Current Status Register A */ - TIR_LCSB = 0x154, /* Level Current Status Register B */ - TIR_LCSC = 0x158, /* Level Current Status Register C */ - TIR_LCSD = 0x15c, /* Level Current Status Register D */ - TIR_CFGA = 0x200, /* Setting Register A0 */ - TIR_CFGB = 0x204, /* Setting Register B0 */ - /* 0x208 ... 
0x3ff Setting Register An/Bn */ - TIR_PPNDA = 0x400, /* Packet Pending Register A */ - TIR_PPNDB = 0x404, /* Packet Pending Register B */ - TIR_PIERA = 0x408, /* Packet Output Error Register A */ - TIR_PIERB = 0x40c, /* Packet Output Error Register B */ - TIR_PIEN = 0x444, /* Packet Output Enable Register */ - TIR_PIPND = 0x454, /* Packet Output Pending Register */ - TIRDID = 0x484, /* Spider Device ID Register */ - REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ - REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ - REISWAITEN = 0x508, /* Reissue Wait Control*/ -}; - -#define SPIDER_CHIP_COUNT 4 -#define SPIDER_SRC_COUNT 64 -#define SPIDER_IRQ_INVALID 63 - -struct spider_pic { - struct irq_domain *host; - void __iomem *regs; - unsigned int node_id; -}; -static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; - -static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d) -{ - return irq_data_get_irq_chip_data(d); -} - -static void __iomem *spider_get_irq_config(struct spider_pic *pic, - unsigned int src) -{ - return pic->regs + TIR_CFGA + 8 * src; -} - -static void spider_unmask_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); - - out_be32(cfg, in_be32(cfg) | 0x30000000u); -} - -static void spider_mask_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); - - out_be32(cfg, in_be32(cfg) & ~0x30000000u); -} - -static void spider_ack_irq(struct irq_data *d) -{ - struct spider_pic *pic = spider_irq_data_to_pic(d); - unsigned int src = irqd_to_hwirq(d); - - /* Reset edge detection logic if necessary - */ - if (irqd_is_level_type(d)) - return; - - /* Only interrupts 47 to 50 can be set to edge */ - if (src < 47 || src > 50) - return; - - /* Perform the clear of the edge logic */ - out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); -} - -static int spider_set_irq_type(struct irq_data *d, unsigned int type) -{ - unsigned int sense = type & IRQ_TYPE_SENSE_MASK; - struct spider_pic *pic = spider_irq_data_to_pic(d); - unsigned int hw = irqd_to_hwirq(d); - void __iomem *cfg = spider_get_irq_config(pic, hw); - u32 old_mask; - u32 ic; - - /* Note that only level high is supported for most interrupts */ - if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && - (hw < 47 || hw > 50)) - return -EINVAL; - - /* Decode sense type */ - switch(sense) { - case IRQ_TYPE_EDGE_RISING: - ic = 0x3; - break; - case IRQ_TYPE_EDGE_FALLING: - ic = 0x2; - break; - case IRQ_TYPE_LEVEL_LOW: - ic = 0x0; - break; - case IRQ_TYPE_LEVEL_HIGH: - case IRQ_TYPE_NONE: - ic = 0x1; - break; - default: - return -EINVAL; - } - - /* Configure the source. One gross hack that was there before and - * that I've kept around is the priority to the BE which I set to - * be the same as the interrupt source number. I don't know whether - * that's supposed to make any kind of sense however, we'll have to - * decide that, but for now, I'm not changing the behaviour. 
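 *
 * [Editor's usage note: hypothetical caller, not from the patch.] Only
 * sources 47-50 have edge-detection logic, so with the decode above a
 * driver may do:
 *
 *	irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);	any source, ic = 0x1
 *	irq_set_irq_type(virq48, IRQ_TYPE_EDGE_RISING);	hwirq 47-50 only, ic = 0x3
 *
 * anything else on the remaining sources is rejected with -EINVAL.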
- */
- old_mask = in_be32(cfg) & 0x30000000u;
- out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) |
- (pic->node_id << 4) | 0xe);
- out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff));
-
- return 0;
-}
-
-static struct irq_chip spider_pic = {
- .name = "SPIDER",
- .irq_unmask = spider_unmask_irq,
- .irq_mask = spider_mask_irq,
- .irq_ack = spider_ack_irq,
- .irq_set_type = spider_set_irq_type,
-};
-
-static int spider_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_data(virq, h->host_data);
- irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
-
- /* Set default irq type */
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_flags)
-
-{
- /* Spider interrupts have 2 cells, first is the interrupt source,
- * second, well, I don't know for sure yet ... We mask the top bits
- * because old device-trees encode a node number in there
- */
- *out_hwirq = intspec[0] & 0x3f;
- *out_flags = IRQ_TYPE_LEVEL_HIGH;
- return 0;
-}
-
-static const struct irq_domain_ops spider_host_ops = {
- .map = spider_host_map,
- .xlate = spider_host_xlate,
-};
-
-static void spider_irq_cascade(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct spider_pic *pic = irq_desc_get_handler_data(desc);
- unsigned int cs;
-
- cs = in_be32(pic->regs + TIR_CS) >> 24;
- if (cs != SPIDER_IRQ_INVALID)
- generic_handle_domain_irq(pic->host, cs);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-/* For hooking up the cascade we have a problem. Our device-tree is
- * crap and we don't know on which BE iic interrupt we are hooked on at
- * least not the "standard" way. We can reconstitute it based on two
- * pieces of information, though: which BE node we are connected to and whether
- * we are connected to IOIF0 or IOIF1. Right now, we really only care
- * about the IBM cell blade and we know that its firmware gives us an
- * interrupt-map property which is pretty strange.
- */
-static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
-{
- unsigned int virq;
- const u32 *imap, *tmp;
- int imaplen, intsize, unit;
- struct device_node *iic;
- struct device_node *of_node;
-
- of_node = irq_domain_get_of_node(pic->host);
-
- /* First, we check whether we have a real "interrupts" in the device
- * tree in case the device-tree is ever fixed
- */
- virq = irq_of_parse_and_map(of_node, 0);
- if (virq)
- return virq;
-
- /* Now do the horrible hacks */
- tmp = of_get_property(of_node, "#interrupt-cells", NULL);
- if (tmp == NULL)
- return 0;
- intsize = *tmp;
- imap = of_get_property(of_node, "interrupt-map", &imaplen);
- if (imap == NULL || imaplen < (intsize + 1))
- return 0;
- iic = of_find_node_by_phandle(imap[intsize]);
- if (iic == NULL)
- return 0;
- imap += intsize + 1;
- tmp = of_get_property(iic, "#interrupt-cells", NULL);
- if (tmp == NULL) {
- of_node_put(iic);
- return 0;
- }
- intsize = *tmp;
- /* Assume unit is last entry of interrupt specifier */
- unit = imap[intsize - 1];
- /* Ok, we have a unit, now let's try to get the node */
- tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL);
- if (tmp == NULL) {
- of_node_put(iic);
- return 0;
- }
- /* ugly as hell but works for now */
- pic->node_id = (*tmp) >> 1;
- of_node_put(iic);
-
- /* Ok, now let's get cracking.
You may ask me why I just didn't match - * the iic host from the iic OF node, but that way I'm still compatible - * with really really old old firmwares for which we don't have a node - */ - /* Manufacture an IIC interrupt number of class 2 */ - virq = irq_create_mapping(NULL, - (pic->node_id << IIC_IRQ_NODE_SHIFT) | - (2 << IIC_IRQ_CLASS_SHIFT) | - unit); - if (!virq) - printk(KERN_ERR "spider_pic: failed to map cascade !"); - return virq; -} - - -static void __init spider_init_one(struct device_node *of_node, int chip, - unsigned long addr) -{ - struct spider_pic *pic = &spider_pics[chip]; - int i, virq; - - /* Map registers */ - pic->regs = ioremap(addr, 0x1000); - if (pic->regs == NULL) - panic("spider_pic: can't map registers !"); - - /* Allocate a host */ - pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, - &spider_host_ops, pic); - if (pic->host == NULL) - panic("spider_pic: can't allocate irq host !"); - - /* Go through all sources and disable them */ - for (i = 0; i < SPIDER_SRC_COUNT; i++) { - void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; - out_be32(cfg, in_be32(cfg) & ~0x30000000u); - } - - /* do not mask any interrupts because of level */ - out_be32(pic->regs + TIR_MSK, 0x0); - - /* enable interrupt packets to be output */ - out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); - - /* Hook up the cascade interrupt to the iic and nodeid */ - virq = spider_find_cascade_and_node(pic); - if (!virq) - return; - irq_set_handler_data(virq, pic); - irq_set_chained_handler(virq, spider_irq_cascade); - - printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n", - pic->node_id, addr, of_node); - - /* Enable the interrupt detection enable bit. Do this last! */ - out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); -} - -void __init spider_init_IRQ(void) -{ - struct resource r; - struct device_node *dn; - int chip = 0; - - /* XXX node numbers are totally bogus. We _hope_ we get the device - * nodes in the right order here but that's definitely not guaranteed, - * we need to get the node from the device tree instead. 
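A note on the interrupt-map walk in spider_find_cascade_and_node() above: in the simplified form this code assumes, each interrupt-map entry has the layout

	{ child intspec (#interrupt-cells of this node),
	  parent phandle,
	  parent intspec (#interrupt-cells of the parent) }

Only the first entry is consulted: imap[intsize] is the parent (iic) phandle, and the unit is assumed to sit in the last cell of the parent's specifier. The manufactured hwirq then ORs that unit with the node id and IIC class 2 via the IIC_IRQ_*_SHIFT constants from interrupt.h.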
- * There is currently no proper property for it (but our whole - * device-tree is bogus anyway) so all we can do is pray or maybe test - * the address and deduce the node-id - */ - for_each_node_by_name(dn, "interrupt-controller") { - if (of_device_is_compatible(dn, "CBEA,platform-spider-pic")) { - if (of_address_to_resource(dn, 0, &r)) { - printk(KERN_WARNING "spider-pic: Failed\n"); - continue; - } - } else if (of_device_is_compatible(dn, "sti,platform-spider-pic") - && (chip < 2)) { - static long hard_coded_pics[] = - { 0x24000008000ul, 0x34000008000ul}; - r.start = hard_coded_pics[chip]; - } else - continue; - spider_init_one(dn, chip++, r.start); - } -} diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index dea6f0f25897..2c07387201d0 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -23,7 +23,6 @@ #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> -#include <asm/xmon.h> #include <asm/kexec.h> const struct spu_management_ops *spu_management_ops; @@ -772,7 +771,6 @@ static int __init init_spu_base(void) fb_append_extra_logo(&logo_spe_clut224, ret); mutex_lock(&spu_full_list_mutex); - xmon_register_spus(&spu_full_list); crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); spu_add_dev_attr(&dev_attr_stat); diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c deleted file mode 100644 index f464a1f2e568..000000000000 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ /dev/null @@ -1,530 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spu management operations for of based platforms - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * Copyright 2006 Sony Corp. 
- * (C) Copyright 2007 TOSHIBA CORPORATION - */ - -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/export.h> -#include <linux/ptrace.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/io.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> - -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/firmware.h> - -#include "spufs/spufs.h" -#include "interrupt.h" -#include "spu_priv1_mmio.h" - -struct device_node *spu_devnode(struct spu *spu) -{ - return spu->devnode; -} - -EXPORT_SYMBOL_GPL(spu_devnode); - -static u64 __init find_spu_unit_number(struct device_node *spe) -{ - const unsigned int *prop; - int proplen; - - /* new device trees should provide the physical-id attribute */ - prop = of_get_property(spe, "physical-id", &proplen); - if (proplen == 4) - return (u64)*prop; - - /* celleb device tree provides the unit-id */ - prop = of_get_property(spe, "unit-id", &proplen); - if (proplen == 4) - return (u64)*prop; - - /* legacy device trees provide the id in the reg attribute */ - prop = of_get_property(spe, "reg", &proplen); - if (proplen == 4) - return (u64)*prop; - - return 0; -} - -static void spu_unmap(struct spu *spu) -{ - if (!firmware_has_feature(FW_FEATURE_LPAR)) - iounmap(spu->priv1); - iounmap(spu->priv2); - iounmap(spu->problem); - iounmap((__force u8 __iomem *)spu->local_store); -} - -static int __init spu_map_interrupts_old(struct spu *spu, - struct device_node *np) -{ - unsigned int isrc; - const u32 *tmp; - int nid; - - /* Get the interrupt source unit from the device-tree */ - tmp = of_get_property(np, "isrc", NULL); - if (!tmp) - return -ENODEV; - isrc = tmp[0]; - - tmp = of_get_property(np->parent->parent, "node-id", NULL); - if (!tmp) { - printk(KERN_WARNING "%s: can't find node-id\n", __func__); - nid = spu->node; - } else - nid = tmp[0]; - - /* Add the node number */ - isrc |= nid << IIC_IRQ_NODE_SHIFT; - - /* Now map interrupts of all 3 classes */ - spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); - spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); - spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); - - /* Right now, we only fail if class 2 failed */ - if (!spu->irqs[2]) - return -EINVAL; - - return 0; -} - -static void __iomem * __init spu_map_prop_old(struct spu *spu, - struct device_node *n, - const char *name) -{ - const struct address_prop { - unsigned long address; - unsigned int len; - } __attribute__((packed)) *prop; - int proplen; - - prop = of_get_property(n, name, &proplen); - if (prop == NULL || proplen != sizeof (struct address_prop)) - return NULL; - - return ioremap(prop->address, prop->len); -} - -static int __init spu_map_device_old(struct spu *spu) -{ - struct device_node *node = spu->devnode; - const char *prop; - int ret; - - ret = -ENODEV; - spu->name = of_get_property(node, "name", NULL); - if (!spu->name) - goto out; - - prop = of_get_property(node, "local-store", NULL); - if (!prop) - goto out; - spu->local_store_phys = *(unsigned long *)prop; - - /* we use local store as ram, not io memory */ - spu->local_store = (void __force *) - spu_map_prop_old(spu, node, "local-store"); - if (!spu->local_store) - goto out; - - prop = of_get_property(node, "problem", NULL); - if (!prop) - goto out_unmap; - spu->problem_phys = *(unsigned long *)prop; - - spu->problem = spu_map_prop_old(spu, node, "problem"); - if (!spu->problem) - goto out_unmap; - - spu->priv2 = spu_map_prop_old(spu, node, "priv2"); - 
if (!spu->priv2) - goto out_unmap; - - if (!firmware_has_feature(FW_FEATURE_LPAR)) { - spu->priv1 = spu_map_prop_old(spu, node, "priv1"); - if (!spu->priv1) - goto out_unmap; - } - - ret = 0; - goto out; - -out_unmap: - spu_unmap(spu); -out: - return ret; -} - -static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) -{ - int i; - - for (i=0; i < 3; i++) { - spu->irqs[i] = irq_of_parse_and_map(np, i); - if (!spu->irqs[i]) - goto err; - } - return 0; - -err: - pr_debug("failed to map irq %x for spu %s\n", i, spu->name); - for (; i >= 0; i--) { - if (spu->irqs[i]) - irq_dispose_mapping(spu->irqs[i]); - } - return -EINVAL; -} - -static int __init spu_map_resource(struct spu *spu, int nr, - void __iomem** virt, unsigned long *phys) -{ - struct device_node *np = spu->devnode; - struct resource resource = { }; - unsigned long len; - int ret; - - ret = of_address_to_resource(np, nr, &resource); - if (ret) - return ret; - if (phys) - *phys = resource.start; - len = resource_size(&resource); - *virt = ioremap(resource.start, len); - if (!*virt) - return -EINVAL; - return 0; -} - -static int __init spu_map_device(struct spu *spu) -{ - struct device_node *np = spu->devnode; - int ret = -ENODEV; - - spu->name = of_get_property(np, "name", NULL); - if (!spu->name) - goto out; - - ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, - &spu->local_store_phys); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 0\n", - np); - goto out; - } - ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, - &spu->problem_phys); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 1\n", - np); - goto out_unmap; - } - ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 2\n", - np); - goto out_unmap; - } - if (!firmware_has_feature(FW_FEATURE_LPAR)) - ret = spu_map_resource(spu, 3, - (void __iomem**)&spu->priv1, NULL); - if (ret) { - pr_debug("spu_new: failed to map %pOF resource 3\n", - np); - goto out_unmap; - } - pr_debug("spu_new: %pOF maps:\n", np); - pr_debug(" local store : 0x%016lx -> 0x%p\n", - spu->local_store_phys, spu->local_store); - pr_debug(" problem state : 0x%016lx -> 0x%p\n", - spu->problem_phys, spu->problem); - pr_debug(" priv2 : 0x%p\n", spu->priv2); - pr_debug(" priv1 : 0x%p\n", spu->priv1); - - return 0; - -out_unmap: - spu_unmap(spu); -out: - pr_debug("failed to map spe %s: %d\n", spu->name, ret); - return ret; -} - -static int __init of_enumerate_spus(int (*fn)(void *data)) -{ - int ret; - struct device_node *node; - unsigned int n = 0; - - ret = -ENODEV; - for_each_node_by_type(node, "spe") { - ret = fn(node); - if (ret) { - printk(KERN_WARNING "%s: Error initializing %pOFn\n", - __func__, node); - of_node_put(node); - break; - } - n++; - } - return ret ? 
ret : n; -} - -static int __init of_create_spu(struct spu *spu, void *data) -{ - int ret; - struct device_node *spe = (struct device_node *)data; - static int legacy_map = 0, legacy_irq = 0; - - spu->devnode = of_node_get(spe); - spu->spe_id = find_spu_unit_number(spe); - - spu->node = of_node_to_nid(spe); - if (spu->node >= MAX_NUMNODES) { - printk(KERN_WARNING "SPE %pOF on node %d ignored," - " node number too big\n", spe, spu->node); - printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); - ret = -ENODEV; - goto out; - } - - ret = spu_map_device(spu); - if (ret) { - if (!legacy_map) { - legacy_map = 1; - printk(KERN_WARNING "%s: Legacy device tree found, " - "trying to map old style\n", __func__); - } - ret = spu_map_device_old(spu); - if (ret) { - printk(KERN_ERR "Unable to map %s\n", - spu->name); - goto out; - } - } - - ret = spu_map_interrupts(spu, spe); - if (ret) { - if (!legacy_irq) { - legacy_irq = 1; - printk(KERN_WARNING "%s: Legacy device tree found, " - "trying old style irq\n", __func__); - } - ret = spu_map_interrupts_old(spu, spe); - if (ret) { - printk(KERN_ERR "%s: could not map interrupts\n", - spu->name); - goto out_unmap; - } - } - - pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, - spu->local_store, spu->problem, spu->priv1, - spu->priv2, spu->number); - goto out; - -out_unmap: - spu_unmap(spu); -out: - return ret; -} - -static int of_destroy_spu(struct spu *spu) -{ - spu_unmap(spu); - of_node_put(spu->devnode); - return 0; -} - -static void enable_spu_by_master_run(struct spu_context *ctx) -{ - ctx->ops->master_start(ctx); -} - -static void disable_spu_by_master_run(struct spu_context *ctx) -{ - ctx->ops->master_stop(ctx); -} - -/* Hardcoded affinity idxs for qs20 */ -#define QS20_SPES_PER_BE 8 -static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; -static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; - -static struct spu *__init spu_lookup_reg(int node, u32 reg) -{ - struct spu *spu; - const u32 *spu_reg; - - list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { - spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); - if (*spu_reg == reg) - return spu; - } - return NULL; -} - -static void __init init_affinity_qs20_harcoded(void) -{ - int node, i; - struct spu *last_spu, *spu; - u32 reg; - - for (node = 0; node < MAX_NUMNODES; node++) { - last_spu = NULL; - for (i = 0; i < QS20_SPES_PER_BE; i++) { - reg = qs20_reg_idxs[i]; - spu = spu_lookup_reg(node, reg); - if (!spu) - continue; - spu->has_mem_affinity = qs20_reg_memory[reg]; - if (last_spu) - list_add_tail(&spu->aff_list, - &last_spu->aff_list); - last_spu = spu; - } - } -} - -static int __init of_has_vicinity(void) -{ - struct device_node *dn; - - for_each_node_by_type(dn, "spe") { - if (of_property_present(dn, "vicinity")) { - of_node_put(dn); - return 1; - } - } - return 0; -} - -static struct spu *__init devnode_spu(int cbe, struct device_node *dn) -{ - struct spu *spu; - - list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) - if (spu_devnode(spu) == dn) - return spu; - return NULL; -} - -static struct spu * __init -neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) -{ - struct spu *spu; - struct device_node *spu_dn; - const phandle *vic_handles; - int lenp, i; - - list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { - spu_dn = spu_devnode(spu); - if (spu_dn == avoid) - continue; - vic_handles = of_get_property(spu_dn, "vicinity", &lenp); - for (i=0; i < (lenp / sizeof(phandle)); i++) { - if 
(vic_handles[i] == target->phandle) - return spu; - } - } - return NULL; -} - -static void __init init_affinity_node(int cbe) -{ - struct spu *spu, *last_spu; - struct device_node *vic_dn, *last_spu_dn; - phandle avoid_ph; - const phandle *vic_handles; - int lenp, i, added; - - last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, - cbe_list); - avoid_ph = 0; - for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { - last_spu_dn = spu_devnode(last_spu); - vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); - - /* - * Walk through each phandle in vicinity property of the spu - * (typically two vicinity phandles per spe node) - */ - for (i = 0; i < (lenp / sizeof(phandle)); i++) { - if (vic_handles[i] == avoid_ph) - continue; - - vic_dn = of_find_node_by_phandle(vic_handles[i]); - if (!vic_dn) - continue; - - if (of_node_name_eq(vic_dn, "spe") ) { - spu = devnode_spu(cbe, vic_dn); - avoid_ph = last_spu_dn->phandle; - } else { - /* - * "mic-tm" and "bif0" nodes do not have - * vicinity property. So we need to find the - * spe which has vic_dn as neighbour, but - * skipping the one we came from (last_spu_dn) - */ - spu = neighbour_spu(cbe, vic_dn, last_spu_dn); - if (!spu) - continue; - if (of_node_name_eq(vic_dn, "mic-tm")) { - last_spu->has_mem_affinity = 1; - spu->has_mem_affinity = 1; - } - avoid_ph = vic_dn->phandle; - } - - of_node_put(vic_dn); - - list_add_tail(&spu->aff_list, &last_spu->aff_list); - last_spu = spu; - break; - } - } -} - -static void __init init_affinity_fw(void) -{ - int cbe; - - for (cbe = 0; cbe < MAX_NUMNODES; cbe++) - init_affinity_node(cbe); -} - -static int __init init_affinity(void) -{ - if (of_has_vicinity()) { - init_affinity_fw(); - } else { - if (of_machine_is_compatible("IBM,CPBW-1.0")) - init_affinity_qs20_harcoded(); - else - printk("No affinity configuration found\n"); - } - - return 0; -} - -const struct spu_management_ops spu_management_of_ops = { - .enumerate_spus = of_enumerate_spus, - .create_spu = of_create_spu, - .destroy_spu = of_destroy_spu, - .enable_spu = enable_spu_by_master_run, - .disable_spu = disable_spu_by_master_run, - .init_affinity = init_affinity, -}; diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c deleted file mode 100644 index d150e3987304..000000000000 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ /dev/null @@ -1,167 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * spu hypervisor abstraction for direct hardware access. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * Copyright 2006 Sony Corp. 
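spu_priv1_mmio.c, deleted below, provided the bare-metal implementation of the spu_priv1_ops indirection: privileged-register accesses in common SPU code go through this function-pointer table so that LPAR platforms can substitute hypervisor calls instead (note the FW_FEATURE_LPAR guards around the priv1 mapping in spu_manage.c above). The callers dispatch through inline wrappers along these lines (paraphrasing asm/spu_priv1.h, not part of this diff):

	static inline void spu_int_mask_set(struct spu *spu, int class, u64 mask)
	{
		spu_priv1_ops->int_mask_set(spu, class, mask);
	}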
- */ - -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/ptrace.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/io.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/sched.h> - -#include <asm/spu.h> -#include <asm/spu_priv1.h> -#include <asm/firmware.h> - -#include "interrupt.h" -#include "spu_priv1_mmio.h" - -static void int_mask_and(struct spu *spu, int class, u64 mask) -{ - u64 old_mask; - - old_mask = in_be64(&spu->priv1->int_mask_RW[class]); - out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); -} - -static void int_mask_or(struct spu *spu, int class, u64 mask) -{ - u64 old_mask; - - old_mask = in_be64(&spu->priv1->int_mask_RW[class]); - out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); -} - -static void int_mask_set(struct spu *spu, int class, u64 mask) -{ - out_be64(&spu->priv1->int_mask_RW[class], mask); -} - -static u64 int_mask_get(struct spu *spu, int class) -{ - return in_be64(&spu->priv1->int_mask_RW[class]); -} - -static void int_stat_clear(struct spu *spu, int class, u64 stat) -{ - out_be64(&spu->priv1->int_stat_RW[class], stat); -} - -static u64 int_stat_get(struct spu *spu, int class) -{ - return in_be64(&spu->priv1->int_stat_RW[class]); -} - -static void cpu_affinity_set(struct spu *spu, int cpu) -{ - u64 target; - u64 route; - - if (nr_cpus_node(spu->node)) { - const struct cpumask *spumask = cpumask_of_node(spu->node), - *cpumask = cpumask_of_node(cpu_to_node(cpu)); - - if (!cpumask_intersects(spumask, cpumask)) - return; - } - - target = iic_get_target_id(cpu); - route = target << 48 | target << 32 | target << 16; - out_be64(&spu->priv1->int_route_RW, route); -} - -static u64 mfc_dar_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_dar_RW); -} - -static u64 mfc_dsisr_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_dsisr_RW); -} - -static void mfc_dsisr_set(struct spu *spu, u64 dsisr) -{ - out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); -} - -static void mfc_sdr_setup(struct spu *spu) -{ - out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); -} - -static void mfc_sr1_set(struct spu *spu, u64 sr1) -{ - out_be64(&spu->priv1->mfc_sr1_RW, sr1); -} - -static u64 mfc_sr1_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_sr1_RW); -} - -static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) -{ - out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); -} - -static u64 mfc_tclass_id_get(struct spu *spu) -{ - return in_be64(&spu->priv1->mfc_tclass_id_RW); -} - -static void tlb_invalidate(struct spu *spu) -{ - out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); -} - -static void resource_allocation_groupID_set(struct spu *spu, u64 id) -{ - out_be64(&spu->priv1->resource_allocation_groupID_RW, id); -} - -static u64 resource_allocation_groupID_get(struct spu *spu) -{ - return in_be64(&spu->priv1->resource_allocation_groupID_RW); -} - -static void resource_allocation_enable_set(struct spu *spu, u64 enable) -{ - out_be64(&spu->priv1->resource_allocation_enable_RW, enable); -} - -static u64 resource_allocation_enable_get(struct spu *spu) -{ - return in_be64(&spu->priv1->resource_allocation_enable_RW); -} - -const struct spu_priv1_ops spu_priv1_mmio_ops = -{ - .int_mask_and = int_mask_and, - .int_mask_or = int_mask_or, - .int_mask_set = int_mask_set, - .int_mask_get = int_mask_get, - .int_stat_clear = int_stat_clear, - .int_stat_get = int_stat_get, - .cpu_affinity_set = cpu_affinity_set, - .mfc_dar_get = mfc_dar_get, - .mfc_dsisr_get = mfc_dsisr_get, - .mfc_dsisr_set = 
mfc_dsisr_set, - .mfc_sdr_setup = mfc_sdr_setup, - .mfc_sr1_set = mfc_sr1_set, - .mfc_sr1_get = mfc_sr1_get, - .mfc_tclass_id_set = mfc_tclass_id_set, - .mfc_tclass_id_get = mfc_tclass_id_get, - .tlb_invalidate = tlb_invalidate, - .resource_allocation_groupID_set = resource_allocation_groupID_set, - .resource_allocation_groupID_get = resource_allocation_groupID_get, - .resource_allocation_enable_set = resource_allocation_enable_set, - .resource_allocation_enable_get = resource_allocation_enable_get, -}; diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h deleted file mode 100644 index 04f0db339dc1..000000000000 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * spu hypervisor abstraction for direct hardware access. - * - * Copyright (C) 2006 Sony Computer Entertainment Inc. - * Copyright 2006 Sony Corp. - */ - -#ifndef SPU_PRIV1_MMIO_H -#define SPU_PRIV1_MMIO_H - -struct device_node *spu_devnode(struct spu *spu); - -#endif /* SPU_PRIV1_MMIO_H */ diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 87ad7d563cfa..000894e07b02 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -36,6 +36,9 @@ static inline struct spufs_calls *spufs_calls_get(void) static inline void spufs_calls_put(struct spufs_calls *calls) { + if (!calls) + return; + BUG_ON(calls != spufs_calls); /* we don't need to rcu this, as we hold a reference to the module */ @@ -53,82 +56,55 @@ static inline void spufs_calls_put(struct spufs_calls *calls) { } #endif /* CONFIG_SPU_FS_MODULE */ +DEFINE_CLASS(spufs_calls, struct spufs_calls *, spufs_calls_put(_T), spufs_calls_get(), void) + SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, umode_t, mode, int, neighbor_fd) { - long ret; - struct spufs_calls *calls; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return -ENOSYS; if (flags & SPU_CREATE_AFFINITY_SPU) { - struct fd neighbor = fdget(neighbor_fd); - ret = -EBADF; - if (neighbor.file) { - ret = calls->create_thread(name, flags, mode, neighbor.file); - fdput(neighbor); - } - } else - ret = calls->create_thread(name, flags, mode, NULL); - - spufs_calls_put(calls); - return ret; + CLASS(fd, neighbor)(neighbor_fd); + if (fd_empty(neighbor)) + return -EBADF; + return calls->create_thread(name, flags, mode, fd_file(neighbor)); + } else { + return calls->create_thread(name, flags, mode, NULL); + } } SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus) { - long ret; - struct fd arg; - struct spufs_calls *calls; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return -ENOSYS; - ret = -EBADF; - arg = fdget(fd); - if (arg.file) { - ret = calls->spu_run(arg.file, unpc, ustatus); - fdput(arg); - } + CLASS(fd, arg)(fd); + if (fd_empty(arg)) + return -EBADF; - spufs_calls_put(calls); - return ret; + return calls->spu_run(fd_file(arg), unpc, ustatus); } #ifdef CONFIG_COREDUMP int elf_coredump_extra_notes_size(void) { - struct spufs_calls *calls; - int ret; - - calls = spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return 0; - ret = calls->coredump_extra_notes_size(); - - spufs_calls_put(calls); - - return ret; + return calls->coredump_extra_notes_size(); } int elf_coredump_extra_notes_write(struct coredump_params *cprm) { - struct spufs_calls *calls; - int ret; - - calls = 
spufs_calls_get(); + CLASS(spufs_calls, calls)(); if (!calls) return 0; - ret = calls->coredump_extra_notes_write(cprm); - - spufs_calls_put(calls); - - return ret; + return calls->coredump_extra_notes_write(cprm); } #endif diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 18daafbe2e65..301ee7d8b7df 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -73,9 +73,7 @@ static struct spu_context *coredump_next_context(int *fd) return NULL; *fd = n - 1; - rcu_read_lock(); - file = lookup_fdget_rcu(*fd); - rcu_read_unlock(); + file = fget_raw(*fd); if (file) { ctx = SPUFS_I(file_inode(file))->i_ctx; get_spu_context(ctx); diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 02a8158c469d..d5a2c77bc908 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -453,7 +453,6 @@ static const struct file_operations spufs_cntl_fops = { .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, - .llseek = no_llseek, .mmap = spufs_cntl_mmap, }; @@ -634,7 +633,6 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_fops = { .open = spufs_pipe_open, .read = spufs_mbox_read, - .llseek = no_llseek, }; static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, @@ -664,7 +662,6 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_mbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_mbox_stat_read, - .llseek = no_llseek, }; /* low-level ibox access function */ @@ -769,7 +766,6 @@ static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, - .llseek = no_llseek, }; static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, @@ -797,7 +793,6 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_ibox_stat_fops = { .open = spufs_pipe_open, .read = spufs_ibox_stat_read, - .llseek = no_llseek, }; /* low-level mailbox write */ @@ -901,7 +896,6 @@ static const struct file_operations spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, - .llseek = no_llseek, }; static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, @@ -929,7 +923,6 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, static const struct file_operations spufs_wbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_wbox_stat_read, - .llseek = no_llseek, }; static int spufs_signal1_open(struct inode *inode, struct file *file) @@ -1056,7 +1049,6 @@ static const struct file_operations spufs_signal1_fops = { .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static const struct file_operations spufs_signal1_nosched_fops = { @@ -1064,7 +1056,6 @@ static const struct file_operations spufs_signal1_nosched_fops = { .release = spufs_signal1_release, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, - .llseek = no_llseek, }; static int spufs_signal2_open(struct inode *inode, struct file *file) @@ -1195,7 +1186,6 @@ static const struct file_operations spufs_signal2_fops = { .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; 
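The spu_syscalls.c conversion above uses the scope-based cleanup helpers from <linux/cleanup.h>: DEFINE_CLASS(name, type, exit, init, init_args...) generates a guard type whose exit expression runs automatically when the variable leaves scope, and CLASS(name, var)(args) instantiates one. The exit expression runs on every path out of the scope, including early returns, which is why the patch also makes spufs_calls_put() tolerate a NULL pointer. A minimal sketch of the pattern, with a hypothetical refcounted resource (res_get/res_put are illustrative, not from this patch):

	#include <linux/cleanup.h>
	#include <linux/errno.h>

	struct res;
	struct res *res_get(void);	/* hypothetical getter, may return NULL */
	void res_put(struct res *r);	/* hypothetical putter, must accept NULL */

	/* res_put(_T) runs when a CLASS(res, ...) variable goes out of scope */
	DEFINE_CLASS(res, struct res *, res_put(_T), res_get(), void)

	static long use_res(void)
	{
		CLASS(res, r)();	/* r = res_get(); res_put(r) on every exit path */

		if (!r)
			return -ENOSYS;	/* res_put(NULL) still runs, hence the NULL check */
		/* ... use r ... */
		return 0;
	}

The CLASS(fd, ...) guards in the syscall bodies are the same idea applied to file descriptors, with fd_empty()/fd_file() replacing the old fdget()/fdput() pairing.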
static const struct file_operations spufs_signal2_nosched_fops = { @@ -1203,7 +1193,6 @@ static const struct file_operations spufs_signal2_nosched_fops = { .release = spufs_signal2_release, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, - .llseek = no_llseek, }; /* @@ -1343,7 +1332,6 @@ static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, .release = spufs_mss_release, .mmap = spufs_mss_mmap, - .llseek = no_llseek, }; static vm_fault_t @@ -1401,7 +1389,6 @@ static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, - .llseek = no_llseek, }; @@ -1704,23 +1691,11 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id) ret = spu_acquire(ctx); if (ret) - goto out; -#if 0 -/* this currently hangs */ - ret = spufs_wait(ctx->mfc_wq, - ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); - if (ret) - goto out; - ret = spufs_wait(ctx->mfc_wq, - ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); - if (ret) - goto out; -#else - ret = 0; -#endif + return ret; + spu_release(ctx); -out: - return ret; + + return 0; } static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) @@ -1744,7 +1719,6 @@ static const struct file_operations spufs_mfc_fops = { .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, .mmap = spufs_mfc_mmap, - .llseek = no_llseek, }; static int spufs_npc_set(void *data, u64 val) @@ -2114,7 +2088,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_dma_info_fops = { .open = spufs_info_open, .read = spufs_dma_info_read, - .llseek = no_llseek, }; static void spufs_get_proxydma_info(struct spu_context *ctx, @@ -2171,7 +2144,6 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, static const struct file_operations spufs_proxydma_info_fops = { .open = spufs_info_open, .read = spufs_proxydma_info_read, - .llseek = no_llseek, }; static int spufs_show_tid(struct seq_file *s, void *private) @@ -2454,7 +2426,6 @@ static const struct file_operations spufs_switch_log_fops = { .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, .release = spufs_switch_log_release, - .llseek = no_llseek, }; /** diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4..2c2999de6bfa 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1; out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 030de2b8c145..9f9e4b871627 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; } +static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct 
inode *inode, struct file *file) { struct inode *parent; @@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret); + unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); } @@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; @@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV; - gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + } if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; } @@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -492,6 +522,21 @@ out: return ret; } +static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + static int spufs_gang_open(const struct path *path) { int ret; @@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path) return PTR_ERR(filp); } - filp->f_op = &simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } @@ -822,6 +865,7 @@ static void __exit spufs_exit(void) } module_exit(spufs_exit); +MODULE_DESCRIPTION("SPU file system"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 99bd027a7f7c..8e7ed010bfde 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -508,7 +508,7 @@ static void __spu_del_from_rq(struct spu_context *ctx) if (!list_empty(&ctx->rq)) { if (!--spu_prio->nr_waiting) - del_timer(&spusched_timer); + timer_delete(&spusched_timer); list_del_init(&ctx->rq); if (list_empty(&spu_prio->runq[prio])) @@ -868,7 +868,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) } /** - * spu_deactivate - unbind a context from it's physical 
spu + * spu_deactivate - unbind a context from its physical spu * @ctx: spu context to unbind * * Unbind @ctx from the physical spu it is running on and schedule @@ -1126,8 +1126,8 @@ void spu_sched_exit(void) remove_proc_entry("spu_loadavg", NULL); - del_timer_sync(&spusched_timer); - del_timer_sync(&spuloadavg_timer); + timer_delete_sync(&spusched_timer); + timer_delete_sync(&spuloadavg_timer); kthread_stop(spusched_task); for (node = 0; node < MAX_NUMNODES; node++) { diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 84958487f696..d33787c57c39 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; atomic_t aff_sched_count; + + int alive; }; /* Flag bits for spu_gang aff_flags */ diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index 0eedae96498c..d3bf56a46656 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -92,4 +92,5 @@ void __init chrp_nvram_init(void) return; } +MODULE_DESCRIPTION("PPC NVRAM device driver"); MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index 5c4f1a9ca154..6f4a41a9352a 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -14,7 +14,7 @@ #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> -#include <linux/mv643xx.h> +#include <linux/mv643xx_eth.h> #include <linux/pci.h> #define PEGASOS2_MARVELL_REGBASE (0xf1000000) @@ -25,12 +25,15 @@ #define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE) #define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) ) - #define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4) #undef BE_VERBOSE +#define MV64340_BASE_ADDR_ENABLE 0x278 +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 +#define MV64340_SRAM_CONFIG 0x380 + static struct resource mv643xx_eth_shared_resources[] = { [0] = { .name = "ethernet shared base", diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 36ee3a5056a1..c1bfa4c3444c 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -486,7 +486,7 @@ static void __init chrp_find_8259(void) i8259_init(pic, chrp_int_ack); if (ppc_md.get_irq == NULL) { ppc_md.get_irq = i8259_irq; - irq_set_default_host(i8259_get_host()); + irq_set_default_domain(i8259_get_host()); } if (chrp_mpic != NULL) { cascade_irq = irq_of_parse_and_map(pic, 0); diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 4d9200bdba78..91a8f0a7086e 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -149,8 +149,9 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np) __flipper_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS, - &flipper_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + FLIPPER_NR_IRQS, + &flipper_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); return NULL; @@ -172,7 +173,7 @@ unsigned int flipper_pic_get_irq(void) return 0; /* no more IRQs pending */ irq = __ffs(irq_status); - return 
irq_linear_revmap(flipper_irq_host, irq); + return irq_find_mapping(flipper_irq_host, irq); } /* @@ -190,7 +191,7 @@ void __init flipper_pic_probe(void) flipper_irq_host = flipper_pic_init(np); BUG_ON(!flipper_irq_host); - irq_set_default_host(flipper_irq_host); + irq_set_default_domain(flipper_irq_host); of_node_put(np); } diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 4d2d92de30af..b57e87b0b3ce 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -175,8 +175,9 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) __hlwd_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS, - &hlwd_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + HLWD_NR_IRQS, + &hlwd_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); iounmap(io_base); @@ -189,7 +190,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) unsigned int hlwd_pic_get_irq(void) { unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host); - return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0; + return hwirq ? irq_find_mapping(hlwd_irq_host, hwirq) : 0; } /* diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index e265f026eee2..4012f206ec63 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/initrd.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <asm/time.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 00bec0f051be..5ca41972ef22 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c @@ -14,6 +14,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/seq_file.h> #include <asm/i8259.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig deleted file mode 100644 index 4c058cc57c90..000000000000 --- a/arch/powerpc/platforms/maple/Kconfig +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config PPC_MAPLE - depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN - bool "Maple 970FX Evaluation Board" - select FORCE_PCI - select MPIC - select U3_DART - select MPIC_U3_HT_IRQS - select GENERIC_TBSYNC - select PPC_UDBG_16550 - select PPC_970_NAP - select PPC_64S_HASH_MMU - select PPC_HASH_MMU_NATIVE - select PPC_RTAS - select MMIO_NVRAM - select ATA_NONSTANDARD if ATA - help - This option enables support for the Maple 970FX Evaluation Board. - For more information, refer to <http://www.970eval.com> diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h deleted file mode 100644 index 4f358b55c341..000000000000 --- a/arch/powerpc/platforms/maple/maple.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Declarations for maple-specific code. - * - * Maple is the name of a PPC970 evaluation board. 
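The embedded6xx hunks above (flipper-pic, hlwd-pic), like the chrp and spufs changes before them, apply mechanical tree-wide API renames; the substitutions visible in this patch are:

	irq_domain_add_linear(np, ...)    ->  irq_domain_create_linear(of_fwnode_handle(np), ...)
	irq_linear_revmap(host, hwirq)    ->  irq_find_mapping(host, hwirq)
	irq_set_default_host(host)        ->  irq_set_default_domain(host)
	del_timer(t) / del_timer_sync(t)  ->  timer_delete(t) / timer_delete_sync(t)

Each call site changes one line, with no intended change in behaviour.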
- */ -extern int maple_set_rtc_time(struct rtc_time *tm); -extern void maple_get_rtc_time(struct rtc_time *tm); -extern time64_t maple_get_boot_time(void); -extern void maple_calibrate_decr(void); -extern void maple_pci_init(void); -extern void maple_pci_irq_fixup(struct pci_dev *dev); -extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel); - -extern struct pci_controller_ops maple_pci_controller_ops; diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c deleted file mode 100644 index b911b31717cc..000000000000 --- a/arch/powerpc/platforms/maple/pci.c +++ /dev/null @@ -1,672 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/of_irq.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/pci-bridge.h> -#include <asm/machdep.h> -#include <asm/iommu.h> -#include <asm/ppc-pci.h> -#include <asm/isa-bridge.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif - -static struct pci_controller *u3_agp, *u3_ht, *u4_pcie; - -static int __init fixup_one_level_bus_range(struct device_node *node, int higher) -{ - for (; node; node = node->sibling) { - const int *bus_range; - const unsigned int *class_code; - int len; - - /* For PCI<->PCI bridges or CardBus bridges, we go down */ - class_code = of_get_property(node, "class-code", NULL); - if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) - continue; - bus_range = of_get_property(node, "bus-range", &len); - if (bus_range != NULL && len > 2 * sizeof(int)) { - if (bus_range[1] > higher) - higher = bus_range[1]; - } - higher = fixup_one_level_bus_range(node->child, higher); - } - return higher; -} - -/* This routine fixes the "bus-range" property of all bridges in the - * system since they tend to have their "last" member wrong on macs - * - * Note that the bus numbers manipulated here are OF bus numbers, they - * are not Linux bus numbers. - */ -static void __init fixup_bus_range(struct device_node *bridge) -{ - int *bus_range; - struct property *prop; - int len; - - /* Lookup the "bus-range" property for the hose */ - prop = of_find_property(bridge, "bus-range", &len); - if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %pOF\n", - bridge); - return; - } - bus_range = prop->value; - bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); -} - - -static unsigned long u3_agp_cfa0(u8 devfn, u8 off) -{ - return (1 << (unsigned long)PCI_SLOT(devfn)) | - ((unsigned long)PCI_FUNC(devfn) << 8) | - ((unsigned long)off & 0xFCUL); -} - -static unsigned long u3_agp_cfa1(u8 bus, u8 devfn, u8 off) -{ - return ((unsigned long)bus << 16) | - ((unsigned long)devfn << 8) | - ((unsigned long)off & 0xFCUL) | - 1UL; -} - -static volatile void __iomem *u3_agp_cfg_access(struct pci_controller* hose, - u8 bus, u8 dev_fn, u8 offset) -{ - unsigned int caddr; - - if (bus == hose->first_busno) { - if (dev_fn < (11 << 3)) - return NULL; - caddr = u3_agp_cfa0(dev_fn, offset); - } else - caddr = u3_agp_cfa1(bus, dev_fn, offset); - - /* Uninorth will return garbage if we don't read back the value ! 
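u3_agp_cfa0()/u3_agp_cfa1() above generate the two classic PCI configuration address formats: a type 0 cycle for devices on the root bus, with the slot number encoded as a one-hot IDSEL bit, and a type 1 cycle for devices behind a bridge, carrying the full bus/devfn and setting bit 0. Two worked values (arithmetic from the code above, not from chip documentation):

	u3_agp_cfa0(PCI_DEVFN(13, 0), 0x10) == (1 << 13) | (0 << 8) | 0x10 == 0x2010
	u3_agp_cfa1(2, PCI_DEVFN(3, 0), 0x10) == (2 << 16) | (0x18 << 8) | 0x10 | 1 == 0x21811

The write-then-read-back loop that follows exists because, per the comment, Uninorth apparently does not latch the address reliably unless the value is read back.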
*/ - do { - out_le32(hose->cfg_addr, caddr); - } while (in_le32(hose->cfg_addr) != caddr); - - offset &= 0x07; - return hose->cfg_data + offset; -} - -static int u3_agp_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - addr = u3_agp_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int u3_agp_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - addr = u3_agp_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u3_agp_pci_ops = -{ - .read = u3_agp_read_config, - .write = u3_agp_write_config, -}; - -static unsigned long u3_ht_cfa0(u8 devfn, u8 off) -{ - return (devfn << 8) | off; -} - -static unsigned long u3_ht_cfa1(u8 bus, u8 devfn, u8 off) -{ - return u3_ht_cfa0(devfn, off) + (bus << 16) + 0x01000000UL; -} - -static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose, - u8 bus, u8 devfn, u8 offset) -{ - if (bus == hose->first_busno) { - if (PCI_SLOT(devfn) == 0) - return NULL; - return hose->cfg_data + u3_ht_cfa0(devfn, offset); - } else - return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset); -} - -static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset, - int len, u32 *val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr; - addr += ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_be16(addr); - break; - default: - *val = in_be32(addr); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset, - int len, u32 val) -{ - volatile void __iomem *addr; - - addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3)); - - if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST) - return PCIBIOS_SUCCESSFUL; - - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_be16(addr, val); - break; - default: - out_be32(addr, val); - break; - } - - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_read_config(hose, offset, len, val); - - if (offset > 0xff) - return PCIBIOS_BAD_REGISTER_NUMBER; - - addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return 
PCIBIOS_DEVICE_NOT_FOUND; - - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0)) - return u3_ht_root_write_config(hose, offset, len, val); - - if (offset > 0xff) - return PCIBIOS_BAD_REGISTER_NUMBER; - - addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u3_ht_pci_ops = -{ - .read = u3_ht_read_config, - .write = u3_ht_write_config, -}; - -static unsigned int u4_pcie_cfa0(unsigned int devfn, unsigned int off) -{ - return (1 << PCI_SLOT(devfn)) | - (PCI_FUNC(devfn) << 8) | - ((off >> 8) << 28) | - (off & 0xfcu); -} - -static unsigned int u4_pcie_cfa1(unsigned int bus, unsigned int devfn, - unsigned int off) -{ - return (bus << 16) | - (devfn << 8) | - ((off >> 8) << 28) | - (off & 0xfcu) | 1u; -} - -static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose, - u8 bus, u8 dev_fn, int offset) -{ - unsigned int caddr; - - if (bus == hose->first_busno) - caddr = u4_pcie_cfa0(dev_fn, offset); - else - caddr = u4_pcie_cfa1(bus, dev_fn, offset); - - /* Uninorth will return garbage if we don't read back the value ! */ - do { - out_le32(hose->cfg_addr, caddr); - } while (in_le32(hose->cfg_addr) != caddr); - - offset &= 0x03; - return hose->cfg_data + offset; -} - -static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - *val = in_8(addr); - break; - case 2: - *val = in_le16(addr); - break; - default: - *val = in_le32(addr); - break; - } - return PCIBIOS_SUCCESSFUL; -} -static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - volatile void __iomem *addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); - if (!addr) - return PCIBIOS_DEVICE_NOT_FOUND; - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. 
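Two addressing details in the hunks above are worth spelling out. The U3 HT root-port path computes its MMIO address as ((offset & ~3) << 2) + (4 - len - (offset & 3)), i.e. each 4-byte config word is spread over 16 bytes of register space, with the byte offset inverted to suit the big-endian in_8()/in_be16()/in_be32() accessors; for example (arithmetic only, from the code above):

	offset = 6, len = 2:  ((6 & ~3) << 2) + (4 - 2 - (6 & 3)) = 16 + 0 = 16

so the 16-bit value at config offset 6 is read with in_be16() at cfg_addr + 16. The U4 PCIe helpers additionally route bits 8-11 of the 4 KB extended config offset into the top nibble of the address word, ((off >> 8) << 28), which is why those ops reject offset >= 0x1000.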
- */ - switch (len) { - case 1: - out_8(addr, val); - break; - case 2: - out_le16(addr, val); - break; - default: - out_le32(addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops u4_pcie_pci_ops = -{ - .read = u4_pcie_read_config, - .write = u4_pcie_write_config, -}; - -static void __init setup_u3_agp(struct pci_controller* hose) -{ - /* On G5, we move AGP up to high bus number so we don't need - * to reassign bus numbers for HT. If we ever have P2P bridges - * on AGP, we'll have to move pci_assign_all_buses to the - * pci_controller structure so we enable it for AGP and not for - * HT childs. - * We hard code the address because of the different size of - * the reg address cell, we shall fix that by killing struct - * reg_property and using some accessor functions instead - */ - hose->first_busno = 0xf0; - hose->last_busno = 0xff; - hose->ops = &u3_agp_pci_ops; - hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); - hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); - - u3_agp = hose; -} - -static void __init setup_u4_pcie(struct pci_controller* hose) -{ - /* We currently only implement the "non-atomic" config space, to - * be optimised later. - */ - hose->ops = &u4_pcie_pci_ops; - hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); - hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); - - u4_pcie = hose; -} - -static void __init setup_u3_ht(struct pci_controller* hose) -{ - hose->ops = &u3_ht_pci_ops; - - /* We hard code the address because of the different size of - * the reg address cell, we shall fix that by killing struct - * reg_property and using some accessor functions instead - */ - hose->cfg_data = ioremap(0xf2000000, 0x02000000); - hose->cfg_addr = ioremap(0xf8070000, 0x1000); - - hose->first_busno = 0; - hose->last_busno = 0xef; - - u3_ht = hose; -} - -static int __init maple_add_bridge(struct device_node *dev) -{ - int len; - struct pci_controller *hose; - char* disp_name; - const int *bus_range; - int primary = 1; - - DBG("Adding PCI host bridge %pOF\n", dev); - - bus_range = of_get_property(dev, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n", - dev); - } - - hose = pcibios_alloc_controller(dev); - if (hose == NULL) - return -ENOMEM; - hose->first_busno = bus_range ? bus_range[0] : 0; - hose->last_busno = bus_range ? bus_range[1] : 0xff; - hose->controller_ops = maple_pci_controller_ops; - - disp_name = NULL; - if (of_device_is_compatible(dev, "u3-agp")) { - setup_u3_agp(hose); - disp_name = "U3-AGP"; - primary = 0; - } else if (of_device_is_compatible(dev, "u3-ht")) { - setup_u3_ht(hose); - disp_name = "U3-HT"; - primary = 1; - } else if (of_device_is_compatible(dev, "u4-pcie")) { - setup_u4_pcie(hose); - disp_name = "U4-PCIE"; - primary = 0; - } - printk(KERN_INFO "Found %s PCI host bridge. 
Firmware bus number: %d->%d\n", - disp_name, hose->first_busno, hose->last_busno); - - /* Interpret the "ranges" property */ - /* This also maps the I/O region and sets isa_io/mem_base */ - pci_process_bridge_OF_ranges(hose, dev, primary); - - /* Fixup "bus-range" OF property */ - fixup_bus_range(dev); - - /* Check for legacy IOs */ - isa_bridge_find_early(hose); - - /* create pci_dn's for DT nodes under this PHB */ - pci_devs_phb_init_dynamic(hose); - - return 0; -} - - -void maple_pci_irq_fixup(struct pci_dev *dev) -{ - DBG(" -> maple_pci_irq_fixup\n"); - - /* Fixup IRQ for PCIe host */ - if (u4_pcie != NULL && dev->bus->number == 0 && - pci_bus_to_host(dev->bus) == u4_pcie) { - printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); - dev->irq = irq_create_mapping(NULL, 1); - if (dev->irq) - irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); - } - - /* Hide AMD8111 IDE interrupt when in legacy mode so - * the driver calls pci_get_legacy_ide_irq() - */ - if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_8111_IDE && - (dev->class & 5) != 5) { - dev->irq = 0; - } - - DBG(" <- maple_pci_irq_fixup\n"); -} - -static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge) -{ - struct pci_controller *hose = pci_bus_to_host(bridge->bus); - struct device_node *np, *child; - - if (hose != u3_agp) - return 0; - - /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We - * assume there is no P2P bridge on the AGP bus, which should be a - * safe assumption, hopefully. - */ - np = hose->dn; - PCI_DN(np)->busno = 0xf0; - for_each_child_of_node(np, child) - PCI_DN(child)->busno = 0xf0; - - return 0; -} - -void __init maple_pci_init(void) -{ - struct device_node *np, *root; - struct device_node *ht = NULL; - - /* Probe root PCI hosts, that is on U3 the AGP host and the - * HyperTransport host. That one is actually "kept" around - * and actually added last as its resource management relies - * on the AGP resources to have been setup first - */ - root = of_find_node_by_path("/"); - if (root == NULL) { - printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n"); - return; - } - for_each_child_of_node(root, np) { - if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht")) - continue; - if ((of_device_is_compatible(np, "u4-pcie") || - of_device_is_compatible(np, "u3-agp")) && - maple_add_bridge(np) == 0) - of_node_get(np); - - if (of_device_is_compatible(np, "u3-ht")) { - of_node_get(np); - ht = np; - } - } - of_node_put(root); - - /* Now setup the HyperTransport host if we found any - */ - if (ht && maple_add_bridge(ht) != 0) - of_node_put(ht); - - ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare; - - /* Tell pci.c to not change any resource allocations. */ - pci_add_flags(PCI_PROBE_ONLY); -} - -int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) -{ - struct device_node *np; - unsigned int defirq = channel ? 15 : 14; - unsigned int irq; - - if (pdev->vendor != PCI_VENDOR_ID_AMD || - pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) - return defirq; - - np = pci_device_to_OF_node(pdev); - if (np == NULL) { - printk("Failed to locate OF node for IDE %s\n", - pci_name(pdev)); - return defirq; - } - irq = irq_of_parse_and_map(np, channel & 0x1); - if (!irq) { - printk("Failed to map onboard IDE interrupt for channel %d\n", - channel); - return defirq; - } - return irq; -} - -static void quirk_ipr_msi(struct pci_dev *dev) -{ - /* Something prevents MSIs from the IPR from working on Bimini, - * and the driver has no smarts to recover.
So disable MSI - * on it for now. */ - - if (machine_is(maple)) { - dev->no_msi = 1; - dev_info(&dev->dev, "Quirk disabled MSI\n"); - } -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, - quirk_ipr_msi); - -struct pci_controller_ops maple_pci_controller_ops = { -}; diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c deleted file mode 100644 index f329a03edf4a..000000000000 --- a/arch/powerpc/platforms/maple/setup.c +++ /dev/null @@ -1,363 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Maple (970 eval board) setup code - * - * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/user.h> -#include <linux/tty.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/major.h> -#include <linux/initrd.h> -#include <linux/vt_kern.h> -#include <linux/console.h> -#include <linux/pci.h> -#include <linux/adb.h> -#include <linux/cuda.h> -#include <linux/pmu.h> -#include <linux/irq.h> -#include <linux/seq_file.h> -#include <linux/root_dev.h> -#include <linux/serial.h> -#include <linux/smp.h> -#include <linux/bitops.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> -#include <linux/memblock.h> - -#include <asm/processor.h> -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/pci-bridge.h> -#include <asm/iommu.h> -#include <asm/machdep.h> -#include <asm/dma.h> -#include <asm/cputable.h> -#include <asm/time.h> -#include <asm/mpic.h> -#include <asm/rtas.h> -#include <asm/udbg.h> -#include <asm/nvram.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) 
-#endif - -static unsigned long maple_find_nvram_base(void) -{ - struct device_node *rtcs; - unsigned long result = 0; - - /* find NVRAM device */ - rtcs = of_find_compatible_node(NULL, "nvram", "AMD8111"); - if (rtcs) { - struct resource r; - if (of_address_to_resource(rtcs, 0, &r)) { - printk(KERN_EMERG "Maple: Unable to translate NVRAM" - " address\n"); - goto bail; - } - if (!(r.flags & IORESOURCE_IO)) { - printk(KERN_EMERG "Maple: NVRAM address isn't PIO!\n"); - goto bail; - } - result = r.start; - } else - printk(KERN_EMERG "Maple: Unable to find NVRAM\n"); - bail: - of_node_put(rtcs); - return result; -} - -static void __noreturn maple_restart(char *cmd) -{ - unsigned int maple_nvram_base; - const unsigned int *maple_nvram_offset, *maple_nvram_command; - struct device_node *sp; - - maple_nvram_base = maple_find_nvram_base(); - if (maple_nvram_base == 0) - goto fail; - - /* find service processor device */ - sp = of_find_node_by_name(NULL, "service-processor"); - if (!sp) { - printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); - goto fail; - } - maple_nvram_offset = of_get_property(sp, "restart-addr", NULL); - maple_nvram_command = of_get_property(sp, "restart-value", NULL); - of_node_put(sp); - - /* send command */ - outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); - for (;;) ; - fail: - printk(KERN_EMERG "Maple: Manual Restart Required\n"); - for (;;) ; -} - -static void __noreturn maple_power_off(void) -{ - unsigned int maple_nvram_base; - const unsigned int *maple_nvram_offset, *maple_nvram_command; - struct device_node *sp; - - maple_nvram_base = maple_find_nvram_base(); - if (maple_nvram_base == 0) - goto fail; - - /* find service processor device */ - sp = of_find_node_by_name(NULL, "service-processor"); - if (!sp) { - printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); - goto fail; - } - maple_nvram_offset = of_get_property(sp, "power-off-addr", NULL); - maple_nvram_command = of_get_property(sp, "power-off-value", NULL); - of_node_put(sp); - - /* send command */ - outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); - for (;;) ; - fail: - printk(KERN_EMERG "Maple: Manual Power-Down Required\n"); - for (;;) ; -} - -static void __noreturn maple_halt(void) -{ - maple_power_off(); -} - -#ifdef CONFIG_SMP -static struct smp_ops_t maple_smp_ops = { - .probe = smp_mpic_probe, - .message_pass = smp_mpic_message_pass, - .kick_cpu = smp_generic_kick_cpu, - .setup_cpu = smp_mpic_setup_cpu, - .give_timebase = smp_generic_give_timebase, - .take_timebase = smp_generic_take_timebase, -}; -#endif /* CONFIG_SMP */ - -static void __init maple_use_rtas_reboot_and_halt_if_present(void) -{ - if (rtas_function_implemented(RTAS_FN_SYSTEM_REBOOT) && - rtas_function_implemented(RTAS_FN_POWER_OFF)) { - ppc_md.restart = rtas_restart; - pm_power_off = rtas_power_off; - ppc_md.halt = rtas_halt; - } -} - -static void __init maple_setup_arch(void) -{ - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - /* Setup SMP callback */ -#ifdef CONFIG_SMP - smp_ops = &maple_smp_ops; -#endif - maple_use_rtas_reboot_and_halt_if_present(); - - printk(KERN_DEBUG "Using native/NAP idle loop\n"); - - mmio_nvram_init(); -} - -/* - * This is almost identical to pSeries and CHRP. 
We need to make that - * code generic at one point, with appropriate bits in the device-tree to - * identify the presence of an HT APIC - */ -static void __init maple_init_IRQ(void) -{ - struct device_node *root, *np, *mpic_node = NULL; - const unsigned int *opprop; - unsigned long openpic_addr = 0; - int naddr, n, i, opplen, has_isus = 0; - struct mpic *mpic; - unsigned int flags = 0; - - /* Locate MPIC in the device-tree. Note that there is a bug - * in the Maple device-tree, where the type of the controller is - * open-pic and not interrupt-controller - */ - - for_each_node_by_type(np, "interrupt-controller") - if (of_device_is_compatible(np, "open-pic")) { - mpic_node = np; - break; - } - if (mpic_node == NULL) - for_each_node_by_type(np, "open-pic") { - mpic_node = np; - break; - } - if (mpic_node == NULL) { - printk(KERN_ERR - "Failed to locate the MPIC interrupt controller\n"); - return; - } - - /* Find address list in /platform-open-pic */ - root = of_find_node_by_path("/"); - naddr = of_n_addr_cells(root); - opprop = of_get_property(root, "platform-open-pic", &opplen); - if (opprop) { - openpic_addr = of_read_number(opprop, naddr); - has_isus = (opplen > naddr); - printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", - openpic_addr, has_isus); - } - - BUG_ON(openpic_addr == 0); - - /* Check for a big endian MPIC */ - if (of_property_read_bool(np, "big-endian")) - flags |= MPIC_BIG_ENDIAN; - - /* XXX Maple specific bits */ - flags |= MPIC_U3_HT_IRQS; - /* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */ - flags |= MPIC_BIG_ENDIAN; - - /* Setup the openpic driver. More device-tree junk; we hard code no - * ISUs for now. I'll have to revisit some stuff with the folks doing - * the firmware for those - */ - mpic = mpic_alloc(mpic_node, openpic_addr, flags, - /*has_isus ? 16 :*/ 0, 0, " MPIC "); - BUG_ON(mpic == NULL); - - /* Add ISUs */ - opplen /= sizeof(u32); - for (n = 0, i = naddr; i < opplen; i += naddr, n++) { - unsigned long isuaddr = of_read_number(opprop + i, naddr); - mpic_assign_isu(mpic, n, isuaddr); - } - - /* All ISUs are setup, complete initialization */ - mpic_init(mpic); - ppc_md.get_irq = mpic_get_irq; - of_node_put(mpic_node); - of_node_put(root); -} - -static void __init maple_progress(char *s, unsigned short hex) -{ - printk("*** %04x : %s\n", hex, s ? s : ""); -} - - -/* - * Called very early, MMU is off, device-tree isn't unflattened - */ -static int __init maple_probe(void) -{ - if (!of_machine_is_compatible("Momentum,Maple") && - !of_machine_is_compatible("Momentum,Apache")) - return 0; - - pm_power_off = maple_power_off; - - iommu_init_early_dart(&maple_pci_controller_ops); - - return 1; -} - -#ifdef CONFIG_EDAC -/* - * Register a platform device for CPC925 memory controller on - * all boards with U3H (CPC925) bridge.
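(A note on the "platform-open-pic" walk in maple_init_IRQ() above: the property is a flat array of 32-bit cells, the first naddr cells holding the MPIC base address and each following naddr-cell group one ISU address. A minimal user-space model of that decoding; the helper mimics of_read_number() and the sample addresses are invented for illustration:)

#include <stdint.h>
#include <stdio.h>

/* Like of_read_number(): fold 'naddr' cells into a single value */
static uint64_t read_number(const uint32_t *cells, int naddr)
{
	uint64_t r = 0;
	while (naddr--)
		r = (r << 32) | *cells++;
	return r;
}

int main(void)
{
	/* naddr == 2: MPIC base followed by a single ISU (made-up values) */
	const uint32_t prop[] = { 0x0, 0xf8040000, 0x0, 0xf8042000 };
	int naddr = 2, opplen = sizeof(prop) / sizeof(prop[0]);

	printf("MPIC at 0x%llx\n", (unsigned long long)read_number(prop, naddr));
	for (int i = naddr; i < opplen; i += naddr)
		printf("ISU at 0x%llx\n",
		       (unsigned long long)read_number(prop + i, naddr));
	return 0;
}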
- */ -static int __init maple_cpc925_edac_setup(void) -{ - struct platform_device *pdev; - struct device_node *np = NULL; - struct resource r; - int ret; - volatile void __iomem *mem; - u32 rev; - - np = of_find_node_by_type(NULL, "memory-controller"); - if (!np) { - printk(KERN_ERR "%s: Unable to find memory-controller node\n", - __func__); - return -ENODEV; - } - - ret = of_address_to_resource(np, 0, &r); - of_node_put(np); - - if (ret < 0) { - printk(KERN_ERR "%s: Unable to get memory-controller reg\n", - __func__); - return -ENODEV; - } - - mem = ioremap(r.start, resource_size(&r)); - if (!mem) { - printk(KERN_ERR "%s: Unable to map memory-controller memory\n", - __func__); - return -ENOMEM; - } - - rev = __raw_readl(mem); - iounmap(mem); - - if (rev < 0x34 || rev > 0x3f) { /* U3H */ - printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n", - __func__, rev); - return 0; - } - - pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - printk(KERN_INFO "%s: CPC925 platform device created\n", __func__); - - return 0; -} -machine_device_initcall(maple, maple_cpc925_edac_setup); -#endif - -define_machine(maple) { - .name = "Maple", - .probe = maple_probe, - .setup_arch = maple_setup_arch, - .discover_phbs = maple_pci_init, - .init_IRQ = maple_init_IRQ, - .pci_irq_fixup = maple_pci_irq_fixup, - .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, - .restart = maple_restart, - .halt = maple_halt, - .get_boot_time = maple_get_boot_time, - .set_rtc_time = maple_set_rtc_time, - .get_rtc_time = maple_get_rtc_time, - .progress = maple_progress, - .power_save = power4_idle, -}; diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c deleted file mode 100644 index 91606411d2e0..000000000000 --- a/arch/powerpc/platforms/maple/time.c +++ /dev/null @@ -1,170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), - * IBM Corp. - */ - -#undef DEBUG - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/time.h> -#include <linux/adb.h> -#include <linux/pmu.h> -#include <linux/interrupt.h> -#include <linux/mc146818rtc.h> -#include <linux/bcd.h> -#include <linux/of_address.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/machdep.h> -#include <asm/time.h> - -#include "maple.h" - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - -static int maple_rtc_addr; - -static int maple_clock_read(int addr) -{ - outb_p(addr, maple_rtc_addr); - return inb_p(maple_rtc_addr+1); -} - -static void maple_clock_write(unsigned long val, int addr) -{ - outb_p(addr, maple_rtc_addr); - outb_p(val, maple_rtc_addr+1); -} - -void maple_get_rtc_time(struct rtc_time *tm) -{ - do { - tm->tm_sec = maple_clock_read(RTC_SECONDS); - tm->tm_min = maple_clock_read(RTC_MINUTES); - tm->tm_hour = maple_clock_read(RTC_HOURS); - tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH); - tm->tm_mon = maple_clock_read(RTC_MONTH); - tm->tm_year = maple_clock_read(RTC_YEAR); - } while (tm->tm_sec != maple_clock_read(RTC_SECONDS)); - - if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY) - || RTC_ALWAYS_BCD) { - tm->tm_sec = bcd2bin(tm->tm_sec); - tm->tm_min = bcd2bin(tm->tm_min); - tm->tm_hour = bcd2bin(tm->tm_hour); - tm->tm_mday = bcd2bin(tm->tm_mday); - tm->tm_mon = bcd2bin(tm->tm_mon); - tm->tm_year = bcd2bin(tm->tm_year); - } - if ((tm->tm_year + 1900) < 1970) - tm->tm_year += 100; - - tm->tm_wday = -1; -} - -int maple_set_rtc_time(struct rtc_time *tm) -{ - unsigned char save_control, save_freq_select; - int sec, min, hour, mon, mday, year; - - spin_lock(&rtc_lock); - - save_control = maple_clock_read(RTC_CONTROL); /* tell the clock it's being set */ - - maple_clock_write((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = maple_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */ - - maple_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - sec = tm->tm_sec; - min = tm->tm_min; - hour = tm->tm_hour; - mon = tm->tm_mon; - mday = tm->tm_mday; - year = tm->tm_year; - - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - sec = bin2bcd(sec); - min = bin2bcd(min); - hour = bin2bcd(hour); - mon = bin2bcd(mon); - mday = bin2bcd(mday); - year = bin2bcd(year); - } - maple_clock_write(sec, RTC_SECONDS); - maple_clock_write(min, RTC_MINUTES); - maple_clock_write(hour, RTC_HOURS); - maple_clock_write(mon, RTC_MONTH); - maple_clock_write(mday, RTC_DAY_OF_MONTH); - maple_clock_write(year, RTC_YEAR); - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... 
-- Markus Kuhn - */ - maple_clock_write(save_control, RTC_CONTROL); - maple_clock_write(save_freq_select, RTC_FREQ_SELECT); - - spin_unlock(&rtc_lock); - - return 0; -} - -static struct resource rtc_iores = { - .name = "rtc", - .flags = IORESOURCE_IO | IORESOURCE_BUSY, -}; - -time64_t __init maple_get_boot_time(void) -{ - struct rtc_time tm; - struct device_node *rtcs; - - rtcs = of_find_compatible_node(NULL, "rtc", "pnpPNP,b00"); - if (rtcs) { - struct resource r; - if (of_address_to_resource(rtcs, 0, &r)) { - printk(KERN_EMERG "Maple: Unable to translate RTC" - " address\n"); - goto bail; - } - if (!(r.flags & IORESOURCE_IO)) { - printk(KERN_EMERG "Maple: RTC address isn't PIO!\n"); - goto bail; - } - maple_rtc_addr = r.start; - printk(KERN_INFO "Maple: Found RTC at IO 0x%x\n", - maple_rtc_addr); - } - bail: - of_node_put(rtcs); - if (maple_rtc_addr == 0) { - maple_rtc_addr = RTC_PORT(0); /* legacy address */ - printk(KERN_INFO "Maple: No device node for RTC, assuming " - "legacy address (0x%x)\n", maple_rtc_addr); - } - - rtc_iores.start = maple_rtc_addr; - rtc_iores.end = maple_rtc_addr + 7; - request_resource(&ioport_resource, &rtc_iores); - - maple_get_rtc_time(&tm); - return rtc_tm_to_time64(&tm); -} - diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig index 6af443a1db99..cb2aff635bb0 100644 --- a/arch/powerpc/platforms/microwatt/Kconfig +++ b/arch/powerpc/platforms/microwatt/Kconfig @@ -1,11 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_MICROWATT - depends on PPC_BOOK3S_64 && !SMP + depends on PPC_BOOK3S_64 bool "Microwatt SoC platform" select PPC_XICS select PPC_ICS_NATIVE select PPC_ICP_NATIVE select PPC_UDBG_16550 + select COMMON_CLK help This option enables support for FPGA-based Microwatt implementations. 
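One pattern worth pausing on before the Microwatt hunks: the maple_get_rtc_time() loop deleted above is the classic MC146818 idiom of re-reading until the seconds register is stable, so no field is torn by an update in progress. A compressed sketch of just that idea, with maple_clock_read() assumed as the deleted I/O accessor and the RTC_ALWAYS_BCD special case dropped:

#include <linux/bcd.h>
#include <linux/mc146818rtc.h>

extern int maple_clock_read(int addr);	/* assumed accessor, as deleted above */

static void read_rtc_stable(int *sec, int *min)
{
	do {
		*sec = maple_clock_read(RTC_SECONDS);
		*min = maple_clock_read(RTC_MINUTES);
	} while (*sec != maple_clock_read(RTC_SECONDS));

	/* Registers are BCD-encoded unless the chip is in binary mode */
	if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY)) {
		*sec = bcd2bin(*sec);
		*min = bcd2bin(*min);
	}
}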
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile index 116d6d3ad3f0..d973b2ab4042 100644 --- a/arch/powerpc/platforms/microwatt/Makefile +++ b/arch/powerpc/platforms/microwatt/Makefile @@ -1 +1,2 @@ obj-y += setup.o rng.o +obj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h index 335417e95e66..891aa2800768 100644 --- a/arch/powerpc/platforms/microwatt/microwatt.h +++ b/arch/powerpc/platforms/microwatt/microwatt.h @@ -3,5 +3,6 @@ #define _MICROWATT_H void microwatt_rng_init(void); +void microwatt_init_smp(void); #endif /* _MICROWATT_H */ diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c index 5e1c0997170d..6af2ccef736c 100644 --- a/arch/powerpc/platforms/microwatt/setup.c +++ b/arch/powerpc/platforms/microwatt/setup.c @@ -29,15 +29,33 @@ static int __init microwatt_populate(void) } machine_arch_initcall(microwatt, microwatt_populate); +static int __init microwatt_probe(void) +{ + /* Main reason for having this is to start the other CPU(s) */ + if (IS_ENABLED(CONFIG_SMP)) + microwatt_init_smp(); + return 1; +} + static void __init microwatt_setup_arch(void) { microwatt_rng_init(); } +static void microwatt_idle(void) +{ + if (!prep_irq_for_idle_irqsoff()) + return; + + __asm__ __volatile__ ("wait"); +} + define_machine(microwatt) { .name = "microwatt", .compatible = "microwatt-soc", + .probe = microwatt_probe, .init_IRQ = microwatt_init_IRQ, .setup_arch = microwatt_setup_arch, .progress = udbg_progress, + .power_save = microwatt_idle, }; diff --git a/arch/powerpc/platforms/microwatt/smp.c b/arch/powerpc/platforms/microwatt/smp.c new file mode 100644 index 000000000000..7dbf2ca73d47 --- /dev/null +++ b/arch/powerpc/platforms/microwatt/smp.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +/* + * SMP support functions for Microwatt + * Copyright 2025 Paul Mackerras <paulus@ozlabs.org> + */ + +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/io.h> +#include <asm/early_ioremap.h> +#include <asm/ppc-opcode.h> +#include <asm/reg.h> +#include <asm/smp.h> +#include <asm/xics.h> + +#include "microwatt.h" + +static void __init microwatt_smp_probe(void) +{ + xics_smp_probe(); +} + +static void microwatt_smp_setup_cpu(int cpu) +{ + if (cpu != 0) + xics_setup_cpu(); +} + +static struct smp_ops_t microwatt_smp_ops = { + .probe = microwatt_smp_probe, + .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ + .kick_cpu = smp_generic_kick_cpu, + .setup_cpu = microwatt_smp_setup_cpu, +}; + +/* XXX get from device tree */ +#define SYSCON_BASE 0xc0000000 +#define SYSCON_LENGTH 0x100 + +#define SYSCON_CPU_CTRL 0x58 + +void __init microwatt_init_smp(void) +{ + volatile unsigned char __iomem *syscon; + int ncpus; + int timeout; + + syscon = early_ioremap(SYSCON_BASE, SYSCON_LENGTH); + if (syscon == NULL) { + pr_err("Failed to map SYSCON\n"); + return; + } + ncpus = (readl(syscon + SYSCON_CPU_CTRL) >> 8) & 0xff; + if (ncpus < 2) + goto out; + + smp_ops = &microwatt_smp_ops; + + /* + * Write two instructions at location 0: + * mfspr r3, PIR + * b __secondary_hold + */ + *(unsigned int *)KERNELBASE = PPC_RAW_MFSPR(3, SPRN_PIR); + *(unsigned int *)(KERNELBASE+4) = PPC_RAW_BRANCH(&__secondary_hold - (char *)(KERNELBASE+4)); + + /* enable the other CPUs, they start at location 0 */ + writel((1ul << ncpus) - 1, syscon + SYSCON_CPU_CTRL); + + timeout = 10000; + while (!__secondary_hold_acknowledge) { + if (--timeout
== 0) + break; + barrier(); + } + + out: + early_iounmap((void *)syscon, SYSCON_LENGTH); +} diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 4e983af32949..e4538d471256 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -285,7 +285,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match); static struct platform_driver gpio_mdio_driver = { .probe = gpio_mdio_probe, - .remove_new = gpio_mdio_remove, + .remove = gpio_mdio_remove, .driver = { .name = "gpio-mdio-bitbang", .of_match_table = gpio_mdio_match, diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h index 018c30665e1b..6f6743b8e48d 100644 --- a/arch/powerpc/platforms/pasemi/pasemi.h +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -5,7 +5,6 @@ extern time64_t pas_get_boot_time(void); extern void pas_pci_init(void); struct pci_dev; -extern void pas_pci_irq_fixup(struct pci_dev *dev); extern void pas_pci_dma_dev_setup(struct pci_dev *dev); void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 0761d98e5be3..d03b41336901 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -228,7 +228,7 @@ static void __init nemo_init_IRQ(struct mpic *mpic) irq_set_chained_handler(gpio_virq, sb600_8259_cascade); mpic_unmask_irq(irq_get_irq_data(gpio_virq)); - irq_set_default_host(mpic->irqhost); + irq_set_default_domain(mpic->irqhost); } #else diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index 12bc01353bd3..79741370c40c 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c @@ -57,18 +57,10 @@ struct backlight_device *pmac_backlight; int pmac_has_backlight_type(const char *type) { struct device_node* bk_node = of_find_node_by_name(NULL, "backlight"); + int i = of_property_match_string(bk_node, "backlight-control", type); - if (bk_node) { - const char *prop = of_get_property(bk_node, - "backlight-control", NULL); - if (prop && strncmp(prop, type, strlen(type)) == 0) { - of_node_put(bk_node); - return 1; - } - of_node_put(bk_node); - } - - return 0; + of_node_put(bk_node); + return i >= 0; } static void pmac_backlight_key_worker(struct work_struct *work) diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index c097d591670e..02474e27df9b 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -347,7 +347,7 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id) unsigned long flags; spin_lock_irqsave(&host->lock, flags); - del_timer(&host->timeout_timer); + timer_delete(&host->timeout_timer); kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); if (host->state != state_idle) { host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; @@ -359,7 +359,8 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id) static void kw_i2c_timeout(struct timer_list *t) { - struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer); + struct pmac_i2c_host_kw *host = timer_container_of(host, t, + timeout_timer); unsigned long flags; spin_lock_irqsave(&host->lock, flags); diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index fe2e0249cbc2..a112d26185a0 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ 
b/arch/powerpc/platforms/powermac/nvram.c @@ -514,10 +514,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); - if (!nvram_image) - panic("%s: Failed to allocate %u bytes\n", __func__, - NVRAM_SIZE); + nvram_image = memblock_alloc_or_panic(NVRAM_SIZE, SMP_CACHE_BYTES); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 085e0ad20eba..8253de737373 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -313,7 +313,7 @@ static void __init uninorth_install_pfunc(void) /* * Install handlers for the hwclock child if any */ - for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;) + for_each_child_of_node(uninorth_node, np) if (of_node_name_eq(np, "hw-clock")) { unin_hwclock = np; break; diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 7135ea1d7db6..c37783a03d25 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -2,7 +2,7 @@ /* * Support for the interrupt controllers found on Power Macintosh, * currently Apple's "Grand Central" interrupt controller in all - * it's incarnations. OpenPIC support used on newer machines is + * its incarnations. OpenPIC support used on newer machines is * in a separate file * * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) @@ -250,7 +250,7 @@ static unsigned int pmac_pic_get_irq(void) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); if (unlikely(irq < 0)) return 0; - return irq_linear_revmap(pmac_pic_host, irq); + return irq_find_mapping(pmac_pic_host, irq); } static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -327,10 +327,11 @@ static void __init pmac_pic_probe_oldstyle(void) /* * Allocate an irq host */ - pmac_pic_host = irq_domain_add_linear(master, max_irqs, - &pmac_pic_host_ops, NULL); + pmac_pic_host = irq_domain_create_linear(of_fwnode_handle(master), + max_irqs, + &pmac_pic_host_ops, NULL); BUG_ON(pmac_pic_host == NULL); - irq_set_default_host(pmac_pic_host); + irq_set_default_domain(pmac_pic_host); /* Get addresses of first controller if we have a node for it */ BUG_ON(of_address_to_resource(master, 0, &r)); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 6de1cd5d8a58..e119ced05d10 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -45,6 +45,7 @@ #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> +#include <linux/string_choices.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -238,8 +239,7 @@ static void __init l2cr_init(void) _set_L2CR(0); _set_L2CR(*l2cr); pr_info("L2CR overridden (0x%x), backside cache is %s\n", - *l2cr, ((*l2cr) & 0x80000000) ? - "enabled" : "disabled"); + *l2cr, str_enabled_disabled((*l2cr) & 0x80000000)); } of_node_put(np); break; diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index d497a60003d2..822ed70cdcbf 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -176,7 +176,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) * memory location containing the PC to resume from * at address 0. 
* - On Core99, we must store the wakeup vector at - * address 0x80 and eventually it's parameters + * address 0x80 and eventually its parameters * at address 0x84. I've had some trouble with those * parameters, however, and I no longer use them. */ diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 15644be31990..88e92af8acf9 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -35,7 +35,7 @@ #include <asm/ptrace.h> #include <linux/atomic.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/sections.h> @@ -190,7 +190,7 @@ static int __init psurge_secondary_ipi_init(void) { int rc = -ENOMEM; - psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL); + psurge_host = irq_domain_create_nomap(NULL, ~0, &psurge_host_ops, NULL); if (psurge_host) psurge_secondary_virq = irq_create_direct_mapping(psurge_host); @@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr) mdelay(1); /* Restore our exception vector */ - patch_instruction(vector, ppc_inst(save_vector)); + patch_uint(vector, save_vector); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 8633891b7aa5..b4426a35aca3 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/time.h> @@ -77,7 +78,7 @@ long __init pmac_time_init(void) delta |= 0xFF000000UL; dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0); printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60, - dst ?
"on" : "off"); + str_on_off(dst)); #endif return delta; } diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 70a46acc70d6..95d7ba73d43d 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -17,6 +17,7 @@ config PPC_POWERNV select MMU_NOTIFIER select FORCE_SMP select ARCH_SUPPORTS_PER_VMA_LOCK + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config OPAL_PRD diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 19f0fc5c6f1b..9e5d0c847ee2 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o obj-$(CONFIG_OPAL_CORE) += opal-core.o obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o obj-$(CONFIG_PCI_IOV) += pci-sriov.o -obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index af3a5d37a149..db3370d1673c 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -99,7 +99,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp, static const struct file_operations pnv_eeh_ei_fops = { .open = simple_open, - .llseek = no_llseek, .write = pnv_eeh_ei_write, }; @@ -860,7 +859,7 @@ static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) int64_t rc; /* Hot reset to the bus if firmware cannot handle */ - if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) + if (!dn || !of_property_present(dn, "ibm,reset-by-firmware")) return __pnv_eeh_bridge_reset(pdev, option); pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index ad41dffe4d92..d98b933e4984 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -18,7 +18,7 @@ #include <asm/opal.h> #include <asm/cputhreads.h> #include <asm/cpuidle.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/smp.h> #include <asm/runlatch.h> #include <asm/dbell.h> diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 877720c64515..2ea30b343354 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -48,11 +48,15 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf, static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma) { struct memtrace_entry *ent = filp->private_data; + unsigned long ent_nrpages = ent->size >> PAGE_SHIFT; + unsigned long vma_nrpages = vma_pages(vma); - if (ent->size < vma->vm_end - vma->vm_start) + /* The requested page offset should be within object's page count */ + if (vma->vm_pgoff >= ent_nrpages) return -EINVAL; - if (vma->vm_pgoff << PAGE_SHIFT >= ent->size) + /* The requested mapping range should remain within the bounds */ + if (vma_nrpages > ent_nrpages - vma->vm_pgoff) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -88,26 +92,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, } } -static void memtrace_clear_range(unsigned long start_pfn, - unsigned long nr_pages) -{ - unsigned long pfn; - - /* As HIGHMEM does not apply, use clear_page() 
directly. */ - for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { - if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) - cond_resched(); - clear_page(__va(PFN_PHYS(pfn))); - } - /* - * Before we go ahead and use this range as cache inhibited range - * flush the cache. - */ - flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), - (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), - FLUSH_CHUNK_SIZE); -} - -static u64 memtrace_alloc_node(u32 nid, u64 size) { const unsigned long nr_pages = PHYS_PFN(size); @@ -119,17 +103,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) * by alloc_contig_pages(). */ page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | - __GFP_NOWARN, nid, NULL); + __GFP_NOWARN | __GFP_ZERO, nid, NULL); if (!page) return 0; start_pfn = page_to_pfn(page); /* - * Clear the range while we still have a linear mapping. - * - * TODO: use __GFP_ZERO with alloc_contig_pages() once supported. + * Before we go ahead and use this range as cache inhibited range + * flush the cache. */ - memtrace_clear_range(start_pfn, nr_pages); + flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), + (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), + FLUSH_CHUNK_SIZE); /* * Set pages PageOffline(), to indicate that nobody (e.g., hibernation, diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index c9a9b759cc92..e652da8f986f 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -149,7 +149,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, /* end of vector */ bufp[idx++] = cpu_to_be64(AT_NULL); - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV, + buf = append_elf64_note(buf, NN_AUXV, NT_AUXV, oc_conf->auxv_buf, AUXV_DESC_SZ); return buf; } @@ -159,7 +159,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, * Returns number of bytes read on success, -errno on failure. */ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { struct opalcore *m; @@ -206,9 +206,9 @@ static ssize_t read_opalcore(struct file *file, struct kobject *kobj, return (tpos - pos); } -static struct bin_attribute opal_core_attr = { +static struct bin_attribute opal_core_attr __ro_after_init = { .attr = {.name = "core", .mode = 0400}, - .read = read_opalcore + .read_new = read_opalcore }; /* @@ -252,7 +252,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) * crashing CPU's prstatus. */ first_cpu_note = buf; - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) { @@ -279,7 +279,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) fill_prstatus(&prstatus, thread_pir, &regs); if (thread_pir != oc_conf->crashing_cpu) { - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } else { /* * Add crashing CPU as the first NT_PRSTATUS note for * GDB to process the core file appropriately.
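(For readers new to the note stream being assembled here: every ELF64 note is a 12-byte (namesz, descsz, type) header followed by the name and the descriptor, each padded to 4 bytes, which is why append_elf64_note() can simply advance a flat buffer pointer. A rough stand-alone model of such an emitter; the kernel's real helper lives in the crash core code, so treat this as an assumption-level sketch:)

#include <elf.h>
#include <stdint.h>
#include <string.h>

static Elf64_Word *emit_note(Elf64_Word *buf, const char *name,
			     Elf64_Word type, const void *desc, size_t descsz)
{
	Elf64_Nhdr *hdr = (Elf64_Nhdr *)buf;
	size_t namesz = strlen(name) + 1;	/* count the NUL */

	hdr->n_namesz = namesz;
	hdr->n_descsz = descsz;
	hdr->n_type = type;
	buf = (Elf64_Word *)(hdr + 1);
	memcpy(buf, name, namesz);
	buf += (namesz + 3) / 4;		/* 4-byte pad after the name */
	memcpy(buf, desc, descsz);
	buf += (descsz + 3) / 4;		/* 4-byte pad after the desc */
	return buf;
}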
*/ - append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME, + append_elf64_note(first_cpu_note, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } @@ -599,7 +599,7 @@ static struct attribute *mpipl_attr[] = { NULL, }; -static struct bin_attribute *mpipl_bin_attr[] = { +static const struct bin_attribute *const mpipl_bin_attr[] = { &opal_core_attr, NULL, @@ -607,7 +607,7 @@ static struct bin_attribute *mpipl_bin_attr[] = { static const struct attribute_group mpipl_group = { .attrs = mpipl_attr, - .bin_attrs = mpipl_bin_attr, + .bin_attrs_new = mpipl_bin_attr, }; static int __init opalcore_init(void) diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 16c5860f1372..27e25693cf39 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c @@ -210,7 +210,7 @@ static struct attribute *dump_default_attrs[] = { }; ATTRIBUTE_GROUPS(dump_default); -static struct kobj_type dump_ktype = { +static const struct kobj_type dump_ktype = { .sysfs_ops = &dump_sysfs_ops, .release = &dump_release, .default_groups = dump_default_groups, @@ -286,7 +286,7 @@ out: } static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { ssize_t rc; @@ -342,7 +342,7 @@ static void create_dump_obj(uint32_t id, size_t size, uint32_t type) dump->dump_attr.attr.name = "dump"; dump->dump_attr.attr.mode = 0400; dump->dump_attr.size = size; - dump->dump_attr.read = dump_attr_read; + dump->dump_attr.read_new = dump_attr_read; dump->id = id; dump->size = size; diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 554fdd7f88b8..de33f354e9fd 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -146,7 +146,7 @@ static struct attribute *elog_default_attrs[] = { }; ATTRIBUTE_GROUPS(elog_default); -static struct kobj_type elog_ktype = { +static const struct kobj_type elog_ktype = { .sysfs_ops = &elog_sysfs_ops, .release = &elog_release, .default_groups = elog_default_groups, @@ -156,7 +156,7 @@ static struct kobj_type elog_ktype = { #define OPAL_MAX_ERRLOG_SIZE 16384 static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int opal_rc; @@ -203,7 +203,7 @@ static void create_elog_obj(uint64_t id, size_t size, uint64_t type) elog->raw_attr.attr.name = "raw"; elog->raw_attr.attr.mode = 0400; elog->raw_attr.size = size; - elog->raw_attr.read = raw_attr_read; + elog->raw_attr.read_new = raw_attr_read; elog->id = id; elog->size = size; diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c index 964f464b1b0e..c9c1dfb35464 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.c +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -513,8 +513,8 @@ out: final_note(note_buf); pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); + fadump_conf->elfcorehdr_addr); + fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr); return 0; } @@ -526,12 +526,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf) if (!opal_fdm_active || !fadump_conf->fadumphdr_addr) return rc; - /* Validate the fadump crash info header */ fdh = 
__va(fadump_conf->fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - pr_err("Crash info header is not valid.\n"); - return rc; - } #ifdef CONFIG_OPAL_CORE /* @@ -545,18 +540,7 @@ static int __init opal_fadump_process(struct fw_dump *fadump_conf) kernel_initiated = true; #endif - rc = opal_fadump_build_cpu_notes(fadump_conf, fdh); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. - */ - elfcorehdr_addr = fdh->elfcorehdr_addr; - - return rc; + return opal_fadump_build_cpu_notes(fadump_conf, fdh); } static void opal_fadump_region_show(struct fw_dump *fadump_conf, @@ -615,6 +599,12 @@ static void opal_fadump_trigger(struct fadump_crash_info_header *fdh, pr_emerg("No backend support for MPIPL!\n"); } +/* FADUMP_MAX_MEM_REGS or lower */ +static int opal_fadump_max_boot_mem_rgns(void) +{ + return FADUMP_MAX_MEM_REGS; +} + static struct fadump_ops opal_fadump_ops = { .fadump_init_mem_struct = opal_fadump_init_mem_struct, .fadump_get_metadata_size = opal_fadump_get_metadata_size, @@ -627,6 +617,7 @@ static struct fadump_ops opal_fadump_ops = { .fadump_process = opal_fadump_process, .fadump_region_show = opal_fadump_region_show, .fadump_trigger = opal_fadump_trigger, + .fadump_max_boot_mem_rgns = opal_fadump_max_boot_mem_rgns, }; void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) @@ -674,8 +665,10 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) } } - fadump_conf->ops = &opal_fadump_ops; - fadump_conf->fadump_supported = 1; + fadump_conf->ops = &opal_fadump_ops; + fadump_conf->fadump_supported = 1; + /* TODO: Add support to pass additional parameters */ + fadump_conf->param_area_supported = 0; /* * Firmware supports 32-bit field for size. Align it to PAGE_SIZE diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c index d5ea04e8e4c5..fd8c8621e973 100644 --- a/arch/powerpc/platforms/powernv/opal-flash.c +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -432,7 +432,7 @@ static int alloc_image_buf(char *buffer, size_t count) * and pre-allocate required memory. */ static ssize_t image_data_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int rc; @@ -493,7 +493,7 @@ out: static const struct bin_attribute image_data_attr = { .attr = {.name = "image", .mode = 0200}, .size = MAX_IMAGE_SIZE, /* Limit image size */ - .write = image_data_write, + .write_new = image_data_write, }; static struct kobj_attribute validate_attribute = diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 56a1f7ce78d2..e180bd8e1400 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -191,7 +191,8 @@ int __init opal_event_init(void) * fall back to the legacy method (opal_event_request(...)) * anyway. 
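(A recurring conversion in these sysfs hunks deserves one illustration: callbacks move from .read/.write to .read_new/.write_new, whose only signature difference is the const-qualified bin_attribute argument. A hedged minimal declaration in the new style, modeled on the OPAL attributes above; the names are invented:)

#include <linux/string.h>
#include <linux/sysfs.h>

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr, char *buf,
			 loff_t pos, size_t count)
{
	/* attr->private and attr->size are populated at setup time */
	return memory_read_from_buffer(buf, count, &pos, attr->private,
				       attr->size);
}

static const struct bin_attribute demo_attr = {
	.attr = { .name = "demo", .mode = 0400 },
	.read_new = demo_read,
};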
*/ dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event"); - opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, + opal_event_irqchip.domain = irq_domain_create_linear(of_fwnode_handle(dn), + MAX_NUM_EVENTS, &opal_event_domain_ops, &opal_event_irqchip); of_node_put(dn); if (!opal_event_irqchip.domain) { @@ -282,6 +283,7 @@ int __init opal_event_init(void) name, NULL); if (rc) { pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); + kfree(name); continue; } } diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c index 6c3bc4b4da98..bb4218fa796e 100644 --- a/arch/powerpc/platforms/powernv/opal-kmsg.c +++ b/arch/powerpc/platforms/powernv/opal-kmsg.c @@ -20,13 +20,13 @@ * message, it just ensures that OPAL completely flushes the console buffer. */ static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason) + struct kmsg_dump_detail *detail) { /* * Outside of a panic context the pollers will continue to run, * so we don't need to do any special flushing. */ - if (reason != KMSG_DUMP_PANIC) + if (detail->reason != KMSG_DUMP_PANIC) return; opal_flush_console(0); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index a16f07cdab26..8a7f39e106bd 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -393,7 +393,7 @@ void __init opal_lpc_init(void) for_each_compatible_node(np, NULL, "ibm,power8-lpc") { if (!of_device_is_available(np)) continue; - if (!of_get_property(np, "primary", NULL)) + if (!of_property_present(np, "primary")) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); of_node_put(np); diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 22d6efe17b0d..f1988d0ab45c 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -94,15 +94,15 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) } static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return opal_msglog_copy(to, pos, count); } -static struct bin_attribute opal_msglog_attr = { +static struct bin_attribute opal_msglog_attr __ro_after_init = { .attr = {.name = "msglog", .mode = 0400}, - .read = opal_msglog_read + .read_new = opal_msglog_read }; struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name) diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 24f04f20d3e8..dc246ed4b7b4 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -443,7 +443,7 @@ static struct platform_driver opal_prd_driver = { .of_match_table = opal_prd_match, }, .probe = opal_prd_probe, - .remove_new = opal_prd_remove, + .remove = opal_prd_remove, }; module_platform_driver(opal_prd_driver); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 45dd77e3ccf6..9ec265fcaff4 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -180,10 +180,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, /* * Allocate a buffer to hold the MC recoverable ranges. 
*/ - mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); - if (!mc_recoverable_range) - panic("%s: Failed to allocate %u bytes align=0x%lx\n", - __func__, size, __alignof__(u64)); + mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = @@ -792,14 +789,6 @@ static int __init opal_sysfs_init(void) return 0; } -static ssize_t export_attr_read(struct file *fp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t off, size_t count) -{ - return memory_read_from_buffer(buf, count, &off, bin_attr->private, - bin_attr->size); -} - static int opal_add_one_export(struct kobject *parent, const char *export_name, struct device_node *np, const char *prop_name) { @@ -826,7 +815,7 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name, sysfs_bin_attr_init(attr); attr->attr.name = name; attr->attr.mode = 0400; - attr->read = export_attr_read; + attr->read_new = sysfs_bin_attr_simple_read; attr->private = __va(vals[0]); attr->size = vals[1]; diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c deleted file mode 100644 index 7e419de71db8..000000000000 --- a/arch/powerpc/platforms/powernv/pci-cxl.c +++ /dev/null @@ -1,153 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2014-2016 IBM Corp. - */ - -#include <linux/module.h> -#include <misc/cxl-base.h> -#include <asm/pnv-pci.h> -#include <asm/opal.h> - -#include "pci.h" - -int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - struct pnv_ioda_pe *pe; - int rc; - - pe = pnv_ioda_get_pe(dev); - if (!pe) - return -ENODEV; - - pe_info(pe, "Switching PHB to CXL\n"); - - rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number); - if (rc == OPAL_UNSUPPORTED) - dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n"); - else if (rc) - dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc); - - return rc; -} -EXPORT_SYMBOL(pnv_phb_to_cxl_mode); - -/* Find PHB for cxl dev and allocate MSI hwirqs? 
- * Returns the absolute hardware IRQ number - */ -int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num); - - if (hwirq < 0) { - dev_warn(&dev->dev, "Failed to find a free MSI\n"); - return -ENOSPC; - } - - return phb->msi_base + hwirq; -} -EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs); - -void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - - msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num); -} -EXPORT_SYMBOL(pnv_cxl_release_hwirqs); - -void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int i, hwirq; - - for (i = 1; i < CXL_IRQ_RANGES; i++) { - if (!irqs->range[i]) - continue; - pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n", - i, irqs->offset[i], - irqs->range[i]); - hwirq = irqs->offset[i] - phb->msi_base; - msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, - irqs->range[i]); - } -} -EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges); - -int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, - struct pci_dev *dev, int num) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - int i, hwirq, try; - - memset(irqs, 0, sizeof(struct cxl_irq_ranges)); - - /* 0 is reserved for the multiplexed PSL DSI interrupt */ - for (i = 1; i < CXL_IRQ_RANGES && num; i++) { - try = num; - while (try) { - hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try); - if (hwirq >= 0) - break; - try /= 2; - } - if (!try) - goto fail; - - irqs->offset[i] = phb->msi_base + hwirq; - irqs->range[i] = try; - pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n", - i, irqs->offset[i], irqs->range[i]); - num -= try; - } - if (num) - goto fail; - - return 0; -fail: - pnv_cxl_release_hwirq_ranges(irqs, dev); - return -ENOSPC; -} -EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges); - -int pnv_cxl_get_irq_count(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - - return phb->msi_bmp.irq_count; -} -EXPORT_SYMBOL(pnv_cxl_get_irq_count); - -int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, - unsigned int virq) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - struct pnv_phb *phb = hose->private_data; - unsigned int xive_num = hwirq - phb->msi_base; - struct pnv_ioda_pe *pe; - int rc; - - if (!(pe = pnv_ioda_get_pe(dev))) - return -ENODEV; - - /* Assign XIVE to PE */ - rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); - if (rc) { - pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x " - "hwirq 0x%x XIVE 0x%x PE\n", - pci_name(dev), rc, phb->msi_base, hwirq, xive_num); - return -EIO; - } - pnv_set_msi_irq_chip(phb, virq); - - return 0; -} -EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 23f5b5093ec1..d8ccf2c9b98a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -39,8 +39,6 @@ #include <asm/mmzone.h> #include <asm/xive.h> -#include <misc/cxl-base.h> - #include "powernv.h" #include "pci.h" #include "../../../../drivers/pci/pci.h" @@ -1537,7 +1535,8 @@ static void 
pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) } } -static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) +static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); @@ -1562,7 +1561,8 @@ static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) return 0; } -static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) +static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); @@ -1634,47 +1634,6 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d) return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq); } -/* - * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers - */ -static void pnv_ioda2_msi_eoi(struct irq_data *d) -{ - int64_t rc; - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - struct pci_controller *hose = irq_data_get_irq_chip_data(d); - struct pnv_phb *phb = hose->private_data; - - rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); - WARN_ON_ONCE(rc); - - icp_native_eoi(d); -} - -/* P8/CXL only */ -void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) -{ - struct irq_data *idata; - struct irq_chip *ichip; - - /* The MSI EOI OPAL call is only needed on PHB3 */ - if (phb->model != PNV_PHB_MODEL_PHB3) - return; - - if (!phb->ioda.irq_chip_init) { - /* - * First time we setup an MSI IRQ, we need to setup the - * corresponding IRQ chip to route correctly. - */ - idata = irq_get_irq_data(virq); - ichip = irq_data_get_irq_chip(idata); - phb->ioda.irq_chip_init = 1; - phb->ioda.irq_chip = *ichip; - phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; - } - irq_set_chip(virq, &phb->ioda.irq_chip); - irq_set_chip_data(virq, phb->hose); -} - static struct irq_chip pnv_pci_msi_irq_chip; /* @@ -1922,7 +1881,7 @@ static const struct irq_domain_ops pnv_irq_domain_ops = { static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count) { struct pnv_phb *phb = hose->private_data; - struct irq_domain *parent = irq_get_default_host(); + struct irq_domain *parent = irq_get_default_domain(); hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id); if (!hose->fwnode) @@ -1938,7 +1897,7 @@ static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned return -ENOMEM; } - hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), + hose->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(hose->dn), &pnv_msi_domain_info, hose->dev_domain); if (!hose->msi_domain) { diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c index 59882da3e742..cc7b1dd54ac6 100644 --- a/arch/powerpc/platforms/powernv/pci-sriov.c +++ b/arch/powerpc/platforms/powernv/pci-sriov.c @@ -238,7 +238,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) } else if (pdev->is_physfn) { /* * For PFs adjust their allocated IOV resources to match what - * the PHB can support using it's M64 BAR table. + * the PHB can support using its M64 BAR table. 
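(Stepping out of the SR-IOV hunk for a moment: several hunks in this series perform one mechanical interrupt-domain conversion, from device_node-based constructors to fwnode-based ones. The shape of the change, sketched with an invented ops table and a fixed domain size:)

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *demo_create_domain(struct device_node *np,
					     const struct irq_domain_ops *ops,
					     void *priv)
{
	/* Formerly irq_domain_add_linear(np, 16, ops, priv);
	 * of_fwnode_handle() wraps the OF node in a generic fwnode_handle. */
	return irq_domain_create_linear(of_fwnode_handle(np), 16, ops, priv);
}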
*/ pnv_pci_ioda_fixup_iov_resources(pdev); } @@ -658,7 +658,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) list_add_tail(&pe->list, &phb->ioda.pe_list); mutex_unlock(&phb->ioda.pe_list_mutex); - /* associate this pe to it's pdn */ + /* associate this pe to its pdn */ list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { if (vf_pdn->busno == vf_bus && vf_pdn->devfn == vf_devfn) { diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 35f566aa0424..b2c1da025410 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -14,7 +14,6 @@ #include <linux/io.h> #include <linux/msi.h> #include <linux/iommu.h> -#include <linux/sched/mm.h> #include <asm/sections.h> #include <asm/io.h> @@ -33,8 +32,6 @@ #include "powernv.h" #include "pci.h" -static DEFINE_MUTEX(tunnel_mutex); - int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) { struct device_node *node = np; @@ -744,64 +741,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid) return tbl; } -struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) -{ - struct pci_controller *hose = pci_bus_to_host(dev->bus); - - return of_node_get(hose->dn); -} -EXPORT_SYMBOL(pnv_pci_get_phb_node); - -int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) -{ - struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); - u64 tunnel_bar; - __be64 val; - int rc; - - if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) - return -ENXIO; - if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) - return -ENXIO; - - mutex_lock(&tunnel_mutex); - rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); - if (rc != OPAL_SUCCESS) { - rc = -EIO; - goto out; - } - tunnel_bar = be64_to_cpu(val); - if (enable) { - /* - * Only one device per PHB can use atomics. - * Our policy is first-come, first-served. - */ - if (tunnel_bar) { - if (tunnel_bar != addr) - rc = -EBUSY; - else - rc = 0; /* Setting same address twice is ok */ - goto out; - } - } else { - /* - * The device that owns atomics and wants to release - * them must pass the same address with enable == 0. 
- */ - if (tunnel_bar != addr) { - rc = -EPERM; - goto out; - } - addr = 0x0ULL; - } - rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); - rc = opal_error_code(rc); -out: - mutex_unlock(&tunnel_mutex); - return rc; -} -EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); - void pnv_pci_shutdown(void) { struct pci_controller *hose; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 957f2b47a3c0..42075501663b 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -163,7 +163,6 @@ struct pnv_phb { unsigned int *io_segmap; /* IRQ chip */ - int irq_chip_init; struct irq_chip irq_chip; /* Sorted list of used PE's based @@ -274,7 +273,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn, int where, int size, u32 val); extern struct iommu_table *pnv_pci_table_alloc(int nid); -extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np); extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); @@ -282,7 +280,6 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn); extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev); -extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq); extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, __u64 window_size, __u32 levels); extern int pnv_eeh_post_init(void); diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 9e1a25398f98..8f41ef364fc6 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -28,7 +28,7 @@ #include <asm/xive.h> #include <asm/opal.h> #include <asm/runlatch.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/dbell.h> #include <asm/kvm_ppc.h> #include <asm/ppc-opcode.h> @@ -36,6 +36,7 @@ #include <asm/kexec.h> #include <asm/reg.h> #include <asm/powernv.h> +#include <asm/systemcfg.h> #include "powernv.h" @@ -136,7 +137,9 @@ static int pnv_smp_cpu_disable(void) * the generic fixup_irqs. --BenH. 
*/ set_cpu_online(cpu, false); - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; +#endif if (cpu == boot_cpuid) boot_cpuid = cpumask_any(cpu_online_mask); if (xive_enabled()) @@ -434,7 +437,7 @@ void __init pnv_smp_init(void) smp_ops = &pnv_smp_ops; #ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_KEXEC_CORE +#ifdef CONFIG_CRASH_DUMP crash_wake_offline = 1; #endif #endif diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c index 67c8c4b2d8b1..157d9a8134e4 100644 --- a/arch/powerpc/platforms/powernv/ultravisor.c +++ b/arch/powerpc/platforms/powernv/ultravisor.c @@ -32,15 +32,15 @@ int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname, static struct memcons *uv_memcons; static ssize_t uv_msglog_read(struct file *file, struct kobject *kobj, - struct bin_attribute *bin_attr, char *to, + const struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return memcons_copy(uv_memcons, to, pos, count); } -static struct bin_attribute uv_msglog_attr = { +static struct bin_attribute uv_msglog_attr __ro_after_init = { .attr = {.name = "msglog", .mode = 0400}, - .read = uv_msglog_read + .read_new = uv_msglog_read }; static int __init uv_init(void) diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index b664838008c1..5147df3a18ac 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1059,7 +1059,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, } } else { /* - * Interrupt hanlder or fault window setup failed. Means + * Interrupt handler or fault window setup failed. Means * NX can not generate fault for page fault. So not * opening for user space tx window. */ diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index 878bc160246e..22d91ac424dd 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/reboot.h> #include <linux/rcuwait.h> +#include <linux/string_choices.h> #include <asm/firmware.h> #include <asm/lv1call.h> @@ -178,7 +179,7 @@ fail_malloc: return result; } -static int __ref ps3_setup_uhc_device( +static int __init ps3_setup_uhc_device( const struct ps3_repository_device *repo, enum ps3_match_id match_id, enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type) { @@ -724,7 +725,7 @@ static irqreturn_t ps3_notification_interrupt(int irq, void *data) static int ps3_notification_read_write(struct ps3_notification_device *dev, u64 lpar, int write) { - const char *op = write ? 
"write" : "read"; + const char *op = str_write_read(write); unsigned long flags; int res; @@ -770,49 +771,51 @@ static struct task_struct *probe_task; static int ps3_probe_thread(void *data) { - struct ps3_notification_device dev; + struct { + struct ps3_notification_device dev; + u8 buf[512]; + } *local; + struct ps3_notify_cmd *notify_cmd; + struct ps3_notify_event *notify_event; int res; unsigned int irq; u64 lpar; - void *buf; - struct ps3_notify_cmd *notify_cmd; - struct ps3_notify_event *notify_event; pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__); - buf = kzalloc(512, GFP_KERNEL); - if (!buf) + local = kzalloc(sizeof(*local), GFP_KERNEL); + if (!local) return -ENOMEM; - lpar = ps3_mm_phys_to_lpar(__pa(buf)); - notify_cmd = buf; - notify_event = buf; + lpar = ps3_mm_phys_to_lpar(__pa(&local->buf)); + notify_cmd = (struct ps3_notify_cmd *)&local->buf; + notify_event = (struct ps3_notify_event *)&local->buf; /* dummy system bus device */ - dev.sbd.bus_id = (u64)data; - dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; - dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; + local->dev.sbd.bus_id = (u64)data; + local->dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID; + local->dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID; - res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0); + res = lv1_open_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id, 0); if (res) { pr_err("%s:%u: lv1_open_device failed %s\n", __func__, __LINE__, ps3_result(res)); goto fail_free; } - res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY, - &irq); + res = ps3_sb_event_receive_port_setup(&local->dev.sbd, + PS3_BINDING_CPU_ANY, &irq); if (res) { pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n", __func__, __LINE__, res); goto fail_close_device; } - spin_lock_init(&dev.lock); - rcuwait_init(&dev.wait); + spin_lock_init(&local->dev.lock); + rcuwait_init(&local->dev.wait); res = request_irq(irq, ps3_notification_interrupt, 0, - "ps3_notification", &dev); + "ps3_notification", &local->dev); if (res) { pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__, res); @@ -823,7 +826,7 @@ static int ps3_probe_thread(void *data) notify_cmd->operation_code = 0; /* must be zero */ notify_cmd->event_mask = 1UL << notify_region_probe; - res = ps3_notification_read_write(&dev, lpar, 1); + res = ps3_notification_read_write(&local->dev, lpar, 1); if (res) goto fail_free_irq; @@ -834,36 +837,37 @@ static int ps3_probe_thread(void *data) memset(notify_event, 0, sizeof(*notify_event)); - res = ps3_notification_read_write(&dev, lpar, 0); + res = ps3_notification_read_write(&local->dev, lpar, 0); if (res) break; pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu" " type %llu port %llu\n", __func__, __LINE__, - notify_event->event_type, notify_event->bus_id, - notify_event->dev_id, notify_event->dev_type, - notify_event->dev_port); + notify_event->event_type, notify_event->bus_id, + notify_event->dev_id, notify_event->dev_type, + notify_event->dev_port); if (notify_event->event_type != notify_region_probe || - notify_event->bus_id != dev.sbd.bus_id) { + notify_event->bus_id != local->dev.sbd.bus_id) { pr_warn("%s:%u: bad notify_event: event %llu, dev_id %llu, dev_type %llu\n", __func__, __LINE__, notify_event->event_type, notify_event->dev_id, notify_event->dev_type); continue; } - ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id); + ps3_find_and_add_device(local->dev.sbd.bus_id, + notify_event->dev_id); } while (!kthread_should_stop()); fail_free_irq: - free_irq(irq, 
&dev); + free_irq(irq, &local->dev); fail_sb_event_receive_port_destroy: - ps3_sb_event_receive_port_destroy(&dev.sbd, irq); + ps3_sb_event_receive_port_destroy(&local->dev.sbd, irq); fail_close_device: - lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id); + lv1_close_device(local->dev.sbd.bus_id, local->dev.sbd.dev_id); fail_free: - kfree(buf); + kfree(local); probe_task = NULL; diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 49871427f599..a4ad4b49eef7 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -378,9 +378,9 @@ int ps3_send_event_locally(unsigned int virq) /** * ps3_sb_event_receive_port_setup - Setup a system bus event receive port. + * @dev: The system bus device instance. * @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be * serviced on. - * @dev: The system bus device instance. * @virq: The assigned Linux virq. * * An event irq represents a virtual device interrupt. The interrupt_id @@ -743,8 +743,8 @@ void __init ps3_init_IRQ(void) unsigned cpu; struct irq_domain *host; - host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); - irq_set_default_host(host); + host = irq_domain_create_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); + irq_set_default_domain(host); for_each_possible_cpu(cpu) { struct ps3_private *pd = &per_cpu(ps3_private, cpu); diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 1abe33fbe529..b8c030eab138 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -940,7 +940,7 @@ int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port) /** * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area. 
- * address: lpar address of cell_ext_os_area + * @lpar_addr: lpar address of cell_ext_os_area * @size: size of cell_ext_os_area */ diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 5144f11359f7..150c09b58ae8 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -115,10 +115,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = memblock_alloc(p->size, p->align); - if (!p->address) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, p->size, p->align); + p->address = memblock_alloc_or_panic(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 4a2520ec6d7f..61b37c9400b2 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -190,10 +190,10 @@ static void spu_unmap(struct spu *spu) static int __init setup_areas(struct spu *spu) { struct table {char* name; unsigned long addr; unsigned long size;}; - unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, - sizeof(struct spe_shadow), shadow_flags); + sizeof(struct spe_shadow), + pgprot_noncached_wc(PAGE_KERNEL_RO)); if (!spu_pdata(spu)->shadow) { pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); goto fail_ioremap; diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index d6b5f5ecd515..afbaabf182d0 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -333,10 +333,10 @@ int ps3_mmio_region_init(struct ps3_system_bus_device *dev, EXPORT_SYMBOL_GPL(ps3_mmio_region_init); static int ps3_system_bus_match(struct device *_dev, - struct device_driver *_drv) + const struct device_driver *_drv) { int result; - struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); + const struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv); struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); if (!dev->match_sub_id) @@ -453,10 +453,9 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a, char *buf) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); - int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id, - dev->match_sub_id); - return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; + return sysfs_emit(buf, "ps3:%d:%d\n", dev->match_id, + dev->match_sub_id); } static DEVICE_ATTR_RO(modalias); @@ -695,7 +694,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; @@ -709,7 +708,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index afc0f6a61337..fa3c2fff082a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -23,6 +23,7 @@ config PPC_PSERIES select FORCE_SMP select SWIOTLB select ARCH_SUPPORTS_PER_VMA_LOCK + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config PARAVIRT @@ -128,6 +129,15 @@ config CMM will be reused for other LPARs. The interface allows firmware to balance memory across many LPARs. +config HTMDUMP + tristate "PowerVM data dumper" + depends on PPC_PSERIES && DEBUG_FS + default m + help + Select this option if you want to enable the kernel debugfs + interface to dump the Hardware Trace Macro (HTM) function data + in the LPAR. + config HV_PERF_CTRS bool "Hypervisor supplied PMU events (24x7 & GPCI)" default y @@ -140,6 +150,20 @@ config HV_PERF_CTRS If unsure, select Y. +config VPA_PMU + tristate "VPA PMU events" + depends on KVM_BOOK3S_64_HV && HV_PERF_CTRS + help + Enable access to the VPA PMU counters via perf. This enables + code that supports measurement for the KVM on PowerVM (KoP) feature. + The PAPR hypervisor has introduced three new counters in the VPA area + of LPAR CPUs for KVM L2 guest observability. Two for context switches + from host to guest and vice versa, and one counter for getting + the total time spent inside the KVM guest. This config enables code + that accesses these software counters via perf. + + If unsure, select N. 
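For orientation, the VPA_PMU counters above are consumed through the generic perf interface. A minimal userspace sketch (not part of this patch; the vpa_pmu name and the config value 0 are assumptions, to be checked against /sys/bus/event_source/devices/vpa_pmu/events/ on a real system) of opening one counter with perf_event_open(2):

/* Hypothetical userspace sketch; not kernel code from this diff. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	FILE *f;
	int type, fd;

	/* Dynamic PMUs get their type id at boot; read it from sysfs (assumed path). */
	f = fopen("/sys/bus/event_source/devices/vpa_pmu/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0;	/* assumed event encoding; see .../vpa_pmu/events/ */

	/* Count for all tasks on CPU 0; needs sufficient privileges. */
	fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("vpa_pmu config=0: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}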
+ config IBMVIO depends on PPC_PSERIES bool diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index f936962a2946..57222678bb3f 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -1,10 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ of_helpers.o rtas-work-area.o papr-sysparm.o \ - papr-vpd.o \ + papr-rtas-common.o papr-vpd.o papr-indices.o \ + papr-platform-dump.o papr-phy-attest.o \ setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o mobility.o rng.o \ pci.o pci_dlpar.o eeh_pseries.o msi.o \ @@ -20,6 +20,7 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o obj-$(CONFIG_HVCS) += hvcserver.o obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o obj-$(CONFIG_CMM) += cmm.o +obj-$(CONFIG_HTMDUMP) += htmdump.o obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o obj-$(CONFIG_LPARCFG) += lparcfg.o obj-$(CONFIG_IBMVIO) += vio.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 47f8eabd1bee..213aa26dc8b3 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -23,6 +23,7 @@ #include <linux/uaccess.h> #include <asm/rtas.h> #include <asm/rtas-work-area.h> +#include <asm/prom.h> static struct workqueue_struct *pseries_hp_wq; @@ -250,11 +251,8 @@ int dlpar_detach_node(struct device_node *dn) struct device_node *child; int rc; - child = of_get_next_child(dn, NULL); - while (child) { + for_each_child_of_node(dn, child) dlpar_detach_node(child); - child = of_get_next_child(dn, child); - } rc = of_detach_node(dn); if (rc) @@ -264,6 +262,20 @@ int dlpar_detach_node(struct device_node *dn) return 0; } +static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs, + struct device_node *dn) +{ + int rc; + + rc = of_changeset_attach_node(ocs, dn); + + if (!rc && dn->child) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child); + if (!rc && dn->sibling) + rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling); + + return rc; +} #define DR_ENTITY_SENSE 9003 #define DR_ENTITY_PRESENT 1 @@ -330,27 +342,206 @@ int dlpar_unisolate_drc(u32 drc_index) return 0; } -int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +static struct device_node * +get_device_node_with_drc_index(u32 index) +{ + struct device_node *np = NULL; + u32 node_index; + int rc; + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", + &node_index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + return NULL; + } + + if (index == node_index) + break; + } + + return np; +} + +static struct device_node * +get_device_node_with_drc_info(u32 index) +{ + struct device_node *np = NULL; + struct of_drc_info drc; + struct property *info; + const __be32 *value; + u32 node_index; + int i, j, count; + + for_each_node_with_property(np, "ibm,drc-info") { + info = of_find_property(np, "ibm,drc-info", NULL); + if (info == NULL) { + /* XXX can this happen? 
*/ + of_node_put(np); + return NULL; + } + value = of_prop_next_u32(info, NULL, &count); + if (value == NULL) + continue; + value++; + for (i = 0; i < count; i++) { + if (of_read_drc_info_cell(&info, &value, &drc)) + break; + if (index > drc.last_drc_index) + continue; + node_index = drc.drc_index_start; + for (j = 0; j < drc.num_sequential_elems; j++) { + if (index == node_index) + return np; + node_index += drc.sequential_inc; + } + } + } + + return NULL; +} + +static int dlpar_hp_dt_add(u32 index) +{ + struct device_node *np, *nodes; + struct of_changeset ocs; + int rc; + + /* + * Do not add device node(s) if already exists in the + * device tree. + */ + np = get_device_node_with_drc_index(index); + if (np) { + pr_err("%s: Adding device node for index (%d), but " + "already exists in the device tree\n", + __func__, index); + rc = -EINVAL; + goto out; + } + + np = get_device_node_with_drc_info(index); + + if (!np) + return -EIO; + + /* Next, configure the connector. */ + nodes = dlpar_configure_connector(cpu_to_be32(index), np); + if (!nodes) { + rc = -EIO; + goto out; + } + + /* + * Add the new nodes from dlpar_configure_connector() onto + * the device-tree. + */ + of_changeset_init(&ocs); + rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes); + + if (!rc) + rc = of_changeset_apply(&ocs); + else + dlpar_free_cc_nodes(nodes); + + of_changeset_destroy(&ocs); + +out: + of_node_put(np); + return rc; +} + +static int changeset_detach_node_recursive(struct of_changeset *ocs, + struct device_node *node) { + struct device_node *child; int rc; - /* pseries error logs are in BE format, convert to cpu type */ - switch (hp_elog->id_type) { - case PSERIES_HP_ELOG_ID_DRC_COUNT: - hp_elog->_drc_u.drc_count = - be32_to_cpu(hp_elog->_drc_u.drc_count); + for_each_child_of_node(node, child) { + rc = changeset_detach_node_recursive(ocs, child); + if (rc) { + of_node_put(child); + return rc; + } + } + + return of_changeset_detach_node(ocs, node); +} + +static int dlpar_hp_dt_remove(u32 drc_index) +{ + struct device_node *np; + struct of_changeset ocs; + u32 index; + int rc = 0; + + /* + * Prune all nodes with a matching index. 
+ */ + of_changeset_init(&ocs); + + for_each_node_with_property(np, "ibm,my-drc-index") { + rc = of_property_read_u32(np, "ibm,my-drc-index", &index); + if (rc) { + pr_err("%s: %pOF: of_property_read_u32 %s: %d\n", + __func__, np, "ibm,my-drc-index", rc); + of_node_put(np); + goto out; + } + + if (index == drc_index) { + rc = changeset_detach_node_recursive(&ocs, np); + if (rc) { + of_node_put(np); + goto out; + } + } + } + + rc = of_changeset_apply(&ocs); + +out: + of_changeset_destroy(&ocs); + return rc; +} + +static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe) +{ + u32 drc_index; + int rc; + + if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) + return -EINVAL; + + drc_index = be32_to_cpu(phpe->_drc_u.drc_index); + + lock_device_hotplug(); + + switch (phpe->action) { + case PSERIES_HP_ELOG_ACTION_ADD: + rc = dlpar_hp_dt_add(drc_index); break; - case PSERIES_HP_ELOG_ID_DRC_INDEX: - hp_elog->_drc_u.drc_index = - be32_to_cpu(hp_elog->_drc_u.drc_index); + case PSERIES_HP_ELOG_ACTION_REMOVE: + rc = dlpar_hp_dt_remove(drc_index); + break; + default: + pr_err("Invalid action (%d) specified\n", phpe->action); + rc = -EINVAL; break; - case PSERIES_HP_ELOG_ID_DRC_IC: - hp_elog->_drc_u.ic.count = - be32_to_cpu(hp_elog->_drc_u.ic.count); - hp_elog->_drc_u.ic.index = - be32_to_cpu(hp_elog->_drc_u.ic.index); } + unlock_device_hotplug(); + + return rc; +} + +int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +{ + int rc; + switch (hp_elog->resource) { case PSERIES_HP_ELOG_RESOURCE_MEM: rc = dlpar_memory(hp_elog); @@ -361,6 +552,9 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_RESOURCE_PMEM: rc = dlpar_hp_pmem(hp_elog); break; + case PSERIES_HP_ELOG_RESOURCE_DT: + rc = dlpar_hp_dt(hp_elog); + break; default: pr_warn_ratelimited("Invalid resource (%d) specified\n", @@ -413,6 +607,8 @@ static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog) hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; } else if (sysfs_streq(arg, "cpu")) { hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU; + } else if (sysfs_streq(arg, "dt")) { + hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT; } else { pr_err("Invalid resource specified.\n"); return -EINVAL; @@ -554,7 +750,7 @@ dlpar_store_out: static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", "memory,cpu"); + return sprintf(buf, "%s\n", "memory,cpu,dt"); } static CLASS_ATTR_RW(dlpar); diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 3f1cdccebc9c..f293588b8c7b 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -191,7 +191,7 @@ static int dtl_enable(struct dtl *dtl) return -EBUSY; /* ensure there are no other conflicting dtl users */ - if (!read_trylock(&dtl_access_lock)) + if (!down_read_trylock(&dtl_access_lock)) return -EBUSY; n_entries = dtl_buf_entries; @@ -199,7 +199,7 @@ static int dtl_enable(struct dtl *dtl) if (!buf) { printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", __func__, dtl->cpu); - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); return -ENOMEM; } @@ -217,7 +217,7 @@ static int dtl_enable(struct dtl *dtl) spin_unlock(&dtl->lock); if (rc) { - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); kmem_cache_free(dtl_cache, buf); } @@ -232,7 +232,7 @@ static void dtl_disable(struct dtl *dtl) dtl->buf = NULL; dtl->buf_entries = 0; spin_unlock(&dtl->lock); - read_unlock(&dtl_access_lock); + 
up_read(&dtl_access_lock); } /* file interface */ @@ -325,7 +325,6 @@ static const struct file_operations dtl_fops = { .open = dtl_file_open, .release = dtl_file_release, .read = dtl_file_read, - .llseek = no_llseek, }; static struct dentry *dtl_dir; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index b1ae0c0d1187..b12ef382fec7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) switch(rets[0]) { case 0: - result = EEH_STATE_MMIO_ACTIVE | - EEH_STATE_DMA_ACTIVE; + result = EEH_STATE_MMIO_ACTIVE | + EEH_STATE_DMA_ACTIVE | + EEH_STATE_MMIO_ENABLED | + EEH_STATE_DMA_ENABLED; break; case 1: result = EEH_STATE_RESET_ACTIVE | @@ -784,6 +786,43 @@ static int pseries_notify_resume(struct eeh_dev *edev) } #endif +/** + * pseries_eeh_err_inject - Inject the specified error into the indicated PE + * @pe: the indicated PE + * @type: error type + * @func: specific error type + * @addr: address + * @mask: address mask + * The routine is called to inject the specified error, which is + * determined by @type and @func, into the indicated PE + */ +static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func, + unsigned long addr, unsigned long mask) +{ + struct eeh_dev *pdev; + + /* Check on PCI error type */ + if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64) + return -EINVAL; + + switch (func) { + case EEH_ERR_FUNC_LD_MEM_ADDR: + case EEH_ERR_FUNC_LD_MEM_DATA: + case EEH_ERR_FUNC_ST_MEM_ADDR: + case EEH_ERR_FUNC_ST_MEM_DATA: + /* injects an MMIO error for all pdevs belonging to the PE */ + pci_lock_rescan_remove(); + list_for_each_entry(pdev, &pe->edevs, entry) + eeh_pe_inject_mmio_error(pdev->pdev); + pci_unlock_rescan_remove(); + break; + default: + return -ERANGE; + } + + return 0; +} + static struct eeh_ops pseries_eeh_ops = { .name = "pseries", .probe = pseries_eeh_probe, @@ -792,7 +831,7 @@ static struct eeh_ops pseries_eeh_ops = { .reset = pseries_eeh_reset, .get_log = pseries_eeh_get_log, .configure_bridge = pseries_eeh_configure_bridge, - .err_inject = NULL, + .err_inject = pseries_eeh_err_inject, .read_config = pseries_eeh_read_config, .write_config = pseries_eeh_write_config, .next_error = NULL, diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index e62835a12d73..bc6926dbf148 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -33,6 +33,7 @@ #include <asm/xive.h> #include <asm/plpar_wrappers.h> #include <asm/topology.h> +#include <asm/systemcfg.h> #include "pseries.h" @@ -83,7 +84,9 @@ static int pseries_cpu_disable(void) int cpu = smp_processor_id(); set_cpu_online(cpu, false); - vdso_data->processorCount--; +#ifdef CONFIG_PPC64_PROC_SYSTEMCFG + systemcfg->processorCount--; +#endif /*fix boot_cpuid here*/ if (cpu == boot_cpuid) @@ -757,7 +760,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) u32 drc_index; int rc; - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 3fe3ddb30c04..38dc4f7c9296 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -817,16 +817,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case 
PSERIES_HP_ELOG_ACTION_ADD: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_add_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_add_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_add_by_ic(count, drc_index); break; default: @@ -838,16 +838,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_ACTION_REMOVE: switch (hp_elog->id_type) { case PSERIES_HP_ELOG_ID_DRC_COUNT: - count = hp_elog->_drc_u.drc_count; + count = be32_to_cpu(hp_elog->_drc_u.drc_count); rc = dlpar_memory_remove_by_count(count); break; case PSERIES_HP_ELOG_ID_DRC_INDEX: - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); rc = dlpar_memory_remove_by_index(drc_index); break; case PSERIES_HP_ELOG_ID_DRC_IC: - count = hp_elog->_drc_u.ic.count; - drc_index = hp_elog->_drc_u.ic.index; + count = be32_to_cpu(hp_elog->_drc_u.ic.count); + drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); rc = dlpar_memory_remove_by_ic(count, drc_index); break; default: diff --git a/arch/powerpc/platforms/pseries/htmdump.c b/arch/powerpc/platforms/pseries/htmdump.c new file mode 100644 index 000000000000..742ec52c9d4d --- /dev/null +++ b/arch/powerpc/platforms/pseries/htmdump.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) IBM Corporation, 2024 + */ + +#define pr_fmt(fmt) "htmdump: " fmt + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <asm/io.h> +#include <asm/machdep.h> +#include <asm/plpar_wrappers.h> +#include <asm/kvm_guest.h> + +static void *htm_buf; +static void *htm_status_buf; +static void *htm_info_buf; +static void *htm_caps_buf; +static u32 nodeindex; +static u32 nodalchipindex; +static u32 coreindexonchip; +static u32 htmtype; +static u32 htmconfigure; +static u32 htmstart; +static u32 htmsetup; +static u64 htmflags; + +static struct dentry *htmdump_debugfs_dir; +#define HTM_ENABLE 1 +#define HTM_DISABLE 0 +#define HTM_NOWRAP 1 +#define HTM_WRAP 0 + +/* + * Check the return code of the H_HTM hcall. + * Return a non-zero value (1) if either H_PARTIAL or H_SUCCESS + * is returned. For other return codes: + * Return zero if H_NOT_AVAILABLE. + * Return -EBUSY if the hcall returns busy. + * Return -EINVAL if any parameter or operation is not valid. + * Return -EPERM if HTM Virtualization Engine Technology code + * is not applied. + * Return -EIO if the HTM state is not valid. + */ +static ssize_t htm_return_check(long rc) +{ + switch (rc) { + case H_SUCCESS: + /* H_PARTIAL for the case where all available data can't be + * returned due to the buffer size constraint. + */ + case H_PARTIAL: + break; + /* H_NOT_AVAILABLE indicates reading from an offset outside the range, + * i.e. past end of file. 
+ */ + case H_NOT_AVAILABLE: + return 0; + case H_BUSY: + case H_LONG_BUSY_ORDER_1_MSEC: + case H_LONG_BUSY_ORDER_10_MSEC: + case H_LONG_BUSY_ORDER_100_MSEC: + case H_LONG_BUSY_ORDER_1_SEC: + case H_LONG_BUSY_ORDER_10_SEC: + case H_LONG_BUSY_ORDER_100_SEC: + return -EBUSY; + case H_PARAMETER: + case H_P2: + case H_P3: + case H_P4: + case H_P5: + case H_P6: + return -EINVAL; + case H_STATE: + return -EIO; + case H_AUTHORITY: + return -EPERM; + } + + /* + * Return 1 for H_SUCCESS/H_PARTIAL + */ + return 1; +} + +static ssize_t htmdump_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_buf = filp->private_data; + unsigned long page, read_size, available; + loff_t offset; + long rc, ret; + + page = ALIGN_DOWN(*ppos, PAGE_SIZE); + offset = (*ppos) % PAGE_SIZE; + + /* + * Invoke H_HTM call with: + * - operation as htm dump (H_HTM_OP_DUMP_DATA) + * - last three values are address, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_DATA, virt_to_phys(htm_buf), + PAGE_SIZE, page); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_DATA, returning %ld\n", ret); + return ret; + } + + available = PAGE_SIZE; + read_size = min(count, available); + *ppos += read_size; + return simple_read_from_buffer(ubuf, count, &offset, htm_buf, available); +} + +static const struct file_operations htmdump_fops = { + .llseek = NULL, + .read = htmdump_read, + .open = simple_open, +}; + +static int htmconfigure_set(void *data, u64 val) +{ + long rc, ret; + unsigned long param1 = -1, param2 = -1; + + /* + * value as 1 : configure HTM. + * value as 0 : deconfigure HTM. Return -EINVAL for + * other values. + */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm configure (H_HTM_OP_CONFIGURE) + * - If htmflags is set, param1 and param2 will be -1 + * which is an indicator to use default htm mode reg mask + * and htm mode reg value. + * - last three values are unused, hence set to zero + */ + if (!htmflags) { + param1 = 0; + param2 = 0; + } + + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CONFIGURE, param1, param2, 0); + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm deconfigure (H_HTM_OP_DECONFIGURE) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DECONFIGURE, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmconfigure if operation succeeds */ + htmconfigure = val; + + return 0; +} + +static int htmconfigure_get(void *data, u64 *val) +{ + *val = htmconfigure; + return 0; +} + +static int htmstart_set(void *data, u64 val) +{ + long rc, ret; + + /* + * value as 1: start HTM + * value as 0: stop HTM + * Return -EINVAL for other values. 
+ */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm start (H_HTM_OP_START) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_START, 0, 0, 0); + + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm stop (H_HTM_OP_STOP) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STOP, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmstart if H_HTM_OP_START/H_HTM_OP_STOP operation succeeds */ + htmstart = val; + + return 0; +} + +static int htmstart_get(void *data, u64 *val) +{ + *val = htmstart; + return 0; +} + +static ssize_t htmstatus_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_status_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + int htmstatus_flag; + + /* + * Invoke H_HTM call with: + * - operation as htm status (H_HTM_OP_STATUS) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STATUS, virt_to_phys(htm_status_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_STATUS, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each nest htm status + * entry is 0x6 bytes, while each core htm status entry is + * 0x8 bytes. + * So total count to copy is: + * 32 bytes (for first 7 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_status_buf + 0x10; + if (htmtype == 0x2) + htmstatus_flag = 0x8; + else + htmstatus_flag = 0x6; + to_copy = 32 + (be64_to_cpu(*num_entries) * htmstatus_flag); + return simple_read_from_buffer(ubuf, count, ppos, htm_status_buf, to_copy); +} + +static const struct file_operations htmstatus_fops = { + .llseek = NULL, + .read = htmstatus_read, + .open = simple_open, +}; + +static ssize_t htminfo_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_info_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + + /* + * Invoke H_HTM call with: + * - operation as dump sysproc conf (H_HTM_OP_DUMP_SYSPROC_CONF) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_SYSPROC_CONF, virt_to_phys(htm_info_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_SYSPROC_CONF, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each processor entry + * is 16 bytes. 
+ * + * So total count to copy is: + * 32 bytes (for first 5 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_info_buf + 0x10; + to_copy = 32 + (be64_to_cpu(*num_entries) * 16); + return simple_read_from_buffer(ubuf, count, ppos, htm_info_buf, to_copy); +} + +static ssize_t htmcaps_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_caps_buf = filp->private_data; + long rc, ret; + + /* + * Invoke H_HTM call with: + * - operation as htm capabilities (H_HTM_OP_CAPABILITIES) + * - last three values as addr, size (0x80 for the Capabilities Output Buffer) + * and zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CAPABILITIES, virt_to_phys(htm_caps_buf), + 0x80, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_CAPABILITIES, returning %ld\n", ret); + return ret; + } + + return simple_read_from_buffer(ubuf, count, ppos, htm_caps_buf, 0x80); +} + +static const struct file_operations htminfo_fops = { + .llseek = NULL, + .read = htminfo_read, + .open = simple_open, +}; + +static const struct file_operations htmcaps_fops = { + .llseek = NULL, + .read = htmcaps_read, + .open = simple_open, +}; + +static int htmsetup_set(void *data, u64 val) +{ + long rc, ret; + + /* + * Input value: HTM buffer size as a power of 2 + * example: hex value 0x21 (decimal: 33) is for + * 8GB + * Invoke H_HTM call with: + * - operation as htm setup (H_HTM_OP_SETUP) + * - parameter 1 set to input value. + * - last two values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_SETUP, val, 0, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_SETUP, returning %ld\n", ret); + return ret; + } + + /* Set htmsetup if H_HTM_OP_SETUP operation succeeds */ + htmsetup = val; + + return 0; +} + +static int htmsetup_get(void *data, u64 *val) +{ + *val = htmsetup; + return 0; +} + +static int htmflags_set(void *data, u64 val) +{ + /* + * Input value: + * Currently supported flag value is to enable/disable + * HTM buffer wrap. This flag is used along with "configure" + * to prevent the HTM buffer from wrapping. 
+ * Writing 1 will set noWrap while configuring HTM + */ + if (val == HTM_NOWRAP) + htmflags = H_HTM_FLAGS_NOWRAP; + else if (val == HTM_WRAP) + htmflags = 0; + else + return -EINVAL; + + return 0; +} + +static int htmflags_get(void *data, u64 *val) +{ + *val = htmflags; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(htmconfigure_fops, htmconfigure_get, htmconfigure_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmstart_fops, htmstart_get, htmstart_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmsetup_fops, htmsetup_get, htmsetup_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmflags_fops, htmflags_get, htmflags_set, "%llu\n"); + +static int htmdump_init_debugfs(void) +{ + htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_buf) { + pr_err("Failed to allocate htmdump buf\n"); + return -ENOMEM; + } + + htmdump_debugfs_dir = debugfs_create_dir("htmdump", + arch_debugfs_dir); + + debugfs_create_u32("nodeindex", 0600, + htmdump_debugfs_dir, &nodeindex); + debugfs_create_u32("nodalchipindex", 0600, + htmdump_debugfs_dir, &nodalchipindex); + debugfs_create_u32("coreindexonchip", 0600, + htmdump_debugfs_dir, &coreindexonchip); + debugfs_create_u32("htmtype", 0600, + htmdump_debugfs_dir, &htmtype); + debugfs_create_file("trace", 0400, htmdump_debugfs_dir, htm_buf, &htmdump_fops); + + /* + * Debugfs interface files to control HTM operations: + */ + debugfs_create_file("htmconfigure", 0600, htmdump_debugfs_dir, NULL, &htmconfigure_fops); + debugfs_create_file("htmstart", 0600, htmdump_debugfs_dir, NULL, &htmstart_fops); + debugfs_create_file("htmsetup", 0600, htmdump_debugfs_dir, NULL, &htmsetup_fops); + debugfs_create_file("htmflags", 0600, htmdump_debugfs_dir, NULL, &htmflags_fops); + + /* Debugfs interface file to present status of HTM */ + htm_status_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_status_buf) { + pr_err("Failed to allocate htmstatus buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present System Processor Configuration */ + htm_info_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_info_buf) { + pr_err("Failed to allocate htm info buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present HTM capabilities */ + htm_caps_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_caps_buf) { + pr_err("Failed to allocate htm caps buf\n"); + return -ENOMEM; + } + + debugfs_create_file("htmstatus", 0400, htmdump_debugfs_dir, htm_status_buf, &htmstatus_fops); + debugfs_create_file("htminfo", 0400, htmdump_debugfs_dir, htm_info_buf, &htminfo_fops); + debugfs_create_file("htmcaps", 0400, htmdump_debugfs_dir, htm_caps_buf, &htmcaps_fops); + + return 0; +} + +static int __init htmdump_init(void) +{ + /* Disable on kvm guest */ + if (is_kvm_guest()) { + pr_info("htmdump not supported inside KVM guest\n"); + return -EOPNOTSUPP; + } + + if (htmdump_init_debugfs()) + return -ENOMEM; + + return 0; +} + +static void __exit htmdump_exit(void) +{ + debugfs_remove_recursive(htmdump_debugfs_dir); + kfree(htm_buf); +} + +module_init(htmdump_init); +module_exit(htmdump_exit); +MODULE_DESCRIPTION("PHYP Hardware Trace Macro (HTM) data dumper"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index b401282727a4..3436b0af795e 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -339,7 +339,7 @@ static struct attribute *ibmbus_bus_attrs[] = { }; ATTRIBUTE_GROUPS(ibmbus_bus); -static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) +static int 
ibmebus_bus_bus_match(struct device *dev, const struct device_driver *drv) { const struct of_device_id *matches = drv->of_match_table; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index e8c4129697b1..eec333dd2e59 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/crash_dump.h> #include <linux/memory.h> +#include <linux/vmalloc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/iommu.h> @@ -51,7 +52,8 @@ enum { enum { DDW_EXT_SIZE = 0, DDW_EXT_RESET_DMA_WIN = 1, - DDW_EXT_QUERY_OUT_SIZE = 2 + DDW_EXT_QUERY_OUT_SIZE = 2, + DDW_EXT_LIMITED_ADDR_MODE = 3 }; static struct iommu_table *iommu_pseries_alloc_table(int node) @@ -67,6 +69,10 @@ static struct iommu_table *iommu_pseries_alloc_table(int node) return tbl; } +#ifdef CONFIG_IOMMU_API +static struct iommu_table_group_ops spapr_tce_table_group_ops; +#endif + static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group; @@ -102,7 +108,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, #endif /* Default DMA window table is at index 0, while DDW at 1. SR-IOV - * adapters only have table on index 1. + * adapters only have table on index 0 (if not direct mapped). */ if (table_group->tables[0]) iommu_tce_table_put(table_group->tables[0]); @@ -143,7 +149,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, } -static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) +static void tce_clear_pSeries(struct iommu_table *tbl, long index, long npages) { __be64 *tcep; @@ -162,6 +168,39 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) return be64_to_cpu(*tcep); } +#ifdef CONFIG_IOMMU_API +static long pseries_tce_iommu_userspace_view_alloc(struct iommu_table *tbl) +{ + unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); + unsigned long *uas; + + if (tbl->it_indirect_levels) /* Impossible */ + return -EPERM; + + WARN_ON(tbl->it_userspace); + + uas = vzalloc(cb); + if (!uas) + return -ENOMEM; + + tbl->it_userspace = (__be64 *) uas; + + return 0; +} +#endif + +static void tce_iommu_userspace_view_free(struct iommu_table *tbl) +{ + vfree(tbl->it_userspace); + tbl->it_userspace = NULL; +} + +static void tce_free_pSeries(struct iommu_table *tbl) +{ + if (tbl->it_userspace) + tce_iommu_userspace_view_free(tbl); +} + static void tce_free_pSeriesLP(unsigned long liobn, long, long, long); static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); @@ -576,7 +615,7 @@ struct iommu_table_ops iommu_table_lpar_multi_ops; struct iommu_table_ops iommu_table_pseries_ops = { .set = tce_build_pSeries, - .clear = tce_free_pSeries, + .clear = tce_clear_pSeries, .get = tce_get_pseries }; @@ -685,17 +724,47 @@ static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned return rc; } + +static __be64 *tce_useraddr_pSeriesLP(struct iommu_table *tbl, long index, + bool __always_unused alloc) +{ + return tbl->it_userspace ? 
&tbl->it_userspace[index - tbl->it_offset] : NULL; +} #endif struct iommu_table_ops iommu_table_lpar_multi_ops = { .set = tce_buildmulti_pSeriesLP, #ifdef CONFIG_IOMMU_API .xchg_no_kill = tce_exchange_pseries, + .useraddrptr = tce_useraddr_pSeriesLP, #endif .clear = tce_freemulti_pSeriesLP, - .get = tce_get_pSeriesLP + .get = tce_get_pSeriesLP, + .free = tce_free_pSeries }; +#ifdef CONFIG_IOMMU_API +/* + * When the DMA window properties might have been removed, + * the parent node has the table_group setup on it. + */ +static struct device_node *pci_dma_find_parent_node(struct pci_dev *dev, + struct iommu_table_group *table_group) +{ + struct device_node *dn = pci_device_to_OF_node(dev); + struct pci_dn *rpdn; + + for (; dn && PCI_DN(dn); dn = dn->parent) { + rpdn = PCI_DN(dn); + + if (table_group == rpdn->table_group) + return dn; + } + + return NULL; +} +#endif + /* * Find nearest ibm,dma-window (default DMA window) or direct DMA window or * dynamic 64bit DMA window, walking up the device tree. @@ -786,8 +855,16 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) * parent bus. During reboot, there will be ibm,dma-window property to * define DMA window. For kdump, there will at least be default window or DDW * or both. + * There is an exception to the above. In case the PE goes into frozen + * state, firmware may not provide ibm,dma-window property at the time + * of LPAR boot up. */ + if (!pdn) { + pr_debug(" no ibm,dma-window property !\n"); + return; + } + ppci = PCI_DN(pdn); pr_debug(" parent is %pOF, iommu_table: 0x%p\n", @@ -804,13 +881,6 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) be32_to_cpu(prop.tce_shift), NULL, &iommu_table_lpar_multi_ops); - /* Only for normal boot with default window. Doesn't matter even - * if we set these with DDW which is 64bit during kdump, since - * these will not be used during kdump. 
- */ - ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base); - ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); - if (!iommu_init_table(tbl, ppci->phb->node, 0, 0)) panic("Failed to initialize iommu table"); @@ -909,7 +979,7 @@ static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liob } static void remove_dma_window(struct device_node *np, u32 *ddw_avail, - struct property *win) + struct property *win, bool cleanup) { struct dynamic_dma_window_prop *dwp; u64 liobn; @@ -917,11 +987,44 @@ static void remove_dma_window(struct device_node *np, u32 *ddw_avail, dwp = win->value; liobn = (u64)be32_to_cpu(dwp->liobn); - clean_dma_window(np, dwp); + if (cleanup) + clean_dma_window(np, dwp); __remove_dma_window(np, ddw_avail, liobn); } -static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name) +static void copy_property(struct device_node *pdn, const char *from, const char *to) +{ + struct property *src, *dst; + + src = of_find_property(pdn, from, NULL); + if (!src) + return; + + dst = kzalloc(sizeof(*dst), GFP_KERNEL); + if (!dst) + return; + + dst->name = kstrdup(to, GFP_KERNEL); + dst->value = kmemdup(src->value, src->length, GFP_KERNEL); + dst->length = src->length; + if (!dst->name || !dst->value) + return; + + if (of_add_property(pdn, dst)) { + pr_err("Unable to add DMA window property for %pOF", pdn); + goto free_prop; + } + + return; + +free_prop: + kfree(dst->name); + kfree(dst->value); + kfree(dst); +} + +static int remove_dma_window_named(struct device_node *np, bool remove_prop, const char *win_name, + bool cleanup) { struct property *win; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -936,13 +1039,20 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_ if (ret) return 0; - if (win->length >= sizeof(struct dynamic_dma_window_prop)) - remove_dma_window(np, ddw_avail, win); + remove_dma_window(np, ddw_avail, win, cleanup); if (!remove_prop) return 0; + /* If removed, the default window property is lost, as reset-pe doesn't restore it. + * Though the FDT has a copy of it, DLPAR hotplugged devices will not have a + * node in the FDT until the next reboot. So, back it up. 
+ */ + if ((strcmp(win_name, "ibm,dma-window") == 0) && + !of_find_property(np, "ibm,dma-window-saved", NULL)) + copy_property(np, win_name, "ibm,dma-window-saved"); + ret = of_remove_property(np, win); if (ret) pr_warn("%pOF: failed to remove DMA window property: %d\n", @@ -1000,7 +1110,7 @@ static void find_existing_ddw_windows_named(const char *name) for_each_node_with_property(pdn, name) { dma64 = of_get_property(pdn, name, &len); if (!dma64 || len < sizeof(*dma64)) { - remove_ddw(pdn, true, name); + remove_dma_window_named(pdn, true, name, true); continue; } @@ -1175,17 +1285,13 @@ static LIST_HEAD(failed_ddw_pdn_list); static phys_addr_t ddw_memory_hotplug_max(void) { - resource_size_t max_addr = memory_hotplug_max(); - struct device_node *memory; - - for_each_node_by_type(memory, "memory") { - struct resource res; - - if (of_address_to_resource(memory, 0, &res)) - continue; + resource_size_t max_addr; - max_addr = max_t(resource_size_t, max_addr, res.end + 1); - } +#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) + max_addr = hot_add_drconf_memory_max(); +#else + max_addr = memblock_end_of_DRAM(); +#endif return max_addr; } @@ -1222,6 +1328,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) ret); } +/* + * Platforms implementing LoPAR level 2.13 or later support placing the PHB + * in limited address mode. In this mode, the DMA address returned by DDW is over + * 4GB but less than 64 bits. This benefits IO adapters that don't support + * 64-bit DMA addresses. + */ +static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn) +{ + int ret; + u32 cfg_addr, reset_dma_win, las_supported; + u64 buid; + struct device_node *dn; + struct pci_dn *pdn; + + ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win); + if (ret) + goto out; + + ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported); + + /* Limited Address Space extension available on the platform but DDW in + * limited addressing mode is not supported + */ + if (!ret && !las_supported) + ret = -EPROTO; + + if (ret) { + dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret); + goto out; + } + + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8); + + ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid), + BUID_LO(buid), 1); + if (ret) + dev_info(&dev->dev, + "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ", + reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid), + ret); + +out: + return ret; +} + /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */ static int iommu_get_page_shift(u32 query_page_size) { @@ -1289,14 +1443,14 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64 * * returns true if it can map all pages (direct mapping), false otherwise. 
*/ -static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) +static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask) { int len = 0, ret; int max_ram_len = order_base_2(ddw_memory_hotplug_max()); struct ddw_query_response query; struct ddw_create_response create; int page_shift; - u64 win_addr; + u64 win_addr, dynamic_offset = 0; const char *win_name; struct device_node *dn; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -1304,9 +1458,13 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) struct property *win64; struct failed_ddw_pdn *fpdn; bool default_win_removed = false, direct_mapping = false; + bool dynamic_mapping = false; bool pmem_present; struct pci_dn *pci = PCI_DN(pdn); struct property *default_win = NULL; + bool limited_addr_req = false, limited_addr_enabled = false; + int dev_max_ddw; + int ddw_sz; dn = of_find_node_by_type(NULL, "ibm,pmemory"); pmem_present = dn != NULL; @@ -1333,7 +1491,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) * the ibm,ddw-applicable property holds the tokens for: * ibm,query-pe-dma-window * ibm,create-pe-dma-window - * ibm,remove-pe-dma-window * for the given node in that order. * the property is actually in the parent, not the PE */ @@ -1353,6 +1510,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (ret != 0) goto out_failed; + /* DMA Limited Addressing required? This is when the driver has + * requested to create a DDW but supports a mask which is less than 64 bits + */ + limited_addr_req = (dma_mask != DMA_BIT_MASK(64)); + + /* place the PHB in Limited Addressing mode */ + if (limited_addr_req) { + if (limited_dma_window(dev, pdn)) + goto out_failed; + + /* PHB is in Limited address mode */ + limited_addr_enabled = true; + } + /* * If there is no window available, remove the default DMA window, * if it's present. This will make all the resources available to the @@ -1377,7 +1548,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (reset_win_ext) goto out_failed; - remove_dma_window(pdn, ddw_avail, default_win); + remove_dma_window(pdn, ddw_avail, default_win, true); default_win_removed = true; /* Query again, to check if the window is available */ @@ -1399,6 +1570,14 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_failed; } + /* Maximum DMA window size that the device can address (in log2) */ + dev_max_ddw = fls64(dma_mask); + + /* If the device DMA mask is less than 64 bits, make sure the DMA window + * size is not bigger than what the device can access + */ + ddw_sz = min(order_base_2(query.largest_available_block << page_shift), + dev_max_ddw); /* * The "ibm,pmemory" can appear anywhere in the address space. 
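To make the clamping in the hunk above concrete: ddw_sz is the smaller of what firmware offers (largest_available_block TCEs of 2^page_shift bytes each) and what the device can address (fls64(dma_mask)). A standalone worked example of that arithmetic, with made-up numbers and local stand-ins for the kernel's order_base_2()/fls64() helpers (a sketch for illustration, not kernel code):

#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): log2 rounded up. */
static int order_base_2(unsigned long long x)
{
	int order = 0;

	while ((1ULL << order) < x)
		order++;
	return order;
}

/* Stand-in for the kernel's fls64(): position of the highest set bit, 1-based. */
static int fls64(unsigned long long x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	/* Made-up firmware answer: 2^21 TCEs of 64K IO pages each. */
	unsigned long long largest_available_block = 1ULL << 21;
	int page_shift = 16;
	/* Made-up device: a 48-bit capable adapter. */
	unsigned long long dma_mask = (1ULL << 48) - 1;

	int dev_max_ddw = fls64(dma_mask);	/* 48 */
	int ddw_sz = order_base_2(largest_available_block << page_shift); /* 37 */

	if (ddw_sz > dev_max_ddw)	/* min(), as in the hunk above */
		ddw_sz = dev_max_ddw;

	printf("window size: 2^%d bytes\n", ddw_sz);	/* 2^37 = 128GB here */
	return 0;
}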
@@ -1408,30 +1587,56 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ len = max_ram_len; if (pmem_present) { - if (query.largest_available_block >= - (1ULL << (MAX_PHYSMEM_BITS - page_shift))) + if (ddw_sz >= MAX_PHYSMEM_BITS) len = MAX_PHYSMEM_BITS; else dev_info(&dev->dev, "Skipping ibm,pmemory"); } /* check if the available block * number of ptes will map everything */ - if (query.largest_available_block < (1ULL << (len - page_shift))) { + if (ddw_sz < len) { dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu %llu-sized pages\n", 1ULL << len, query.largest_available_block, 1ULL << page_shift); - len = order_base_2(query.largest_available_block << page_shift); - win_name = DMA64_PROPNAME; + len = ddw_sz; + dynamic_mapping = true; } else { direct_mapping = !default_win_removed || (len == MAX_PHYSMEM_BITS) || (!pmem_present && (len == max_ram_len)); - win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME; + + /* DDW is big enough to direct map RAM. If there is vPMEM, check + * whether enough space is left in the DDW to dynamically + * allocate TCEs for vPMEM. For now, this hybrid sharing of DDW + * is only for SR-IOV devices. + */ + if (default_win_removed && pmem_present && !direct_mapping) { + /* DDW is big enough to be split */ + if ((1ULL << ddw_sz) >= + MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + + direct_mapping = true; + + /* offset of the Dynamic part of DDW */ + dynamic_offset = 1ULL << max_ram_len; + } + + /* DDW will at least have dynamic allocation */ + dynamic_mapping = true; + + /* create max size DDW possible */ + len = ddw_sz; + } } + /* Even if the DDW is split into both direct mapped RAM and dynamically + * mapped vPMEM, the DDW property in OF will be marked as Direct. + */ + win_name = direct_mapping ?
DIRECT64_PROPNAME : DMA64_PROPNAME; + ret = create_ddw(dev, ddw_avail, &create, page_shift, len); if (ret != 0) goto out_failed; @@ -1459,11 +1664,11 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (!window) goto out_del_prop; - if (direct_mapping) { - window->direct = true; + window->direct = direct_mapping; + if (direct_mapping) { /* DDW maps the whole partition, so enable direct DMA mapping */ - ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, + ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", @@ -1473,12 +1678,18 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) clean_dma_window(pdn, win64->value); goto out_del_list; } - } else { + if (default_win_removed) { + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + set_iommu_table_base(&dev->dev, NULL); + } + } + + if (dynamic_mapping) { struct iommu_table *newtbl; int i; unsigned long start = 0, end = 0; - - window->direct = false; + u64 dynamic_addr, dynamic_len; for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; @@ -1498,20 +1709,27 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_del_list; } - iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr, - 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); - iommu_init_table(newtbl, pci->phb->node, start, end); + /* If the DDW is split between directly mapped RAM and dynamically + * mapped TCEs, offset into the DDW to where the dynamic part + * begins. + */ + dynamic_addr = win_addr + dynamic_offset; + dynamic_len = (1UL << len) - dynamic_offset; + iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, + dynamic_addr, dynamic_len, page_shift, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(newtbl, pci->phb->node, + start >> page_shift, end >> page_shift); - pci->table_group->tables[1] = newtbl; + pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl; set_iommu_table_base(&dev->dev, newtbl); } if (default_win_removed) { - iommu_tce_table_put(pci->table_group->tables[0]); - pci->table_group->tables[0] = NULL; - /* default_win is valid here because default_win_removed == true */ + if (!of_find_property(pdn, "ibm,dma-window-saved", NULL)) + copy_property(pdn, "ibm,dma-window", "ibm,dma-window-saved"); of_remove_property(pdn, default_win); dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn); } @@ -1539,7 +1757,7 @@ out_remove_win: __remove_dma_window(pdn, ddw_avail, create.liobn); out_failed: - if (default_win_removed) + if (default_win_removed || limited_addr_enabled) reset_dma_window(dev, pdn); fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); @@ -1551,17 +1769,84 @@ out_failed: out_unlock: mutex_unlock(&dma_win_init_mutex); - /* - * If we have persistent memory and the window size is only as big - * as RAM, then we failed to create a window to cover persistent - * memory and need to set the DMA limit. + /* If we have persistent memory and the window size is not big enough + * to directly map both RAM and vPMEM, then we need to set the DMA limit.
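+ * For example (illustrative numbers, following the assignment below): with max_ram_len = 40 (1 TiB of RAM), bus_dma_limit becomes dma_offset + 2^40, so anything above the direct-mapped RAM range (i.e. vPMEM) stays outside the direct mapping.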
*/ - if (pmem_present && direct_mapping && len == max_ram_len) - dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len); + if (pmem_present && direct_mapping && len != MAX_PHYSMEM_BITS) + dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + + (1ULL << max_ram_len); + + dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n", + limited_addr_req, limited_addr_enabled, direct_mapping); return direct_mapping; } +static __u64 query_page_size_to_mask(u32 query_page_size) +{ + const long shift[] = { + (SZ_4K), (SZ_64K), (SZ_16M), + (SZ_32M), (SZ_64M), (SZ_128M), + (SZ_256M), (SZ_16G), (SZ_2M) + }; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(shift); i++) { + if (query_page_size & (1 << i)) + ret |= shift[i]; + } + + return ret; +} + +static void spapr_tce_init_table_group(struct pci_dev *pdev, + struct device_node *pdn, + struct dynamic_dma_window_prop prop) +{ + struct iommu_table_group *table_group = PCI_DN(pdn)->table_group; + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + + struct ddw_query_response query; + int ret; + + /* Only for normal boot with default window. Doesn't matter during + * kdump, since these will not be used during kdump. + */ + if (is_kdump_kernel()) + return; + + if (table_group->max_dynamic_windows_supported != 0) + return; /* already initialized */ + + table_group->tce32_start = be64_to_cpu(prop.dma_base); + table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); + + if (!of_find_property(pdn, "ibm,dma-window", NULL)) + dev_err(&pdev->dev, "default dma window missing!\n"); + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + table_group->max_dynamic_windows_supported = -1; + return; + } + + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) { + dev_err(&pdev->dev, "%s: query_ddw failed\n", __func__); + table_group->max_dynamic_windows_supported = -1; + return; + } + + if (query.windows_available == 0) + table_group->max_dynamic_windows_supported = 1; + else + table_group->max_dynamic_windows_supported = IOMMU_TABLE_GROUP_MAX_TABLES; + + table_group->max_levels = 1; + table_group->pgsizes |= query_page_size_to_mask(query.page_size); +} + static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; @@ -1601,13 +1886,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) be32_to_cpu(prop.tce_shift), NULL, &iommu_table_lpar_multi_ops); - /* Only for normal boot with default window. Doesn't matter even - * if we set these with DDW which is 64bit during kdump, since - * these will not be used during kdump. - */ - pci->table_group->tce32_start = be64_to_cpu(prop.dma_base); - pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); - iommu_init_table(tbl, pci->phb->node, 0, 0); iommu_register_group(pci->table_group, pci_domain_nr(pci->phb->bus), 0); @@ -1616,6 +1894,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug(" found DMA window, table: %p\n", pci->table_group); } + spapr_tce_init_table_group(dev, pdn, prop); + set_iommu_table_base(&dev->dev, pci->table_group->tables[0]); iommu_add_device(pci->table_group, &dev->dev); } @@ -1624,8 +1904,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) { struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; - /* only attempt to use a new window if 64-bit DMA is requested */ - if (dma_mask < DMA_BIT_MASK(64)) + /* For DDW, DMA mask should be more than 32-bits. 
For masks more than + * 32 bits but less than 64 bits, DMA addressing is supported in + * Limited Addressing mode. + */ + if (dma_mask <= DMA_BIT_MASK(32)) return false; dev_dbg(&pdev->dev, "node is %pOF\n", dn); @@ -1638,11 +1921,501 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) */ pdn = pci_dma_find(dn, NULL); if (pdn && PCI_DN(pdn)) - return enable_ddw(pdev, pdn); + return enable_ddw(pdev, pdn, dma_mask); return false; } +#ifdef CONFIG_IOMMU_API +/* + * A simple iommu_table_group_ops which only allows reusing the existing + * iommu_table. This handles VFIO for POWER7 or the nested KVM. + * The ops do not allow creating windows and only allow reusing the existing + * one if it matches table_group->tce32_start/tce32_size/page_shift. + */ +static unsigned long spapr_tce_get_table_size(__u32 page_shift, + __u64 window_size, __u32 levels) +{ + unsigned long size; + + if (levels > 1) + return ~0U; + size = window_size >> (page_shift - 3); + return size; +} + +static struct pci_dev *iommu_group_get_first_pci_dev(struct iommu_group *group) +{ + struct pci_dev *pdev = NULL; + int ret; + + /* No IOMMU group ? */ + if (!group) + return NULL; + + ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table); + if (!ret || !pdev) + return NULL; + return pdev; +} + +static void restore_default_dma_window(struct pci_dev *pdev, struct device_node *pdn) +{ + reset_dma_window(pdev, pdn); + copy_property(pdn, "ibm,dma-window-saved", "ibm,dma-window"); +} + +static long remove_dynamic_dma_windows(struct pci_dev *pdev, struct device_node *pdn) +{ + struct pci_dn *pci = PCI_DN(pdn); + struct dma_win *window; + bool direct_mapping; + int len; + + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, &direct_mapping)) { + remove_dma_window_named(pdn, true, direct_mapping ? + DIRECT64_PROPNAME : DMA64_PROPNAME, true); + if (!direct_mapping) { + WARN_ON(!pci->table_group->tables[0] && !pci->table_group->tables[1]); + + if (pci->table_group->tables[1]) { + iommu_tce_table_put(pci->table_group->tables[1]); + pci->table_group->tables[1] = NULL; + } else if (pci->table_group->tables[0]) { + /* Default window was removed and only the DDW exists */ + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + } + } + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { + if (window->device == pdn) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&dma_win_list_lock); + } + + return 0; +} + +static long pseries_setup_default_iommu_config(struct iommu_table_group *table_group, + struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + const __be32 *default_prop; + long liobn, offset, size; + struct device_node *pdn; + struct iommu_table *tbl; + struct pci_dn *pci; + + pdn = pci_dma_find_parent_node(pdev, table_group); + if (!pdn || !PCI_DN(pdn)) { + dev_warn(&pdev->dev, "No table_group configured for the node %pOF\n", pdn); + return -1; + } + pci = PCI_DN(pdn); + + /* The default window is restored on removal of the DDW if it is not + * already present. However, when used by the VFIO SPAPR sub-driver, the + * user may have removed the windows in an order that does not lead to + * auto restoration, for example removing the DDW first, followed by the + * default one. + * So, restore the default window with an explicit reset-pe-dma call.
+ */ + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + tbl = iommu_pseries_alloc_table(pci->phb->node); + if (!tbl) { + dev_err(&pdev->dev, "couldn't create new IOMMU table\n"); + return -1; + } + + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, offset, + size, IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, 0, 0); + + pci->table_group->tables[0] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + + return 0; +} + +static bool is_default_window_request(struct iommu_table_group *table_group, __u32 page_shift, + __u64 window_size) +{ + if ((window_size <= table_group->tce32_size) && + (page_shift == IOMMU_PAGE_SHIFT_4K)) + return true; + + return false; +} + +static long spapr_tce_create_table(struct iommu_table_group *table_group, int num, + __u32 page_shift, __u64 window_size, __u32 levels, + struct iommu_table **ptbl) +{ + struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group); + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + struct ddw_create_response create; + unsigned long liobn, offset, size; + unsigned long start = 0, end = 0; + struct ddw_query_response query; + const __be32 *default_prop; + struct failed_ddw_pdn *fpdn; + unsigned int window_shift; + struct device_node *pdn; + struct iommu_table *tbl; + struct dma_win *window; + struct property *win64; + struct pci_dn *pci; + u64 win_addr; + int len, i; + long ret; + + if (!is_power_of_2(window_size) || levels > 1) + return -EINVAL; + + window_shift = order_base_2(window_size); + + mutex_lock(&dma_win_init_mutex); + + ret = -ENODEV; + + pdn = pci_dma_find_parent_node(pdev, table_group); + if (!pdn || !PCI_DN(pdn)) { /* Neither a 32-bit nor a 64-bit window exists! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + goto out_failed; + } + pci = PCI_DN(pdn); + + /* If enabling DDW failed for the pdn, don't retry! */ + list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) { + if (fpdn->pdn == pdn) { + dev_info(&pdev->dev, "%pOF in failed DDW device list\n", pdn); + goto out_unlock; + } + } + + tbl = iommu_pseries_alloc_table(pci->phb->node); + if (!tbl) { + dev_dbg(&pdev->dev, "couldn't create new IOMMU table\n"); + goto out_unlock; + } + + if (num == 0) { + bool direct_mapping; + /* The request is not for the default window? Ensure there is no DDW already */ + if (!is_default_window_request(table_group, page_shift, window_size)) { + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, + &direct_mapping)) { + dev_warn(&pdev->dev, "%pOF: 64-bit window already present.", pdn); + ret = -EPERM; + goto out_unlock; + } + } else { + /* The request is for the default window; ensure there is no DDW + * if a reset is needed, since reset-pe otherwise removes the DDW + * as well + */ + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + if (!default_prop) { + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, + &direct_mapping)) { + dev_warn(&pdev->dev, "%pOF: Attempt to create window#0 when 64-bit window is present.
Preventing the attempt as that would destroy the 64-bit window", + pdn); + ret = -EPERM; + goto out_unlock; + } + + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + /* Limit the default window size to window_size */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, + offset, 1UL << window_shift, + IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, + start >> IOMMU_PAGE_SHIFT_4K, + end >> IOMMU_PAGE_SHIFT_4K); + + table_group->tables[0] = tbl; + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + } + } + } + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + dev_info(&pdev->dev, "ibm,ddw-applicable not found\n"); + goto out_failed; + } + ret = -ENODEV; + + pr_err("%s: Calling query %pOF\n", __func__, pdn); + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) + goto out_failed; + ret = -ENODEV; + + len = window_shift; + if (query.largest_available_block < (1ULL << (len - page_shift))) { + dev_dbg(&pdev->dev, "can't map window 0x%llx with %llu %llu-sized pages\n", + 1ULL << len, query.largest_available_block, + 1ULL << page_shift); + ret = -EINVAL; /* Retry with smaller window size */ + goto out_unlock; + } + + if (create_ddw(pdev, ddw_avail, &create, page_shift, len)) { + pr_err("%s: Create ddw failed %pOF\n", __func__, pdn); + goto out_failed; + } + + win_addr = ((u64)create.addr_hi << 32) | create.addr_lo; + win64 = ddw_property_create(DMA64_PROPNAME, create.liobn, win_addr, page_shift, len); + if (!win64) + goto remove_window; + + ret = of_add_property(pdn, win64); + if (ret) { + dev_err(&pdev->dev, "unable to add DMA window property for %pOF: %ld", pdn, ret); + goto free_property; + } + ret = -ENODEV; + + window = ddw_list_new_entry(pdn, win64->value); + if (!window) + goto remove_property; + + window->direct = false; + + for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { + const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; + + /* Look for MMIO32 */ + if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) { + start = pci->phb->mem_resources[i].start; + end = pci->phb->mem_resources[i].end; + break; + } + } + + /* New table for using DDW instead of the default DMA window */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr, + 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift); + + pci->table_group->tables[num] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + pdev->dev.archdata.dma_offset = win_addr; + + spin_lock(&dma_win_list_lock); + list_add(&window->list, &dma_win_list); + spin_unlock(&dma_win_list_lock); + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + +remove_property: + of_remove_property(pdn, win64); +free_property: + kfree(win64->name); + kfree(win64->value); + kfree(win64); +remove_window: + __remove_dma_window(pdn, ddw_avail, create.liobn); + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +exit: + /* Allocate the userspace view */ + pseries_tce_iommu_userspace_view_alloc(tbl); + tbl->it_allocated_size = spapr_tce_get_table_size(page_shift, window_size, levels); + + *ptbl = 
iommu_tce_table_get(tbl); + + return 0; +} + +static bool is_default_window_table(struct iommu_table_group *table_group, struct iommu_table *tbl) +{ + if (((tbl->it_size << tbl->it_page_shift) <= table_group->tce32_size) && + (tbl->it_page_shift == IOMMU_PAGE_SHIFT_4K)) + return true; + + return false; +} + +static long spapr_tce_set_window(struct iommu_table_group *table_group, + int num, struct iommu_table *tbl) +{ + return tbl == table_group->tables[num] ? 0 : -EPERM; +} + +static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num) +{ + struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group); + struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; + struct iommu_table *tbl = table_group->tables[num]; + struct failed_ddw_pdn *fpdn; + struct dma_win *window; + const char *win_name; + int ret = -ENODEV; + + if (!tbl) /* The table was never created OR the window was never opened */ + return 0; + + mutex_lock(&dma_win_init_mutex); + + if ((num == 0) && is_default_window_table(table_group, tbl)) + win_name = "ibm,dma-window"; + else + win_name = DMA64_PROPNAME; + + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) { /* Neither a 32-bit nor a 64-bit window exists! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + goto out_failed; + } + + /* Don't clear the TCEs; the user should have done it */ + if (remove_dma_window_named(pdn, true, win_name, false)) { + pr_err("%s: The existing DDW removal failed for node %pOF\n", __func__, pdn); + goto out_failed; /* Could not remove it either! */ + } + + if (strcmp(win_name, DMA64_PROPNAME) == 0) { + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { + if (window->device == pdn) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&dma_win_list_lock); + } + + iommu_tce_table_put(table_group->tables[num]); + table_group->tables[num] = NULL; + + ret = 0; + + goto out_unlock; + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +} + +static long spapr_tce_take_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + struct pci_dev *pdev = to_pci_dev(dev); + struct device_node *dn = pci_device_to_OF_node(pdev); + struct device_node *pdn; + + /* SR-IOV VFs direct-mapped by the host driver, OR multifunction devices + * where ownership was already taken on the first function's attempt + */ + if (!tbl && (table_group->max_dynamic_windows_supported != 1)) + return 0; + + mutex_lock(&dma_win_init_mutex); + + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) { /* Neither a 32-bit nor a 64-bit window exists! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* + * Though the reset-pe RTAS call removes the DDW, it doesn't clear the entries in the + * table if there are any. In the direct-map case the entries will be left over, which + * is fine for PEs with 2 DMA windows, where the second window is created with create-pe, + * at which point the table is cleared. However, on VFs having only one DMA window, the + * default window would end up seeing the entries left over from the direct map done + * on the second window. So, remove the DDW explicitly so that clean_dma_window() + * cleans up the entries, if any.
+ */ + if (remove_dynamic_dma_windows(pdev, pdn)) { + dev_warn(&pdev->dev, "The existing DDW removal failed for node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* If table_group->tables[0] is not NULL here, it must be the default window. + * Remove it and let userspace create the windows it needs. + */ + if (table_group->tables[0]) { + remove_dma_window_named(pdn, true, "ibm,dma-window", true); + iommu_tce_table_put(tbl); + table_group->tables[0] = NULL; + } + set_iommu_table_base(dev, NULL); + + mutex_unlock(&dma_win_init_mutex); + + return 0; +} + +static void spapr_tce_release_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + + if (tbl) { /* Default window already restored */ + return; + } + + mutex_lock(&dma_win_init_mutex); + + /* Restore the default window */ + pseries_setup_default_iommu_config(table_group, dev); + + mutex_unlock(&dma_win_init_mutex); +} + +static struct iommu_table_group_ops spapr_tce_table_group_ops = { + .get_table_size = spapr_tce_get_table_size, + .create_table = spapr_tce_create_table, + .set_window = spapr_tce_set_window, + .unset_window = spapr_tce_unset_window, + .take_ownership = spapr_tce_take_ownership, + .release_ownership = spapr_tce_release_ownership, +}; +#endif + static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -1650,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, struct memory_notify *arg = data; int ret = 0; + /* This notifier can get called when onlining persistent memory as well. + * TCEs are not pre-mapped for persistent memory. Persistent memory will + * always be above ddw_memory_hotplug_max(). + */ + switch (action) { case MEM_GOING_ONLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } @@ -1666,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, case MEM_OFFLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } @@ -1704,8 +2484,8 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti * we have to remove the property when releasing * the device node.
*/ - if (remove_ddw(np, false, DIRECT64_PROPNAME)) - remove_ddw(np, false, DMA64_PROPNAME); + if (remove_dma_window_named(np, false, DIRECT64_PROPNAME, true)) + remove_dma_window_named(np, false, DMA64_PROPNAME, true); if (pci && pci->table_group) iommu_pseries_free_group(pci->table_group, diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 096d09ed89f6..431be156ca9b 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } else xics_kexec_teardown_cpu(secondary); } - -void pseries_machine_kexec(struct kimage *image) -{ - if (firmware_has_feature(FW_FEATURE_SET_MODE)) - pseries_disable_reloc_on_exc(); - - default_machine_kexec(image); -} diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 4e9916bb03d7..6a415febc53b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/jump_label.h> #include <linux/delay.h> +#include <linux/seq_file.h> #include <linux/stop_machine.h> #include <linux/spinlock.h> #include <linux/cpuhotplug.h> @@ -169,7 +170,7 @@ struct vcpu_dispatch_data { */ #define NR_CPUS_H NR_CPUS -DEFINE_RWLOCK(dtl_access_lock); +DECLARE_RWSEM(dtl_access_lock); static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); static DEFINE_PER_CPU(u64, dtl_entry_ridx); static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); @@ -463,7 +464,7 @@ static int dtl_worker_enable(unsigned long *time_limit) { int rc = 0, state; - if (!write_trylock(&dtl_access_lock)) { + if (!down_write_trylock(&dtl_access_lock)) { rc = -EBUSY; goto out; } @@ -479,7 +480,7 @@ static int dtl_worker_enable(unsigned long *time_limit) pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); rc = -EINVAL; goto out; } @@ -494,7 +495,7 @@ static void dtl_worker_disable(unsigned long *time_limit) cpuhp_remove_state(dtl_worker_state); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); } static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, @@ -1886,10 +1887,10 @@ out: * h_get_mpp * H_GET_MPP hcall returns info in 7 parms */ -int h_get_mpp(struct hvcall_mpp_data *mpp_data) +long h_get_mpp(struct hvcall_mpp_data *mpp_data) { - int rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + long rc; rc = plpar_hcall9(H_GET_MPP, retbuf); diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index f73c4d1c26af..cc22924f159f 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -29,7 +29,6 @@ #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/time.h> -#include <asm/vdso_datapage.h> #include <asm/vio.h> #include <asm/mmu.h> #include <asm/machdep.h> @@ -113,8 +112,8 @@ struct hvcall_ppp_data { */ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) { - unsigned long rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; + long rc; rc = plpar_hcall9(H_GET_PPP, retbuf); @@ -170,20 +169,24 @@ out: kfree(buf); } -static unsigned h_pic(unsigned long *pool_idle_time, - unsigned long *num_procs) 
+static long h_pic(unsigned long *pool_idle_time, + unsigned long *num_procs) { - unsigned long rc; - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = {0}; rc = plpar_hcall(H_PIC, retbuf); - *pool_idle_time = retbuf[0]; - *num_procs = retbuf[1]; + if (pool_idle_time) + *pool_idle_time = retbuf[0]; + if (num_procs) + *num_procs = retbuf[1]; return rc; } +unsigned long boot_pool_idle_time; + /* * parse_ppp_data * Parse out the data returned from h_get_ppp and h_pic @@ -193,7 +196,7 @@ static void parse_ppp_data(struct seq_file *m) struct hvcall_ppp_data ppp_data; struct device_node *root; const __be32 *perf_level; - int rc; + long rc; rc = h_get_ppp(&ppp_data); if (rc) @@ -215,9 +218,15 @@ static void parse_ppp_data(struct seq_file *m) seq_printf(m, "pool_capacity=%d\n", ppp_data.active_procs_in_pool * 100); - h_pic(&pool_idle_time, &pool_procs); - seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time); - seq_printf(m, "pool_num_procs=%ld\n", pool_procs); + /* If the h_pic call is not successful, the APP values reported by + * tools like lparstat would be wrong, so only print them on success. + */ + + if (h_pic(&pool_idle_time, &pool_procs) == H_SUCCESS) { + seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time); + seq_printf(m, "pool_num_procs=%ld\n", pool_procs); + seq_printf(m, "boot_pool_idle_time=%ld\n", boot_pool_idle_time); + } } seq_printf(m, "unallocated_capacity_weight=%d\n", @@ -361,8 +370,8 @@ static int read_dt_lpar_name(struct seq_file *m) static void read_lpar_name(struct seq_file *m) { - if (read_rtas_lpar_name(m) && read_dt_lpar_name(m)) - pr_err_once("Error can't get the LPAR name"); + if (read_rtas_lpar_name(m)) + read_dt_lpar_name(m); } #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) @@ -520,7 +529,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL); if (lrdrp == NULL) { - partition_potential_processors = vdso_data->processorCount; + partition_potential_processors = num_possible_cpus(); } else { partition_potential_processors = be32_to_cpup(lrdrp + 4); } @@ -543,7 +552,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) } else { /* non SPLPAR case */ seq_printf(m, "system_active_processors=%d\n", - partition_potential_processors); + partition_active_processors); seq_printf(m, "system_potential_processors=%d\n", partition_potential_processors); @@ -792,6 +801,7 @@ static const struct proc_ops lparcfg_proc_ops = { static int __init lparcfg_init(void) { umode_t mode = 0444; + long retval; /* Allow writing if we have FW_FEATURE_SPLPAR */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) @@ -801,6 +811,16 @@ static int __init lparcfg_init(void) printk(KERN_ERR "Failed to create powerpc/lparcfg\n"); return -EIO; } + + /* If this call fails, the APP values in the since-boot reports of + * lparstat would be wrong + */ + retval = h_pic(&boot_pool_idle_time, NULL); + + if (retval != H_SUCCESS) + pr_debug("H_PIC failed during lparcfg init retval: %ld\n", + retval); + return 0; } machine_device_initcall(pseries, lparcfg_init); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 1798f0f14d58..62bd8e2d5d4c 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -53,7 +53,7 @@ struct update_props_workarea { static unsigned int nmi_wd_lpm_factor = 200; #ifdef CONFIG_SYSCTL -static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = { +static const struct ctl_table
nmi_wd_lpm_factor_ctl_table[] = { { .procname = "nmi_wd_lpm_factor", .data = &nmi_wd_lpm_factor, diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6dfb55b52d36..ee1c8c6898a3 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -9,6 +9,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> +#include <linux/seq_file.h> #include <asm/rtas.h> #include <asm/hw_irq.h> @@ -524,7 +525,12 @@ static struct msi_domain_info pseries_msi_domain_info = { static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { - __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + + if (dev->current_state == PCI_D0) + __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + else + get_cached_msi_msg(data->irq, msg); } static struct irq_chip pseries_msi_irq_chip = { @@ -610,7 +616,7 @@ static const struct irq_domain_ops pseries_irq_domain_ops = { static int __pseries_msi_allocate_domains(struct pci_controller *phb, unsigned int count) { - struct irq_domain *parent = irq_get_default_host(); + struct irq_domain *parent = irq_get_default_domain(); phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI", phb->global_number); @@ -627,7 +633,7 @@ static int __pseries_msi_allocate_domains(struct pci_controller *phb, return -ENOMEM; } - phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn), + phb->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(phb->dn), &pseries_msi_domain_info, phb->dev_domain); if (!phb->msi_domain) { diff --git a/arch/powerpc/platforms/pseries/papr-indices.c b/arch/powerpc/platforms/pseries/papr-indices.c new file mode 100644 index 000000000000..3c7545591c45 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-indices.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-indices: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-indices.h> +#include "papr-rtas-common.h" + +/* + * Function-specific return values for ibm,set-dynamic-indicator and + * ibm,get-dynamic-sensor-state RTAS calls. + * PAPR+ v2.13 7.3.18 and 7.3.19. + */ +#define RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR -3 + +/** + * struct rtas_get_indices_params - Parameters (in and out) for + * ibm,get-indices. + * @is_sensor: In: Caller-provided flag selecting sensor or indicator. + * @indice_type: In: Caller-provided indice (sensor or indicator) token. + * @work_area: In: Caller-provided work area buffer for results. + * @next: In: Sequence number. Out: Next sequence number. + * @status: Out: RTAS call status. + */ +struct rtas_get_indices_params { + u8 is_sensor; + u32 indice_type; + struct rtas_work_area *work_area; + u32 next; + s32 status; +}; + +/* + * rtas_ibm_get_indices() - Call ibm,get-indices to fill a work area buffer. + * @params: See &struct rtas_get_indices_params. + * + * Calls ibm,get-indices until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values.
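+ * (For example, per the switch below, RTAS_HARDWARE_ERROR maps to -EIO and RTAS_INVALID_PARAMETER to -EINVAL.)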
+ * + * The caller is expected to invoke rtas_ibm_get_indices() multiple times + * to retrieve all indices data for the provided indice type. Only one + * sequence should be in progress at any time; starting a new sequence + * will disrupt any sequence already in progress. Serialization of + * indices retrieval sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_ibm_get_indices(struct rtas_get_indices_params *params) +{ + struct rtas_work_area *work_area = params->work_area; + const s32 token = rtas_function_token(RTAS_FN_IBM_GET_INDICES); + u32 rets; + s32 fwrc; + int ret; + + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_get_indices_lock); + + do { + fwrc = rtas_call(token, 5, 2, &rets, params->is_sensor, + params->indice_type, + rtas_work_area_phys(work_area), + rtas_work_area_size(work_area), + params->next); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: /* Indicator type is not supported */ + ret = -EINVAL; + break; + case RTAS_SEQ_START_OVER: + ret = -EAGAIN; + pr_info_ratelimited("Indices changed during retrieval, retrying\n"); + params->next = 1; + break; + case RTAS_SEQ_MORE_DATA: + params->next = rets; + ret = 0; + break; + case RTAS_SEQ_COMPLETE: + params->next = 0; + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-indices status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal indices sequence APIs. A sequence is a series of calls to + * ibm,get-indices for a given location code. The sequence ends when + * an error is encountered or all indices for the input have been + * returned. + */ + +/* + * indices_sequence_begin() - Begin an indices retrieval sequence. + * + * Context: May sleep. + */ +static void indices_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_get_indices_params *param; + + param = (struct rtas_get_indices_params *)seq->params; + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock. + */ + mutex_lock(&rtas_ibm_get_indices_lock); + param->work_area = rtas_work_area_alloc(RTAS_GET_INDICES_BUF_SIZE); + param->next = 1; + param->status = 0; +} + +/* + * indices_sequence_end() - Finalize an indices retrieval sequence. + * + * Releases resources obtained by indices_sequence_begin(). + */ +static void indices_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_get_indices_params *param; + + param = (struct rtas_get_indices_params *)seq->params; + rtas_work_area_free(param->work_area); + mutex_unlock(&rtas_ibm_get_indices_lock); +} + +/* + * Work function to be passed to papr_rtas_blob_generate(). + * + * The ibm,get-indices RTAS call fills the work area in a defined + * format but does not return the number of bytes written to the + * buffer. So instead of the kernel parsing the work area to determine + * the buffer length, copy the complete work area + * (RTAS_GET_INDICES_BUF_SIZE) to the blob and let user space parse + * the data. This means RTAS_GET_INDICES_BUF_SIZE bytes are returned + * for each read().
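+ * + * An illustrative user-space read loop (hypothetical names; assumes RTAS_GET_INDICES_BUF_SIZE is visible through the uapi header): + * + * char buf[RTAS_GET_INDICES_BUF_SIZE]; + * ssize_t n; + * + * while ((n = read(fd, buf, sizeof(buf))) > 0) + * parse_indices_chunk(buf, n); + * + * where fd comes from the PAPR_INDICES_IOC_GET ioctl below and parse_indices_chunk() is a hypothetical parser.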
+ */ + +static const char *indices_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_get_indices_params *p; + bool init_state; + + p = (struct rtas_get_indices_params *)seq->params; + init_state = (p->next == 1); + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_indices(p))) + return NULL; + + *len = RTAS_GET_INDICES_BUF_SIZE; + return rtas_work_area_raw_buf(p->work_area); +} + +/* + * papr_indices_handle_read - returns indices blob data to user space + * + * The ibm,get-indices RTAS call fills the work area in a defined + * format but does not return the number of bytes written to the + * buffer, and RTAS_GET_INDICES_BUF_SIZE bytes were copied to the blob + * for each RTAS call. So return an RTAS_GET_INDICES_BUF_SIZE buffer + * to user space for each read(). + */ +static ssize_t papr_indices_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + const struct papr_rtas_blob *blob = file->private_data; + + /* we should not instantiate a handle without any data attached. */ + if (!papr_rtas_blob_has_data(blob)) { + pr_err_once("handle without data\n"); + return -EIO; + } + + if (size < RTAS_GET_INDICES_BUF_SIZE) { + pr_err_once("Invalid buffer length %ld, expect %d\n", + size, RTAS_GET_INDICES_BUF_SIZE); + return -EINVAL; + } else if (size > RTAS_GET_INDICES_BUF_SIZE) + size = RTAS_GET_INDICES_BUF_SIZE; + + return simple_read_from_buffer(buf, size, off, blob->data, blob->len); +} + +static const struct file_operations papr_indices_handle_ops = { + .read = papr_indices_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/* + * papr_indices_create_handle() - Create a fd-based handle for reading + * indices data + * @ubuf: Input parameters to RTAS call such as whether sensor or indicator + * and indice type in user memory + * + * Handler for PAPR_INDICES_IOC_GET ioctl command. Validates @ubuf + * and instantiates an immutable indices "blob" for it. The blob is + * attached to a file descriptor for reading by user space. The memory + * backing the blob is freed when the file is released. + * + * All of the requested indices data is retrieved by this call, and all + * necessary RTAS interactions are performed before returning the fd + * to user space. This keeps the read handler simple and ensures that + * the kernel can prevent interleaving of ibm,get-indices call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_indices_create_handle(struct papr_indices_io_block __user *ubuf) +{ + struct papr_rtas_sequence seq = {}; + struct rtas_get_indices_params params = {}; + int fd; + + if (get_user(params.is_sensor, &ubuf->indices.is_sensor)) + return -EFAULT; + + if (get_user(params.indice_type, &ubuf->indices.indice_type)) + return -EFAULT; + + seq = (struct papr_rtas_sequence) { + .begin = indices_sequence_begin, + .end = indices_sequence_end, + .work = indices_sequence_fill_work_area, + }; + + seq.params = &params; + fd = papr_rtas_setup_file_interface(&seq, + &papr_indices_handle_ops, "[papr-indices]"); + + return fd; } + +/* + * Create a work area with the input parameters. This function is used + * for both the ibm,set-dynamic-indicator and ibm,get-dynamic-sensor-state + * RTAS calls.
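+ * + * For a hypothetical location code "U8286.41A.123456-P1" (19 characters), the resulting work area would contain: + * + * bytes 0..3: 0x00000014 (string length 20, including the NUL, big-endian) + * bytes 4..23: "U8286.41A.123456-P1" plus the terminating NUL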
+ */ +static struct rtas_work_area * +papr_dynamic_indice_buf_from_user(struct papr_indices_io_block __user *ubuf, + struct papr_indices_io_block *kbuf) +{ + struct rtas_work_area *work_area; + u32 length; + __be32 len_be; + + if (copy_from_user(kbuf, ubuf, sizeof(*kbuf))) + return ERR_PTR(-EFAULT); + + if (!string_is_terminated(kbuf->dynamic_param.location_code_str, + ARRAY_SIZE(kbuf->dynamic_param.location_code_str))) + return ERR_PTR(-EINVAL); + + /* + * The input data in the work area should be as follows: + * - 32-bit integer length of the location code string, + * including NULL. + * - Location code string, NULL terminated, identifying the + * token (sensor or indicator). + * PAPR 2.13 - R1–7.3.18–5 ibm,set-dynamic-indicator + * - R1–7.3.19–5 ibm,get-dynamic-sensor-state + */ + /* + * The length that user space passed should also include the NULL + * terminator. + */ + length = strlen(kbuf->dynamic_param.location_code_str) + 1; + if (length > LOC_CODE_SIZE) + return ERR_PTR(-EINVAL); + + len_be = cpu_to_be32(length); + + work_area = rtas_work_area_alloc(LOC_CODE_SIZE + sizeof(u32)); + memcpy(rtas_work_area_raw_buf(work_area), &len_be, sizeof(u32)); + memcpy((rtas_work_area_raw_buf(work_area) + sizeof(u32)), + &kbuf->dynamic_param.location_code_str, length); + + return work_area; +} + +/** + * papr_dynamic_indicator_ioc_set - ibm,set-dynamic-indicator RTAS Call + * PAPR 2.13 7.3.18 + * + * @ubuf: Input parameters to RTAS call such as indicator token and + * new state. + * + * Returns success or -errno. + */ +static long papr_dynamic_indicator_ioc_set(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + + token = rtas_function_token(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_set_dynamic_indicator_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 3, 1, NULL, + kbuf.dynamic_param.token, + kbuf.dynamic_param.state, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,set-dynamic-indicator result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_set_dynamic_indicator_lock); + return ret; +} + +/** + * papr_dynamic_sensor_ioc_get - ibm,get-dynamic-sensor-state RTAS Call + * PAPR 2.13 7.3.19 + * + * @ubuf: Input parameters to RTAS call such as sensor token. + * Copies the state into the user space buffer. + * + * Returns success or -errno.
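+ * + * An illustrative user-space call (field and ioctl names from uapi/asm/papr-indices.h): + * + * struct papr_indices_io_block io = {}; + * + * io.dynamic_param.token = token; + * strncpy(io.dynamic_param.location_code_str, loc, sizeof(io.dynamic_param.location_code_str) - 1); + * if (ioctl(fd, PAPR_DYNAMIC_SENSOR_IOC_GET, &io) == 0) + * state = io.dynamic_param.state;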
+ */ + +static long papr_dynamic_sensor_ioc_get(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + u32 rets; + + token = rtas_function_token(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_get_dynamic_sensor_state_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 2, 2, &rets, + kbuf.dynamic_param.token, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + if (put_user(rets, &ubuf->dynamic_param.state)) + ret = -EFAULT; + else + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,get-dynamic-sensor result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_get_dynamic_sensor_state_lock); + return ret; +} + +/* + * Top-level ioctl handler for /dev/papr-indices. + */ +static long papr_indices_dev_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_INDICES_IOC_GET: + ret = papr_indices_create_handle(argp); + break; + case PAPR_DYNAMIC_SENSOR_IOC_GET: + ret = papr_dynamic_sensor_ioc_get(argp); + break; + case PAPR_DYNAMIC_INDICATOR_IOC_SET: + if (filp->f_mode & FMODE_WRITE) + ret = papr_dynamic_indicator_ioc_set(argp); + else + ret = -EBADF; + break; + default: + ret = -ENOIOCTLCMD; + break; + } + + return ret; +} + +static const struct file_operations papr_indices_ops = { + .unlocked_ioctl = papr_indices_dev_ioctl, +}; + +static struct miscdevice papr_indices_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-indices", + .fops = &papr_indices_ops, +}; + +static __init int papr_indices_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_GET_INDICES)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE)) + return -ENODEV; + + return misc_register(&papr_indices_dev); +} +machine_device_initcall(pseries, papr_indices_init); diff --git a/arch/powerpc/platforms/pseries/papr-phy-attest.c b/arch/powerpc/platforms/pseries/papr-phy-attest.c new file mode 100644 index 000000000000..1907f2411567 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-phy-attest.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-phy-attest: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-physical-attestation.h> +#include "papr-rtas-common.h" + +/** + * struct rtas_phy_attest_params - Parameters (in and out) for + * ibm,physical-attestation. + * + * @cmd: In: Caller-provided attestation command buffer. Must be + * RTAS-addressable. 
+ * @work_area: In: Caller-provided work area buffer for attestation + * command structure + * Out: Caller-provided work area buffer for the response + * @cmd_len: In: Caller-provided attestation command structure + * length + * @sequence: In: Sequence number. Out: Next sequence number. + * @written: Out: Bytes written by ibm,physical-attestation to + * @work_area. + * @status: Out: RTAS call status. + */ +struct rtas_phy_attest_params { + struct papr_phy_attest_io_block cmd; + struct rtas_work_area *work_area; + u32 cmd_len; + u32 sequence; + u32 written; + s32 status; +}; + +/** + * rtas_physical_attestation() - Call ibm,physical-attestation to + * fill a work area buffer. + * @params: See &struct rtas_phy_attest_params. + * + * Calls ibm,physical-attestation until it errors or successfully + * deposits data into the supplied work area. Handles RTAS retry + * statuses. Maps RTAS error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_physical_attestation() + * multiple times to retrieve all the data for the provided + * attestation command. Only one sequence should be in progress at + * any time; starting a new sequence will disrupt any sequence + * already in progress. Serialization of attestation retrieval + * sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_physical_attestation(struct rtas_phy_attest_params *params) +{ + struct rtas_work_area *work_area; + s32 fwrc, token; + u32 rets[2]; + int ret; + + work_area = params->work_area; + token = rtas_function_token(RTAS_FN_IBM_PHYSICAL_ATTESTATION); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_physical_attestation_lock); + + do { + fwrc = rtas_call(token, 3, 3, rets, + rtas_work_area_phys(work_area), + params->cmd_len, + params->sequence); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: + ret = -EINVAL; + break; + case RTAS_SEQ_MORE_DATA: + params->sequence = rets[0]; + fallthrough; + case RTAS_SEQ_COMPLETE: + params->written = rets[1]; + /* + * Kernel or firmware bug, do not continue. + */ + if (WARN(params->written > rtas_work_area_size(work_area), + "possible write beyond end of work area")) + ret = -EFAULT; + else + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-phy_attest status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal physical-attestation sequence APIs. A physical-attestation + * sequence is a series of calls to ibm,physical-attestation + * for a given attestation command. The sequence ends when an error + * is encountered or all data for the attestation command has been + * returned. + */ + +/** + * phy_attest_sequence_begin() - Begin a response-data retrieval sequence + * for an attestation command. + * @seq: User-specified parameters for the RTAS call, from the sequence struct. + * + * Context: May sleep. + */ +static void phy_attest_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock.
+ */ + mutex_lock(&rtas_ibm_physical_attestation_lock); + param = (struct rtas_phy_attest_params *)seq->params; + param->work_area = rtas_work_area_alloc(SZ_4K); + memcpy(rtas_work_area_raw_buf(param->work_area), &param->cmd, + param->cmd_len); + param->sequence = 1; + param->status = 0; +} + +/** + * phy_attest_sequence_end() - Finalize an attestation command + * response retrieval sequence. + * @seq: Sequence state. + * + * Releases resources obtained by phy_attest_sequence_begin(). + */ +static void phy_attest_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + param = (struct rtas_phy_attest_params *)seq->params; + rtas_work_area_free(param->work_area); + mutex_unlock(&rtas_ibm_physical_attestation_lock); + kfree(param); +} + +/* + * Generator function to be passed to papr_rtas_blob_generate(). + */ +static const char *phy_attest_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_phy_attest_params *p; + bool init_state; + + p = (struct rtas_phy_attest_params *)seq->params; + init_state = (p->written == 0); + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_physical_attestation(p))) + return NULL; + *len = p->written; + return rtas_work_area_raw_buf(p->work_area); +} + +static const struct file_operations papr_phy_attest_handle_ops = { + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/** + * papr_phy_attest_create_handle() - Create a fd-based handle for + * reading the response for the given attestation command. + * @ulc: Attestation command in user memory; defines the scope of + * data for the attestation command to retrieve. + * + * Handler for PAPR_PHYSICAL_ATTESTATION_IOC_CREATE_HANDLE ioctl + * command. Validates @ulc and instantiates an immutable response + * "blob" for the attestation command. The blob is attached to a file + * descriptor for reading by user space. The memory backing the blob + * is freed when the file is released. + * + * The entire response for the attestation command is retrieved by + * this call, and all necessary RTAS interactions are performed before + * returning the fd to user space. This keeps the read handler simple + * and ensures that the kernel can prevent interleaving of + * ibm,physical-attestation call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_phy_attest_create_handle(struct papr_phy_attest_io_block __user *ulc) +{ + struct rtas_phy_attest_params *params; + struct papr_rtas_sequence seq = {}; + int fd; + + /* + * Freed in phy_attest_sequence_end(). + */ + params = kzalloc(sizeof(*params), GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + if (copy_from_user(&params->cmd, ulc, + sizeof(struct papr_phy_attest_io_block))) { + kfree(params); + return -EFAULT; + } + + params->cmd_len = be32_to_cpu(params->cmd.length); + seq = (struct papr_rtas_sequence) { + .begin = phy_attest_sequence_begin, + .end = phy_attest_sequence_end, + .work = phy_attest_sequence_fill_work_area, + }; + + seq.params = (void *)params; + + fd = papr_rtas_setup_file_interface(&seq, + &papr_phy_attest_handle_ops, + "[papr-physical-attestation]"); + + return fd; +} + +/* + * Top-level ioctl handler for /dev/papr-physical-attestation.
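+ * + * Illustrative flow (names from uapi/asm/papr-physical-attestation.h): user space fills a struct papr_phy_attest_io_block with the attestation command and its length, then: + * + * int blob_fd = ioctl(dev_fd, PAPR_PHY_ATTEST_IOC_HANDLE, &io); + * + * after which read() on blob_fd returns the attestation response blob.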
+ */ +static long papr_phy_attest_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_PHY_ATTEST_IOC_HANDLE: + ret = papr_phy_attest_create_handle(argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} + +static const struct file_operations papr_phy_attest_ops = { + .unlocked_ioctl = papr_phy_attest_dev_ioctl, +}; + +static struct miscdevice papr_phy_attest_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-physical-attestation", + .fops = &papr_phy_attest_ops, +}; + +static __init int papr_phy_attest_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_PHYSICAL_ATTESTATION)) + return -ENODEV; + + return misc_register(&papr_phy_attest_dev); +} +machine_device_initcall(pseries, papr_phy_attest_init); diff --git a/arch/powerpc/platforms/pseries/papr-platform-dump.c b/arch/powerpc/platforms/pseries/papr-platform-dump.c new file mode 100644 index 000000000000..f8d55eccdb6b --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-platform-dump.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-platform-dump: " fmt + +#include <linux/anon_inodes.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-platform-dump.h> + +/* + * Function-specific return values for ibm,platform-dump, derived from + * PAPR+ v2.13 7.3.3.4.1 "ibm,platform-dump RTAS Call". + */ +#define RTAS_IBM_PLATFORM_DUMP_COMPLETE 0 /* Complete dump retrieved. */ +#define RTAS_IBM_PLATFORM_DUMP_CONTINUE 1 /* Continue dump */ +#define RTAS_NOT_AUTHORIZED -9002 /* Not Authorized */ + +#define RTAS_IBM_PLATFORM_DUMP_START 2 /* Linux status to start dump */ + +/** + * struct ibm_platform_dump_params - Parameters (in and out) for + * ibm,platform-dump + * @work_area: In: work area buffer for results. + * @buf_length: In: work area buffer length in bytes + * @dump_tag_hi: In: Most-significant 32 bits of a Dump_Tag representing + * the ID of the dump being processed. + * @dump_tag_lo: In: Least-significant 32 bits of a Dump_Tag representing + * the ID of the dump being processed. + * @sequence_hi: In: Sequence number in most-significant 32 bits. + * Out: Next sequence number in most-significant 32 bits. + * @sequence_lo: In: Sequence number in least-significant 32 bits. + * Out: Next sequence number in least-significant 32 bits. + * @bytes_ret_hi: Out: Bytes written in most-significant 32 bits. + * @bytes_ret_lo: Out: Bytes written in least-significant 32 bits. + * @status: Out: RTAS call status. + * @list: Maintains the list of dumps that are in progress. Multiple + * dumps with different dump IDs can be retrieved at the same + * time, but not with the same dump ID. This list is used to + * determine whether a dump for the same ID is already in + * progress. + */ +struct ibm_platform_dump_params { + struct rtas_work_area *work_area; + u32 buf_length; + u32 dump_tag_hi; + u32 dump_tag_lo; + u32 sequence_hi; + u32 sequence_lo; + u32 bytes_ret_hi; + u32 bytes_ret_lo; + s32 status; + struct list_head list; +}; + +/* + * Multiple dumps with different dump IDs can be retrieved at the same + * time, but not with the same dump ID. platform_dump_list_mutex and + * platform_dump_list are used to enforce this.
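+ * + * For example, handles for dump IDs 1 and 2 may be open concurrently, but a second PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE request for dump ID 1 fails with -EALREADY while the first handle is still open.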
+/*
+ * Multiple dumps with different dump IDs can be retrieved at the same
+ * time, but not the same dump ID twice. platform_dump_list_mutex and
+ * platform_dump_list are used to prevent concurrent requests for the
+ * same dump ID.
+ */
+static DEFINE_MUTEX(platform_dump_list_mutex);
+static LIST_HEAD(platform_dump_list);
+
+/**
+ * rtas_ibm_platform_dump() - Call ibm,platform-dump to fill a work area
+ * buffer.
+ * @params: See &struct ibm_platform_dump_params.
+ * @buf_addr: Address of the dump buffer (work_area).
+ * @buf_length: Length of the buffer in bytes (min. 1024).
+ *
+ * Calls ibm,platform-dump until it errors or successfully deposits data
+ * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
+ * error statuses to reasonable errno values.
+ *
+ * Multiple dumps with different dump IDs can be requested at the same
+ * time, but not the same dump ID twice; this is prevented by the check
+ * in the ioctl code (papr_platform_dump_create_handle()).
+ *
+ * The caller should inspect @params.status to determine whether more
+ * calls are needed to complete the sequence.
+ *
+ * Context: May sleep.
+ * Return: -ve errno on error, 0 otherwise. On success, @params.status
+ *         holds the ibm,platform-dump status: dump complete (0) or
+ *         continue dump (1).
+ */
+static int rtas_ibm_platform_dump(struct ibm_platform_dump_params *params,
+				  phys_addr_t buf_addr, u32 buf_length)
+{
+	u32 rets[4];
+	s32 fwrc;
+	int ret = 0;
+
+	do {
+		fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP),
+				 6, 5,
+				 rets,
+				 params->dump_tag_hi,
+				 params->dump_tag_lo,
+				 params->sequence_hi,
+				 params->sequence_lo,
+				 buf_addr,
+				 buf_length);
+	} while (rtas_busy_delay(fwrc));
+
+	switch (fwrc) {
+	case RTAS_HARDWARE_ERROR:
+		ret = -EIO;
+		break;
+	case RTAS_NOT_AUTHORIZED:
+		ret = -EPERM;
+		break;
+	case RTAS_IBM_PLATFORM_DUMP_CONTINUE:
+	case RTAS_IBM_PLATFORM_DUMP_COMPLETE:
+		params->sequence_hi = rets[0];
+		params->sequence_lo = rets[1];
+		params->bytes_ret_hi = rets[2];
+		params->bytes_ret_lo = rets[3];
+		break;
+	default:
+		ret = -EIO;
+		pr_err_ratelimited("unexpected ibm,platform-dump status %d\n",
+				   fwrc);
+		break;
+	}
+
+	params->status = fwrc;
+	return ret;
+}
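The sequence handshake above (pass the sequence number back in, collect the next one along with the byte count) is what drives the retrieval protocol described next. A condensed kernel-side sketch of draining one complete dump, with consumption of each chunk elided; note the driver itself runs this loop one step per read() call, with the final invalidation issued from a separate ioctl, rather than all at once:

/* Sketch only: drain one dump, assuming @params was zero-initialized
 * with the dump tag filled in and a 4K work area attached. */
static int drain_platform_dump(struct ibm_platform_dump_params *params)
{
	int rc;

	do {
		rc = rtas_ibm_platform_dump(params,
					    rtas_work_area_phys(params->work_area),
					    params->buf_length);
		if (rc)
			return rc;
		/* consume bytes_ret_hi/lo bytes from the work area here */
	} while (params->status == RTAS_IBM_PLATFORM_DUMP_CONTINUE);

	/* A final call with a NULL buffer invalidates the dump. */
	return rtas_ibm_platform_dump(params, 0, 0);
}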
+/*
+ * A platform dump is retrieved with multiple RTAS calls for the
+ * provided dump ID. Once the complete dump is retrieved, the
+ * hypervisor returns dump complete status (0) for the last RTAS call
+ * and expects the caller to issue one more call with a NULL buffer
+ * to invalidate the dump, so that the hypervisor can remove it.
+ *
+ * After the specific dump is invalidated in the hypervisor, expect
+ * dump complete status for the new sequence when user space initiates
+ * a new request for the same dump ID.
+ */
+static ssize_t papr_platform_dump_handle_read(struct file *file,
+		char __user *buf, size_t size, loff_t *off)
+{
+	struct ibm_platform_dump_params *params = file->private_data;
+	u64 total_bytes;
+	s32 fwrc;
+
+	/*
+	 * The dump already completed with the previous read calls.
+	 * If user space issues further reads, return -EINVAL.
+	 */
+	if (!params->buf_length) {
+		pr_warn_once("Platform dump completed for dump ID %llu\n",
+			     (u64) (((u64)params->dump_tag_hi << 32) |
+				    params->dump_tag_lo));
+		return -EINVAL;
+	}
+
+	/*
+	 * The hypervisor returns status 0 if no more data is available
+	 * to download. The dump will be invalidated with the ioctl
+	 * (see below).
+	 */
+	if (params->status == RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+		params->buf_length = 0;
+		/*
+		 * Return 0 so that the user space read loop stops.
+		 */
+		return 0;
+	}
+
+	if (size < SZ_1K) {
+		pr_err_once("Buffer length should be minimum 1024 bytes\n");
+		return -EINVAL;
+	} else if (size > params->buf_length) {
+		/*
+		 * Only a 4K work area is allocated, so if the user
+		 * requests more than that, cap the read at the buffer
+		 * length.
+		 */
+		size = params->buf_length;
+	}
+
+	fwrc = rtas_ibm_platform_dump(params,
+				      rtas_work_area_phys(params->work_area),
+				      size);
+	if (fwrc < 0)
+		return fwrc;
+
+	total_bytes = (u64) (((u64)params->bytes_ret_hi << 32) |
+			     params->bytes_ret_lo);
+
+	/*
+	 * Kernel or firmware bug, do not continue.
+	 */
+	if (WARN(total_bytes > size, "possible write beyond end of work area"))
+		return -EFAULT;
+
+	if (copy_to_user(buf, rtas_work_area_raw_buf(params->work_area),
+			 total_bytes))
+		return -EFAULT;
+
+	return total_bytes;
+}
+
+static int papr_platform_dump_handle_release(struct inode *inode,
+					     struct file *file)
+{
+	struct ibm_platform_dump_params *params = file->private_data;
+
+	if (params->work_area)
+		rtas_work_area_free(params->work_area);
+
+	mutex_lock(&platform_dump_list_mutex);
+	list_del(&params->list);
+	mutex_unlock(&platform_dump_list_mutex);
+
+	kfree(params);
+	file->private_data = NULL;
+	return 0;
+}
+
+/*
+ * This ioctl invalidates the dump; user space is expected to issue it
+ * after obtaining the complete dump.
+ * Issue the last RTAS call with a NULL buffer to invalidate the dump,
+ * which means the dump will be freed in the hypervisor.
+ */
+static long papr_platform_dump_invalidate_ioctl(struct file *file,
+		unsigned int ioctl, unsigned long arg)
+{
+	struct ibm_platform_dump_params *params;
+	u64 __user *argp = (void __user *)arg;
+	u64 param_dump_tag, dump_tag;
+
+	if (ioctl != PAPR_PLATFORM_DUMP_IOC_INVALIDATE)
+		return -ENOIOCTLCMD;
+
+	if (get_user(dump_tag, argp))
+		return -EFAULT;
+
+	/*
+	 * private_data is freed during release(), so this should not
+	 * happen.
+	 */
+	if (!file->private_data) {
+		pr_err("No valid FD to invalidate dump for the ID(%llu)\n",
+		       dump_tag);
+		return -EINVAL;
+	}
+
+	params = file->private_data;
+	param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) |
+				params->dump_tag_lo);
+	if (dump_tag != param_dump_tag) {
+		pr_err("Invalid dump ID(%llu) to invalidate dump\n",
+		       dump_tag);
+		return -EINVAL;
+	}
+
+	if (params->status != RTAS_IBM_PLATFORM_DUMP_COMPLETE) {
+		pr_err("Platform dump is not complete, but requested to invalidate dump for ID(%llu)\n",
+		       dump_tag);
+		return -EINPROGRESS;
+	}
+
+	return rtas_ibm_platform_dump(params, 0, 0);
+}
+
+static const struct file_operations papr_platform_dump_handle_ops = {
+	.read = papr_platform_dump_handle_read,
+	.release = papr_platform_dump_handle_release,
+	.unlocked_ioctl = papr_platform_dump_invalidate_ioctl,
+};
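Putting the pieces together, the intended user-space protocol is: create a handle for a dump tag, read the handle until EOF, then invalidate. A minimal sketch with error handling mostly elided; it uses only the ioctl names visible in this patch, while the uapi header name is an assumption:

/* Illustrative user-space flow for one dump tag. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/papr-platform-dump.h> /* assumed uapi header name */

int fetch_platform_dump(uint64_t dump_tag, int out_fd)
{
	char buf[4096];
	ssize_t n;
	int devfd, fd;

	devfd = open("/dev/papr-platform-dump", O_RDONLY);
	if (devfd < 0)
		return -1;

	fd = ioctl(devfd, PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE, &dump_tag);
	close(devfd);
	if (fd < 0)
		return -1;

	/* Each read triggers one ibm,platform-dump call; 0 means done. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(out_fd, buf, n);

	/* Tell the hypervisor it may free the dump. */
	if (n == 0)
		ioctl(fd, PAPR_PLATFORM_DUMP_IOC_INVALIDATE, &dump_tag);

	close(fd);
	return n < 0 ? -1 : 0;
}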
+/**
+ * papr_platform_dump_create_handle() - Create a fd-based handle for
+ * reading the platform dump.
+ * @dump_tag: Dump ID of the dump to retrieve from the hypervisor.
+ *
+ * Handler for the PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE ioctl command.
+ * Allocates the RTAS parameter struct and the work area, and attaches
+ * them to the file descriptor for reading by user space with multiple
+ * RTAS calls until the dump is complete. These allocations are freed
+ * when the file is released.
+ *
+ * Multiple dump requests with different IDs are allowed at the same
+ * time, but not with the same dump ID. So if user space has already
+ * opened a file descriptor for a specific dump ID, return -EALREADY
+ * for the next request.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+static long papr_platform_dump_create_handle(u64 dump_tag)
+{
+	struct ibm_platform_dump_params *params;
+	u64 param_dump_tag;
+	struct file *file;
+	long err;
+	int fd;
+
+	/*
+	 * Return failure if user space has already opened a FD for
+	 * the specific dump ID. This check prevents multiple dump
+	 * requests for the same dump ID at the same time. Generally
+	 * this is not expected, but guard against it anyway.
+	 */
+	list_for_each_entry(params, &platform_dump_list, list) {
+		param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) |
+					params->dump_tag_lo);
+		if (dump_tag == param_dump_tag) {
+			pr_err("Platform dump for ID(%llu) is already in progress\n",
+			       dump_tag);
+			return -EALREADY;
+		}
+	}
+
+	params = kzalloc(sizeof(struct ibm_platform_dump_params),
+			 GFP_KERNEL_ACCOUNT);
+	if (!params)
+		return -ENOMEM;
+
+	params->work_area = rtas_work_area_alloc(SZ_4K);
+	params->buf_length = SZ_4K;
+	params->dump_tag_hi = (u32)(dump_tag >> 32);
+	params->dump_tag_lo = (u32)(dump_tag & 0x00000000ffffffffULL);
+	params->status = RTAS_IBM_PLATFORM_DUMP_START;
+
+	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		err = fd;
+		goto free_area;
+	}
+
+	file = anon_inode_getfile_fmode("[papr-platform-dump]",
+					&papr_platform_dump_handle_ops,
+					(void *)params, O_RDONLY,
+					FMODE_LSEEK | FMODE_PREAD);
+	if (IS_ERR(file)) {
+		err = PTR_ERR(file);
+		goto put_fd;
+	}
+
+	fd_install(fd, file);
+
+	list_add(&params->list, &platform_dump_list);
+
+	pr_info("%s (%d) initiated platform dump for dump tag %llu\n",
+		current->comm, current->pid, dump_tag);
+	return fd;
+put_fd:
+	put_unused_fd(fd);
+free_area:
+	rtas_work_area_free(params->work_area);
+	kfree(params);
+	return err;
+}
+
+/*
+ * Top-level ioctl handler for /dev/papr-platform-dump.
+ */
+static long papr_platform_dump_dev_ioctl(struct file *filp,
+					 unsigned int ioctl,
+					 unsigned long arg)
+{
+	u64 __user *argp = (void __user *)arg;
+	u64 dump_tag;
+	long ret;
+
+	if (get_user(dump_tag, argp))
+		return -EFAULT;
+
+	switch (ioctl) {
+	case PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE:
+		mutex_lock(&platform_dump_list_mutex);
+		ret = papr_platform_dump_create_handle(dump_tag);
+		mutex_unlock(&platform_dump_list_mutex);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	return ret;
+}
+
+static const struct file_operations papr_platform_dump_ops = {
+	.unlocked_ioctl = papr_platform_dump_dev_ioctl,
+};
+
+static struct miscdevice papr_platform_dump_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "papr-platform-dump",
+	.fops = &papr_platform_dump_ops,
+};
+
+static __init int papr_platform_dump_init(void)
+{
+	if (!rtas_function_implemented(RTAS_FN_IBM_PLATFORM_DUMP))
+		return -ENODEV;
+
+	return misc_register(&papr_platform_dump_dev);
+}
+machine_device_initcall(pseries, papr_platform_dump_init);
diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.c b/arch/powerpc/platforms/pseries/papr-rtas-common.c
new file mode 100644
index 000000000000..33c606e3378a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-rtas-common.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "papr-common: " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched/signal.h>
+#include "papr-rtas-common.h"
+
+/*
+ * A sequence-based RTAS call has to be issued multiple times to
+ * retrieve complete data from the hypervisor. For some of these RTAS
+ * calls, the OS should not interleave calls with different input
+ * until the sequence is completed. So the data is collected for these
+ * calls during the ioctl handler and exported to user space with the
+ * read() handler.
+ * This file provides common functions needed for such sequence-based
+ * RTAS calls, e.g. ibm,get-vpd and ibm,get-indices.
+ */
+
+bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob)
+{
+	return blob->data && blob->len;
+}
+
+void papr_rtas_blob_free(const struct papr_rtas_blob *blob)
+{
+	if (blob) {
+		kvfree(blob->data);
+		kfree(blob);
+	}
+}
+
+/**
+ * papr_rtas_blob_extend() - Append data to a &struct papr_rtas_blob.
+ * @blob: The blob to extend.
+ * @data: The new data to append to @blob.
+ * @len: The length of @data.
+ *
+ * Context: May sleep.
+ * Return: -ENOMEM on allocation failure, 0 otherwise.
+ */
+static int papr_rtas_blob_extend(struct papr_rtas_blob *blob,
+				 const char *data, size_t len)
+{
+	const size_t new_len = blob->len + len;
+	const size_t old_len = blob->len;
+	const char *old_ptr = blob->data;
+	char *new_ptr;
+
+	new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
+	if (!new_ptr)
+		return -ENOMEM;
+
+	memcpy(&new_ptr[old_len], data, len);
+	blob->data = new_ptr;
+	blob->len = new_len;
+	return 0;
+}
+
+/**
+ * papr_rtas_blob_generate() - Construct a new &struct papr_rtas_blob.
+ * @seq: Sequence state, including the caller's work function, which
+ *       is invoked to obtain data with the caller's RTAS call.
+ *
+ * The @seq->work callback is invoked until it returns NULL. @seq is
+ * passed to @seq->work in its first argument on each call. When
+ * @seq->work returns data, it should store the data length in its
+ * second argument.
+ *
+ * Context: May sleep.
+ * Return: A completely populated &struct papr_rtas_blob, or NULL on error.
+ */
+static const struct papr_rtas_blob *
+papr_rtas_blob_generate(struct papr_rtas_sequence *seq)
+{
+	struct papr_rtas_blob *blob;
+	const char *buf;
+	size_t len;
+	int err = 0;
+
+	/*
+	 * Check the work callback before allocating: returning an
+	 * ERR_PTR() after the allocation would leak the blob, and the
+	 * callers only handle NULL.
+	 */
+	if (!seq->work)
+		return NULL;
+
+	blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
+	if (!blob)
+		return NULL;
+
+	while (err == 0 && (buf = seq->work(seq, &len)))
+		err = papr_rtas_blob_extend(blob, buf, len);
+
+	if (err != 0 || !papr_rtas_blob_has_data(blob))
+		goto free_blob;
+
+	return blob;
+free_blob:
+	papr_rtas_blob_free(blob);
+	return NULL;
+}
+
+int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, int err)
+{
+	/* Preserve the first error recorded. */
+	if (seq->error == 0)
+		seq->error = err;
+
+	return seq->error;
+}
+
+/*
+ * Higher-level retrieval code below. These functions use the
+ * papr_rtas_blob_* and sequence_* APIs defined above to create fd-based
+ * handles for consumption by user space.
+ */
+
+/**
+ * papr_rtas_run_sequence() - Run a single retrieval sequence.
+ * @seq: Functions of the caller needed to complete the sequence.
+ *
+ * Context: May sleep. Holds a mutex and an RTAS work area for its
+ *          duration. Typically performs multiple sleepable slab
+ *          allocations.
+ *
+ * Return: A populated &struct papr_rtas_blob on success. Encoded error
+ *         pointer otherwise.
+ */
+static const struct papr_rtas_blob *papr_rtas_run_sequence(struct papr_rtas_sequence *seq)
+{
+	const struct papr_rtas_blob *blob;
+
+	if (seq->begin)
+		seq->begin(seq);
+
+	blob = papr_rtas_blob_generate(seq);
+	if (!blob)
+		papr_rtas_sequence_set_err(seq, -ENOMEM);
+
+	if (seq->end)
+		seq->end(seq);
+
+	if (seq->error) {
+		papr_rtas_blob_free(blob);
+		return ERR_PTR(seq->error);
+	}
+
+	return blob;
+}
+
+/**
+ * papr_rtas_retrieve() - Return the data blob that is exposed to
+ * user space.
+ * @seq: RTAS call specific functions to be invoked until the
+ *       sequence is completed.
+ *
+ * Run sequences against @seq until a blob is successfully
+ * instantiated, or a hard error is encountered, or a fatal signal is
+ * pending.
+ *
+ * Context: May sleep.
+ * Return: A fully populated data blob when successful. Encoded error
+ *         pointer otherwise.
+ */
+const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq)
+{
+	const struct papr_rtas_blob *blob;
+
+	/*
+	 * EAGAIN means the sequence failed with a -4 (data changed,
+	 * restart the sequence) status from the RTAS call, and we
+	 * should attempt a new sequence. PAPR+ (v2.13 R1–7.3.20–5 for
+	 * ibm,get-vpd, R1–7.3.17–6 for ibm,get-indices) indicates that
+	 * this should be a transient condition, not something that
+	 * happens continuously. But we'll stop trying on a fatal
+	 * signal.
+	 */
+	do {
+		blob = papr_rtas_run_sequence(seq);
+		if (!IS_ERR(blob)) /* Success. */
+			break;
+		if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
+			break;
+		cond_resched();
+	} while (!fatal_signal_pending(current));
+
+	return blob;
+}
+
+/**
+ * papr_rtas_setup_file_interface() - Complete the sequence, obtain
+ * the data, and export it to user space with a fd-based handle. User
+ * space then gets the data with the read() handle.
+ * @seq: RTAS call specific functions to get the data.
+ * @fops: RTAS call specific file operations such as read().
+ * @name: RTAS call specific name for the anonymous file.
+ *
+ * Return: The installed fd number if successful, -ve errno otherwise.
+ */
+long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq,
+				    const struct file_operations *fops,
+				    char *name)
+{
+	const struct papr_rtas_blob *blob;
+	struct file *file;
+	long ret;
+	int fd;
+
+	blob = papr_rtas_retrieve(seq);
+	if (IS_ERR(blob))
+		return PTR_ERR(blob);
+
+	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		ret = fd;
+		goto free_blob;
+	}
+
+	file = anon_inode_getfile_fmode(name, fops, (void *)blob,
+					O_RDONLY, FMODE_LSEEK | FMODE_PREAD);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto put_fd;
+	}
+
+	fd_install(fd, file);
+	return fd;
+
+put_fd:
+	put_unused_fd(fd);
+free_blob:
+	papr_rtas_blob_free(blob);
+	return ret;
+}
+
+/*
+ * papr_rtas_sequence_should_stop() - Determine whether an RTAS
+ * retrieval sequence should stop.
+ *
+ * Examines the sequence error state and the outputs of the last call
+ * to the specific RTAS function to determine whether the sequence in
+ * progress should continue or stop.
+ *
+ * Return: True if the sequence has encountered an error or if all data
+ *         for this sequence has been retrieved. False otherwise.
+ */
+bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq,
+				    s32 status, bool init_state)
+{
+	bool done;
+
+	if (seq->error)
+		return true;
+
+	switch (status) {
+	case RTAS_SEQ_COMPLETE:
+		if (init_state)
+			done = false; /* Initial state. */
+		else
+			done = true; /* All data consumed. */
+		break;
+	case RTAS_SEQ_MORE_DATA:
+		done = false; /* More data available. */
+		break;
+	default:
+		done = true; /* Error encountered. */
+		break;
+	}
+
+	return done;
+}
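Taken together, the begin/work/end callbacks plus papr_rtas_setup_file_interface() are all a new sequence-based RTAS call needs to wire up. A sketch of a hypothetical client follows; every foo_* name (including rtas_foo_call() and struct foo_params) is invented for illustration, and the real users are papr-vpd.c and the new files above:

/* Hypothetical client of the common sequence API. */
struct foo_params {
	struct rtas_work_area *work_area;
	u32 written;	/* Bytes deposited by the last rtas_foo_call(). */
	s32 status;	/* RTAS_SEQ_* status from the last call. */
};

static const struct file_operations foo_handle_ops = {
	.read = papr_rtas_common_handle_read,
	.llseek = papr_rtas_common_handle_seek,
	.release = papr_rtas_common_handle_release,
};

static void foo_sequence_begin(struct papr_rtas_sequence *seq)
{
	/* Take the per-call mutex, allocate the work area, and set
	 * the initial sequence number in seq->params. */
}

static void foo_sequence_end(struct papr_rtas_sequence *seq)
{
	/* Free the work area and drop the mutex. */
}

static const char *foo_fill_work_area(struct papr_rtas_sequence *seq, size_t *len)
{
	struct foo_params *p = seq->params;

	if (papr_rtas_sequence_should_stop(seq, p->status, p->written == 0))
		return NULL;	/* Done, or a prior error was recorded. */
	if (papr_rtas_sequence_set_err(seq, rtas_foo_call(p)))
		return NULL;	/* The RTAS call failed; stop the sequence. */
	*len = p->written;
	return rtas_work_area_raw_buf(p->work_area);
}

static long foo_create_handle(struct foo_params *params)
{
	struct papr_rtas_sequence seq = {
		.begin = foo_sequence_begin,
		.end = foo_sequence_end,
		.work = foo_fill_work_area,
		.params = params,
	};

	return papr_rtas_setup_file_interface(&seq, &foo_handle_ops, "[foo]");
}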
+/*
+ * User space read to retrieve data for the corresponding RTAS call.
+ * papr_rtas_blob is filled with the data using the corresponding RTAS
+ * call sequence API.
+ */
+ssize_t papr_rtas_common_handle_read(struct file *file,
+		char __user *buf, size_t size, loff_t *off)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	/* We should not instantiate a handle without any data attached. */
+	if (!papr_rtas_blob_has_data(blob)) {
+		pr_err_once("handle without data\n");
+		return -EIO;
+	}
+
+	return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
+}
+
+int papr_rtas_common_handle_release(struct inode *inode,
+				    struct file *file)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	papr_rtas_blob_free(blob);
+
+	return 0;
+}
+
+loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off,
+				    int whence)
+{
+	const struct papr_rtas_blob *blob = file->private_data;
+
+	return fixed_size_llseek(file, off, whence, blob->len);
+}
diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.h b/arch/powerpc/platforms/pseries/papr-rtas-common.h
new file mode 100644
index 000000000000..4ceabcaf4905
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr-rtas-common.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_PAPR_RTAS_COMMON_H
+#define _ASM_POWERPC_PAPR_RTAS_COMMON_H
+
+#include <linux/types.h>
+
+/*
+ * Return codes for sequence-based RTAS calls. These are not listed
+ * under PAPR+ v2.13 7.2.8 "Return Codes", but are defined in the
+ * specific section of each such RTAS call.
+ */
+#define RTAS_SEQ_COMPLETE	0	/* All data has been retrieved. */
+#define RTAS_SEQ_MORE_DATA	1	/* More data is available. */
+#define RTAS_SEQ_START_OVER	-4	/* Data changed, restart call sequence. */
+
+/*
+ * Internal "blob" APIs for accumulating RTAS call results into
+ * an immutable buffer to be attached to a file descriptor.
+ */
+struct papr_rtas_blob {
+	const char *data;
+	size_t len;
+};
+
+/**
+ * struct papr_rtas_sequence - State for managing a sequence of RTAS calls.
+ * @error: Shall be zero as long as the sequence has not encountered an error,
+ *         -ve errno otherwise. Use papr_rtas_sequence_set_err() to update.
+ * @params: Parameter block to pass to rtas_*() calls.
+ * @begin: Allocates the work area and initializes the parameter
+ *         values passed to the RTAS call.
+ * @end: Frees the allocated work area.
+ * @work: Obtains data with the RTAS call; invoked repeatedly until the
+ *        sequence is completed.
+ * + */ +struct papr_rtas_sequence { + int error; + void *params; + void (*begin)(struct papr_rtas_sequence *seq); + void (*end)(struct papr_rtas_sequence *seq); + const char *(*work)(struct papr_rtas_sequence *seq, size_t *len); +}; + +extern bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob); +extern void papr_rtas_blob_free(const struct papr_rtas_blob *blob); +extern int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, + int err); +extern const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq); +extern long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq, + const struct file_operations *fops, char *name); +extern bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq, + s32 status, bool init_state); +extern ssize_t papr_rtas_common_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off); +extern int papr_rtas_common_handle_release(struct inode *inode, + struct file *file); +extern loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off, + int whence); +#endif /* _ASM_POWERPC_PAPR_RTAS_COMMON_H */ + diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c index c29e85db5f35..f38c188fc4a1 100644 --- a/arch/powerpc/platforms/pseries/papr-vpd.c +++ b/arch/powerpc/platforms/pseries/papr-vpd.c @@ -2,7 +2,6 @@ #define pr_fmt(fmt) "papr-vpd: " fmt -#include <linux/anon_inodes.h> #include <linux/build_bug.h> #include <linux/file.h> #include <linux/fs.h> @@ -20,14 +19,7 @@ #include <asm/rtas-work-area.h> #include <asm/rtas.h> #include <uapi/asm/papr-vpd.h> - -/* - * Function-specific return values for ibm,get-vpd, derived from PAPR+ - * v2.13 7.3.20 "ibm,get-vpd RTAS Call". - */ -#define RTAS_IBM_GET_VPD_COMPLETE 0 /* All VPD has been retrieved. */ -#define RTAS_IBM_GET_VPD_MORE_DATA 1 /* More VPD is available. */ -#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */ +#include "papr-rtas-common.h" /** * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd. @@ -91,13 +83,14 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) case RTAS_INVALID_PARAMETER: ret = -EINVAL; break; - case RTAS_IBM_GET_VPD_START_OVER: + case RTAS_SEQ_START_OVER: ret = -EAGAIN; + pr_info_ratelimited("VPD changed during retrieval, retrying\n"); break; - case RTAS_IBM_GET_VPD_MORE_DATA: + case RTAS_SEQ_MORE_DATA: params->sequence = rets[0]; fallthrough; - case RTAS_IBM_GET_VPD_COMPLETE: + case RTAS_SEQ_COMPLETE: params->written = rets[1]; /* * Kernel or firmware bug, do not continue. @@ -119,94 +112,6 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) } /* - * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into - * an immutable buffer to be attached to a file descriptor. - */ -struct vpd_blob { - const char *data; - size_t len; -}; - -static bool vpd_blob_has_data(const struct vpd_blob *blob) -{ - return blob->data && blob->len; -} - -static void vpd_blob_free(const struct vpd_blob *blob) -{ - if (blob) { - kvfree(blob->data); - kfree(blob); - } -} - -/** - * vpd_blob_extend() - Append data to a &struct vpd_blob. - * @blob: The blob to extend. - * @data: The new data to append to @blob. - * @len: The length of @data. - * - * Context: May sleep. - * Return: -ENOMEM on allocation failure, 0 otherwise. 
- */ -static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len) -{ - const size_t new_len = blob->len + len; - const size_t old_len = blob->len; - const char *old_ptr = blob->data; - char *new_ptr; - - new_ptr = old_ptr ? - kvrealloc(old_ptr, old_len, new_len, GFP_KERNEL_ACCOUNT) : - kvmalloc(len, GFP_KERNEL_ACCOUNT); - - if (!new_ptr) - return -ENOMEM; - - memcpy(&new_ptr[old_len], data, len); - blob->data = new_ptr; - blob->len = new_len; - return 0; -} - -/** - * vpd_blob_generate() - Construct a new &struct vpd_blob. - * @generator: Function that supplies the blob data. - * @arg: Context pointer supplied by caller, passed to @generator. - * - * The @generator callback is invoked until it returns NULL. @arg is - * passed to @generator in its first argument on each call. When - * @generator returns data, it should store the data length in its - * second argument. - * - * Context: May sleep. - * Return: A completely populated &struct vpd_blob, or NULL on error. - */ -static const struct vpd_blob * -vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg) -{ - struct vpd_blob *blob; - const char *buf; - size_t len; - int err = 0; - - blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT); - if (!blob) - return NULL; - - while (err == 0 && (buf = generator(arg, &len))) - err = vpd_blob_extend(blob, buf, len); - - if (err != 0 || !vpd_blob_has_data(blob)) - goto free_blob; - - return blob; -free_blob: - vpd_blob_free(blob); - return NULL; -} - -/* * Internal VPD sequence APIs. A VPD sequence is a series of calls to * ibm,get-vpd for a given location code. The sequence ends when an * error is encountered or all VPD for the location code has been @@ -214,30 +119,14 @@ free_blob: */ /** - * struct vpd_sequence - State for managing a VPD sequence. - * @error: Shall be zero as long as the sequence has not encountered an error, - * -ve errno otherwise. Use vpd_sequence_set_err() to update this. - * @params: Parameter block to pass to rtas_ibm_get_vpd(). - */ -struct vpd_sequence { - int error; - struct rtas_ibm_get_vpd_params params; -}; - -/** * vpd_sequence_begin() - Begin a VPD retrieval sequence. - * @seq: Uninitialized sequence state. - * @loc_code: Location code that defines the scope of the VPD to return. - * - * Initializes @seq with the resources necessary to carry out a VPD - * sequence. Callers must pass @seq to vpd_sequence_end() regardless - * of whether the sequence succeeds. + * @seq: vpd call parameters from sequence struct * * Context: May sleep. */ -static void vpd_sequence_begin(struct vpd_sequence *seq, - const struct papr_location_code *loc_code) +static void vpd_sequence_begin(struct papr_rtas_sequence *seq) { + struct rtas_ibm_get_vpd_params *vpd_params; /* * Use a static data structure for the location code passed to * RTAS to ensure it's in the RMA and avoid a separate work @@ -245,6 +134,7 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, */ static struct papr_location_code static_loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; /* * We could allocate the work area before acquiring the * function lock, but that would allow concurrent requests to @@ -252,14 +142,12 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, * allocate the work area under the lock. 
*/ mutex_lock(&rtas_ibm_get_vpd_lock); - static_loc_code = *loc_code; - *seq = (struct vpd_sequence) { - .params = { - .work_area = rtas_work_area_alloc(SZ_4K), - .loc_code = &static_loc_code, - .sequence = 1, - }, - }; + static_loc_code = *(struct papr_location_code *)vpd_params->loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + vpd_params->work_area = rtas_work_area_alloc(SZ_4K); + vpd_params->loc_code = &static_loc_code; + vpd_params->sequence = 1; + vpd_params->status = 0; } /** @@ -268,180 +156,39 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, * * Releases resources obtained by vpd_sequence_begin(). */ -static void vpd_sequence_end(struct vpd_sequence *seq) -{ - rtas_work_area_free(seq->params.work_area); - mutex_unlock(&rtas_ibm_get_vpd_lock); -} - -/** - * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence - * should continue. - * @seq: VPD sequence state. - * - * Examines the sequence error state and outputs of the last call to - * ibm,get-vpd to determine whether the sequence in progress should - * continue or stop. - * - * Return: True if the sequence has encountered an error or if all VPD for - * this sequence has been retrieved. False otherwise. - */ -static bool vpd_sequence_should_stop(const struct vpd_sequence *seq) +static void vpd_sequence_end(struct papr_rtas_sequence *seq) { - bool done; + struct rtas_ibm_get_vpd_params *vpd_params; - if (seq->error) - return true; - - switch (seq->params.status) { - case 0: - if (seq->params.written == 0) - done = false; /* Initial state. */ - else - done = true; /* All data consumed. */ - break; - case 1: - done = false; /* More data available. */ - break; - default: - done = true; /* Error encountered. */ - break; - } - - return done; -} - -static int vpd_sequence_set_err(struct vpd_sequence *seq, int err) -{ - /* Preserve the first error recorded. */ - if (seq->error == 0) - seq->error = err; - - return seq->error; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + rtas_work_area_free(vpd_params->work_area); + mutex_unlock(&rtas_ibm_get_vpd_lock); } /* - * Generator function to be passed to vpd_blob_generate(). + * Generator function to be passed to papr_rtas_blob_generate(). */ -static const char *vpd_sequence_fill_work_area(void *arg, size_t *len) +static const char *vpd_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) { - struct vpd_sequence *seq = arg; - struct rtas_ibm_get_vpd_params *p = &seq->params; + struct rtas_ibm_get_vpd_params *p; + bool init_state; + + p = (struct rtas_ibm_get_vpd_params *)seq->params; + init_state = (p->written == 0) ? true : false; - if (vpd_sequence_should_stop(seq)) + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) return NULL; - if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p))) + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_vpd(p))) return NULL; *len = p->written; return rtas_work_area_raw_buf(p->work_area); } -/* - * Higher-level VPD retrieval code below. These functions use the - * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based - * VPD handles for consumption by user space. - */ - -/** - * papr_vpd_run_sequence() - Run a single VPD retrieval sequence. - * @loc_code: Location code that defines the scope of VPD to return. - * - * Context: May sleep. Holds a mutex and an RTAS work area for its - * duration. Typically performs multiple sleepable slab - * allocations. - * - * Return: A populated &struct vpd_blob on success. Encoded error - * pointer otherwise. 
- */ -static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code) -{ - const struct vpd_blob *blob; - struct vpd_sequence seq; - - vpd_sequence_begin(&seq, loc_code); - blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq); - if (!blob) - vpd_sequence_set_err(&seq, -ENOMEM); - vpd_sequence_end(&seq); - - if (seq.error) { - vpd_blob_free(blob); - return ERR_PTR(seq.error); - } - - return blob; -} - -/** - * papr_vpd_retrieve() - Return the VPD for a location code. - * @loc_code: Location code that defines the scope of VPD to return. - * - * Run VPD sequences against @loc_code until a blob is successfully - * instantiated, or a hard error is encountered, or a fatal signal is - * pending. - * - * Context: May sleep. - * Return: A fully populated VPD blob when successful. Encoded error - * pointer otherwise. - */ -static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code) -{ - const struct vpd_blob *blob; - - /* - * EAGAIN means the sequence errored with a -4 (VPD changed) - * status from ibm,get-vpd, and we should attempt a new - * sequence. PAPR+ v2.13 R1–7.3.20–5 indicates that this - * should be a transient condition, not something that happens - * continuously. But we'll stop trying on a fatal signal. - */ - do { - blob = papr_vpd_run_sequence(loc_code); - if (!IS_ERR(blob)) /* Success. */ - break; - if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */ - break; - pr_info_ratelimited("VPD changed during retrieval, retrying\n"); - cond_resched(); - } while (!fatal_signal_pending(current)); - - return blob; -} - -static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off) -{ - const struct vpd_blob *blob = file->private_data; - - /* bug: we should not instantiate a handle without any data attached. 
*/ - if (!vpd_blob_has_data(blob)) { - pr_err_once("handle without data\n"); - return -EIO; - } - - return simple_read_from_buffer(buf, size, off, blob->data, blob->len); -} - -static int papr_vpd_handle_release(struct inode *inode, struct file *file) -{ - const struct vpd_blob *blob = file->private_data; - - vpd_blob_free(blob); - - return 0; -} - -static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence) -{ - const struct vpd_blob *blob = file->private_data; - - return fixed_size_llseek(file, off, whence, blob->len); -} - - static const struct file_operations papr_vpd_handle_ops = { - .read = papr_vpd_handle_read, - .llseek = papr_vpd_handle_seek, - .release = papr_vpd_handle_release, + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, }; /** @@ -463,10 +210,9 @@ static const struct file_operations papr_vpd_handle_ops = { */ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) { + struct rtas_ibm_get_vpd_params vpd_params = {}; + struct papr_rtas_sequence seq = {}; struct papr_location_code klc; - const struct vpd_blob *blob; - struct file *file; - long err; int fd; if (copy_from_user(&klc, ulc, sizeof(klc))) @@ -475,31 +221,19 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str))) return -EINVAL; - blob = papr_vpd_retrieve(&klc); - if (IS_ERR(blob)) - return PTR_ERR(blob); + seq = (struct papr_rtas_sequence) { + .begin = vpd_sequence_begin, + .end = vpd_sequence_end, + .work = vpd_sequence_fill_work_area, + }; - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); - if (fd < 0) { - err = fd; - goto free_blob; - } + vpd_params.loc_code = &klc; + seq.params = (void *)&vpd_params; - file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops, - (void *)blob, O_RDONLY); - if (IS_ERR(file)) { - err = PTR_ERR(file); - goto put_fd; - } + fd = papr_rtas_setup_file_interface(&seq, &papr_vpd_handle_ops, + "[papr-vpd]"); - file->f_mode |= FMODE_LSEEK | FMODE_PREAD; - fd_install(fd, file); return fd; -put_fd: - put_unused_fd(fd); -free_blob: - vpd_blob_free(blob); - return err; } /* diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index c233f9db039b..f7c9271bda58 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/ioport.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/ndctl.h> #include <linux/sched.h> @@ -16,9 +17,10 @@ #include <linux/nd.h> #include <asm/plpar_wrappers.h> -#include <asm/papr_pdsm.h> +#include <uapi/linux/papr_pdsm.h> +#include <linux/papr_scm.h> #include <asm/mce.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/perf_event.h> #define BIND_ANY_ADDR (~0ul) @@ -29,46 +31,6 @@ (1ul << ND_CMD_SET_CONFIG_DATA) | \ (1ul << ND_CMD_CALL)) -/* DIMM health bitmap indicators */ -/* SCM device is unable to persist memory contents */ -#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) -/* SCM device failed to persist memory contents */ -#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) -/* SCM device contents are persisted from previous IPL */ -#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2)) -/* SCM device contents are not persisted from previous IPL */ -#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) -/* SCM device memory life remaining is critically low */ -#define 
PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) -/* SCM device will be garded off next IPL due to failure */ -#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) -/* SCM contents cannot persist due to current platform health status */ -#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) -/* SCM device is unable to persist memory contents in certain conditions */ -#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) -/* SCM device is encrypted */ -#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) -/* SCM device has been scrubbed and locked */ -#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) - -/* Bits status indicators for health bitmap indicating unarmed dimm */ -#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -/* Bits status indicators for health bitmap indicating unflushed dimm */ -#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) - -/* Bits status indicators for health bitmap indicating unrestored dimm */ -#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) - -/* Bit status indicators for smart event notification */ -#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ - PAPR_PMEM_HEALTH_FATAL | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) -#define PAPR_SCM_PERF_STATS_VERSION 0x1 - /* Struct holding a single performance metric */ struct papr_scm_perf_stat { u8 stat_id[8]; @@ -582,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p) /* Jiffies offset for which the health data is assumed to be same */ cache_timeout = p->lasthealth_jiffies + - msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000); + secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL); /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */ if (time_after(jiffies, cache_timeout)) @@ -1548,7 +1510,7 @@ static const struct of_device_id papr_scm_match[] = { static struct platform_driver papr_scm_driver = { .probe = papr_scm_probe, - .remove_new = papr_scm_remove, + .remove = papr_scm_remove, .driver = { .name = "papr_scm", .of_match_table = papr_scm_match, @@ -1575,5 +1537,6 @@ static void __exit papr_scm_exit(void) module_exit(papr_scm_exit); MODULE_DEVICE_TABLE(of, papr_scm_match); +MODULE_DESCRIPTION("PAPR Storage Class Memory interface driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 1772ae3d193d..6dbc73eb2ca2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -18,33 +18,6 @@ #include <asm/pci.h> #include "pseries.h" -#if 0 -void pcibios_name_device(struct pci_dev *dev) -{ - struct device_node *dn; - - /* - * Add IBM loc code (slot) as a prefix to the device names for service - */ - dn = pci_device_to_OF_node(dev); - if (dn) { - const char *loc_code = of_get_property(dn, "ibm,loc-code", - NULL); - if (loc_code) { - int loc_len = strlen(loc_code); - if (loc_len < sizeof(dev->dev.name)) { - memmove(dev->dev.name+loc_len+1, dev->dev.name, - sizeof(dev->dev.name)-loc_len-1); - memcpy(dev->dev.name, loc_code, loc_len); - dev->dev.name[loc_len] = ' '; - dev->dev.name[sizeof(dev->dev.name)-1] = '\0'; - } - } - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); -#endif - #ifdef CONFIG_PCI_IOV #define MAX_VFS_FOR_MAP_PE 256 struct pe_map_bar_entry { diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 4448386268d9..52e2623a741d 100644 --- 
a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/export.h> +#include <linux/node.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> @@ -21,9 +22,22 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; + int nid; pr_debug("PCI: Initializing new hotplug PHB %pOF\n", dn); + nid = of_node_to_nid(dn); + if (likely((nid) >= 0)) { + if (!node_online(nid)) { + if (__register_one_node(nid)) { + pr_err("PCI: Failed to register node %d\n", nid); + } else { + update_numa_distance(dn); + node_set_online(nid); + } + } + } + phb = pcibios_alloc_controller(dn); if (!phb) return NULL; diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c index febe18f251d0..b1667ed05f98 100644 --- a/arch/powerpc/platforms/pseries/plpks.c +++ b/arch/powerpc/platforms/pseries/plpks.c @@ -415,8 +415,7 @@ static int plpks_confirm_object_flushed(struct label *label, break; } - usleep_range(PLPKS_FLUSH_SLEEP, - PLPKS_FLUSH_SLEEP + PLPKS_FLUSH_SLEEP_RANGE); + fsleep(PLPKS_FLUSH_SLEEP); timeout = timeout + PLPKS_FLUSH_SLEEP; } while (timeout < PLPKS_MAX_TIMEOUT); @@ -464,9 +463,10 @@ int plpks_signed_update_var(struct plpks_var *var, u64 flags) continuetoken = retbuf[0]; if (pseries_status_to_err(rc) == -EBUSY) { - int delay_ms = get_longbusy_msecs(rc); - mdelay(delay_ms); - timeout += delay_ms; + int delay_us = get_longbusy_msecs(rc) * 1000; + + fsleep(delay_us); + timeout += delay_us; } rc = pseries_status_to_err(rc); } while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT); @@ -683,7 +683,7 @@ void __init plpks_early_init_devtree(void) out: fdt_nop_property(fdt, chosen_node, "ibm,plpks-pw"); // Since we've cleared the password, we must update the FDT checksum - early_init_dt_verify(fdt); + early_init_dt_verify(fdt, __pa(fdt)); } static __init int pseries_plpks_init(void) diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c index 3c290b9ed01b..0f1d45f32e4a 100644 --- a/arch/powerpc/platforms/pseries/pmem.c +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) return -EINVAL; } - drc_index = hp_elog->_drc_u.drc_index; + drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); lock_device_hotplug(); diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index bba4ad192b0f..3968a6970fa8 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { } #endif extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary); -void pseries_machine_kexec(struct kimage *image); extern void pSeries_final_fixup(void); diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c index b5853e9fcc3c..eceb3289383e 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.c +++ b/arch/powerpc/platforms/pseries/rtas-fadump.c @@ -18,6 +18,7 @@ #include <asm/page.h> #include <asm/rtas.h> +#include <asm/setup.h> #include <asm/fadump.h> #include <asm/fadump-internal.h> @@ -29,9 +30,6 @@ static const struct rtas_fadump_mem_struct *fdm_active; static void rtas_fadump_update_config(struct fw_dump *fadump_conf, const struct rtas_fadump_mem_struct *fdm) { - fadump_conf->boot_mem_dest_addr = - be64_to_cpu(fdm->rmr_region.destination_address); - 
fadump_conf->fadumphdr_addr = (fadump_conf->boot_mem_dest_addr + fadump_conf->boot_memory_size); } @@ -43,20 +41,56 @@ static void rtas_fadump_update_config(struct fw_dump *fadump_conf, static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf, const struct rtas_fadump_mem_struct *fdm) { - fadump_conf->boot_mem_addr[0] = - be64_to_cpu(fdm->rmr_region.source_address); - fadump_conf->boot_mem_sz[0] = be64_to_cpu(fdm->rmr_region.source_len); - fadump_conf->boot_memory_size = fadump_conf->boot_mem_sz[0]; + unsigned long base, size, last_end, hole_size; - fadump_conf->boot_mem_top = fadump_conf->boot_memory_size; - fadump_conf->boot_mem_regs_cnt = 1; + last_end = 0; + hole_size = 0; + fadump_conf->boot_memory_size = 0; + fadump_conf->boot_mem_regs_cnt = 0; + pr_debug("Boot memory regions:\n"); + for (int i = 0; i < be16_to_cpu(fdm->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm->rgn[i].source_data_type); + u64 addr; - /* - * Start address of reserve dump area (permanent reservation) for - * re-registering FADump after dump capture. - */ - fadump_conf->reserve_dump_area_start = - be64_to_cpu(fdm->cpu_state_data.destination_address); + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + addr = be64_to_cpu(fdm->rgn[i].destination_address); + + fadump_conf->cpu_state_dest_vaddr = (u64)__va(addr); + /* + * Start address of reserve dump area (permanent reservation) for + * re-registering FADump after dump capture. + */ + fadump_conf->reserve_dump_area_start = addr; + break; + case RTAS_FADUMP_HPTE_REGION: + /* Not processed currently. */ + break; + case RTAS_FADUMP_REAL_MODE_REGION: + base = be64_to_cpu(fdm->rgn[i].source_address); + size = be64_to_cpu(fdm->rgn[i].source_len); + pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); + if (!base) { + fadump_conf->boot_mem_dest_addr = + be64_to_cpu(fdm->rgn[i].destination_address); + } + + fadump_conf->boot_mem_addr[fadump_conf->boot_mem_regs_cnt] = base; + fadump_conf->boot_mem_sz[fadump_conf->boot_mem_regs_cnt] = size; + fadump_conf->boot_memory_size += size; + hole_size += (base - last_end); + last_end = base + size; + fadump_conf->boot_mem_regs_cnt++; + break; + case RTAS_FADUMP_PARAM_AREA: + fadump_conf->param_area = be64_to_cpu(fdm->rgn[i].destination_address); + break; + default: + pr_warn("Section type %d unsupported on this kernel. Ignoring!\n", type); + break; + } + } + fadump_conf->boot_mem_top = fadump_conf->boot_memory_size + hole_size; rtas_fadump_update_config(fadump_conf, fdm); } @@ -64,16 +98,15 @@ static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf, static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) { u64 addr = fadump_conf->reserve_dump_area_start; + u16 sec_cnt = 0; memset(&fdm, 0, sizeof(struct rtas_fadump_mem_struct)); addr = addr & PAGE_MASK; fdm.header.dump_format_version = cpu_to_be32(0x00000001); - fdm.header.dump_num_sections = cpu_to_be16(3); fdm.header.dump_status_flag = 0; fdm.header.offset_first_dump_section = - cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, - cpu_state_data)); + cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct, rgn)); /* * Fields for disk dump option. @@ -89,25 +122,22 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) /* Kernel dump sections */ /* cpu state data section. 
*/ - fdm.cpu_state_data.request_flag = - cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.cpu_state_data.source_data_type = - cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); - fdm.cpu_state_data.source_address = 0; - fdm.cpu_state_data.source_len = - cpu_to_be64(fadump_conf->cpu_state_data_size); - fdm.cpu_state_data.destination_address = cpu_to_be64(addr); + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA); + fdm.rgn[sec_cnt].source_address = 0; + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->cpu_state_data_size); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); addr += fadump_conf->cpu_state_data_size; + sec_cnt++; /* hpte region section */ - fdm.hpte_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.hpte_region.source_data_type = - cpu_to_be16(RTAS_FADUMP_HPTE_REGION); - fdm.hpte_region.source_address = 0; - fdm.hpte_region.source_len = - cpu_to_be64(fadump_conf->hpte_region_size); - fdm.hpte_region.destination_address = cpu_to_be64(addr); + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_HPTE_REGION); + fdm.rgn[sec_cnt].source_address = 0; + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->hpte_region_size); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); addr += fadump_conf->hpte_region_size; + sec_cnt++; /* * Align boot memory area destination address to page boundary to @@ -115,14 +145,29 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf) */ addr = PAGE_ALIGN(addr); - /* RMA region section */ - fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); - fdm.rmr_region.source_data_type = - cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); - fdm.rmr_region.source_address = cpu_to_be64(0); - fdm.rmr_region.source_len = cpu_to_be64(fadump_conf->boot_memory_size); - fdm.rmr_region.destination_address = cpu_to_be64(addr); - addr += fadump_conf->boot_memory_size; + /* First boot memory region destination address */ + fadump_conf->boot_mem_dest_addr = addr; + for (int i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { + /* Boot memory regions */ + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION); + fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->boot_mem_addr[i]); + fdm.rgn[sec_cnt].source_len = cpu_to_be64(fadump_conf->boot_mem_sz[i]); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(addr); + addr += fadump_conf->boot_mem_sz[i]; + sec_cnt++; + } + + /* Parameters area */ + if (fadump_conf->param_area) { + fdm.rgn[sec_cnt].request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG); + fdm.rgn[sec_cnt].source_data_type = cpu_to_be16(RTAS_FADUMP_PARAM_AREA); + fdm.rgn[sec_cnt].source_address = cpu_to_be64(fadump_conf->param_area); + fdm.rgn[sec_cnt].source_len = cpu_to_be64(COMMAND_LINE_SIZE); + fdm.rgn[sec_cnt].destination_address = cpu_to_be64(fadump_conf->param_area); + sec_cnt++; + } + fdm.header.dump_num_sections = cpu_to_be16(sec_cnt); rtas_fadump_update_config(fadump_conf, &fdm); @@ -136,14 +181,21 @@ static u64 rtas_fadump_get_bootmem_min(void) static int rtas_fadump_register(struct fw_dump *fadump_conf) { - unsigned int wait_time; + unsigned int wait_time, fdm_size; int rc, err = -EIO; + /* + * Platform requires the exact size of the Dump Memory Structure. 
+ * Avoid including any unused rgns in the calculation, as this + * could result in a parameter error (-3) from the platform. + */ + fdm_size = sizeof(struct rtas_fadump_section_header); + fdm_size += be16_to_cpu(fdm.header.dump_num_sections) * sizeof(struct rtas_fadump_section); + /* TODO: Add upper time limit for the delay */ do { rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1, - NULL, FADUMP_REGISTER, &fdm, - sizeof(struct rtas_fadump_mem_struct)); + NULL, FADUMP_REGISTER, &fdm, fdm_size); wait_time = rtas_busy_delay_time(rc); if (wait_time) @@ -161,9 +213,7 @@ static int rtas_fadump_register(struct fw_dump *fadump_conf) pr_err("Failed to register. Hardware Error(%d).\n", rc); break; case -3: - if (!is_fadump_boot_mem_contiguous()) - pr_err("Can't have holes in boot memory area.\n"); - else if (!is_fadump_reserved_mem_contiguous()) + if (!is_fadump_reserved_mem_contiguous()) pr_err("Can't have holes in reserved memory area.\n"); pr_err("Failed to register. Parameter Error(%d).\n", rc); @@ -316,11 +366,9 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) u32 num_cpus, *note_buf; int i, rc = 0, cpu = 0; struct pt_regs regs; - unsigned long addr; void *vaddr; - addr = be64_to_cpu(fdm_active->cpu_state_data.destination_address); - vaddr = __va(addr); + vaddr = (void *)fadump_conf->cpu_state_dest_vaddr; reg_header = vaddr; if (be64_to_cpu(reg_header->magic_number) != @@ -375,11 +423,8 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf) } final_note(note_buf); - if (fdh) { - pr_debug("Updating elfcore header (%llx) with cpu notes\n", - fdh->elfcorehdr_addr); - fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); - } + pr_debug("Updating elfcore header (%llx) with cpu notes\n", fadump_conf->elfcorehdr_addr); + fadump_update_elfcore_header((char *)fadump_conf->elfcorehdr_addr); return 0; error_out: @@ -389,57 +434,66 @@ error_out: } /* - * Validate and process the dump data stored by firmware before exporting - * it through '/proc/vmcore'. + * Validate and process the dump data stored by the firmware, and update + * the CPU notes of elfcorehdr. */ static int __init rtas_fadump_process(struct fw_dump *fadump_conf) { - struct fadump_crash_info_header *fdh; - int rc = 0; - if (!fdm_active || !fadump_conf->fadumphdr_addr) return -EINVAL; /* Check if the dump data is valid. 
*/ - if ((be16_to_cpu(fdm_active->header.dump_status_flag) == - RTAS_FADUMP_ERROR_FLAG) || - (fdm_active->cpu_state_data.error_flags != 0) || - (fdm_active->rmr_region.error_flags != 0)) { - pr_err("Dump taken by platform is not valid\n"); - return -EINVAL; - } - if ((fdm_active->rmr_region.bytes_dumped != - fdm_active->rmr_region.source_len) || - !fdm_active->cpu_state_data.bytes_dumped) { - pr_err("Dump taken by platform is incomplete\n"); - return -EINVAL; - } + for (int i = 0; i < be16_to_cpu(fdm_active->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm_active->rgn[i].source_data_type); + int rc = 0; - /* Validate the fadump crash info header */ - fdh = __va(fadump_conf->fadumphdr_addr); - if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { - pr_err("Crash info header is not valid.\n"); - return -EINVAL; + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + case RTAS_FADUMP_HPTE_REGION: + case RTAS_FADUMP_REAL_MODE_REGION: + if (fdm_active->rgn[i].error_flags != 0) { + pr_err("Dump taken by platform is not valid (%d)\n", i); + rc = -EINVAL; + } + if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len) { + pr_err("Dump taken by platform is incomplete (%d)\n", i); + rc = -EINVAL; + } + if (rc) { + pr_warn("Region type: %u src addr: 0x%llx dest addr: 0x%llx\n", + be16_to_cpu(fdm_active->rgn[i].source_data_type), + be64_to_cpu(fdm_active->rgn[i].source_address), + be64_to_cpu(fdm_active->rgn[i].destination_address)); + return rc; + } + break; + case RTAS_FADUMP_PARAM_AREA: + if (fdm_active->rgn[i].bytes_dumped != fdm_active->rgn[i].source_len || + fdm_active->rgn[i].error_flags != 0) { + pr_warn("Failed to process additional parameters! Proceeding anyway..\n"); + fadump_conf->param_area = 0; + } + break; + default: + /* + * If the first/crashed kernel added a new region type that the + * second/fadump kernel doesn't recognize, skip it and process + * assuming backward compatibility. + */ + pr_warn("Unknown region found: type: %u src addr: 0x%llx dest addr: 0x%llx\n", + be16_to_cpu(fdm_active->rgn[i].source_data_type), + be64_to_cpu(fdm_active->rgn[i].source_address), + be64_to_cpu(fdm_active->rgn[i].destination_address)); + break; + } } - rc = rtas_fadump_build_cpu_notes(fadump_conf); - if (rc) - return rc; - - /* - * We are done validating dump info and elfcore header is now ready - * to be exported. set elfcorehdr_addr so that vmcore module will - * export the elfcore header through '/proc/vmcore'. 
- */ - elfcorehdr_addr = fdh->elfcorehdr_addr; - - return 0; + return rtas_fadump_build_cpu_notes(fadump_conf); } static void rtas_fadump_region_show(struct fw_dump *fadump_conf, struct seq_file *m) { - const struct rtas_fadump_section *cpu_data_section; const struct rtas_fadump_mem_struct *fdm_ptr; if (fdm_active) @@ -447,27 +501,49 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf, else fdm_ptr = &fdm; - cpu_data_section = &(fdm_ptr->cpu_state_data); - seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", - be64_to_cpu(cpu_data_section->destination_address), - be64_to_cpu(cpu_data_section->destination_address) + - be64_to_cpu(cpu_data_section->source_len) - 1, - be64_to_cpu(cpu_data_section->source_len), - be64_to_cpu(cpu_data_section->bytes_dumped)); - - seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", - be64_to_cpu(fdm_ptr->hpte_region.destination_address), - be64_to_cpu(fdm_ptr->hpte_region.destination_address) + - be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, - be64_to_cpu(fdm_ptr->hpte_region.source_len), - be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); - - seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", - be64_to_cpu(fdm_ptr->rmr_region.source_address), - be64_to_cpu(fdm_ptr->rmr_region.destination_address)); - seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", - be64_to_cpu(fdm_ptr->rmr_region.source_len), - be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); + + for (int i = 0; i < be16_to_cpu(fdm_ptr->header.dump_num_sections); i++) { + int type = be16_to_cpu(fdm_ptr->rgn[i].source_data_type); + + switch (type) { + case RTAS_FADUMP_CPU_STATE_DATA: + seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_HPTE_REGION: + seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_REAL_MODE_REGION: + seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", + be64_to_cpu(fdm_ptr->rgn[i].source_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address)); + seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", + be64_to_cpu(fdm_ptr->rgn[i].source_len), + be64_to_cpu(fdm_ptr->rgn[i].bytes_dumped)); + break; + case RTAS_FADUMP_PARAM_AREA: + seq_printf(m, "\n[%#016llx-%#016llx]: cmdline append: '%s'\n", + be64_to_cpu(fdm_ptr->rgn[i].destination_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address) + + be64_to_cpu(fdm_ptr->rgn[i].source_len) - 1, + (char *)__va(be64_to_cpu(fdm_ptr->rgn[i].destination_address))); + break; + default: + seq_printf(m, "Unknown region type %d : Src: %#016llx, Dest: %#016llx, ", + type, be64_to_cpu(fdm_ptr->rgn[i].source_address), + be64_to_cpu(fdm_ptr->rgn[i].destination_address)); + break; + } + } /* Dump is active. Show preserved area start address. 
*/ if (fdm_active) { @@ -483,6 +559,20 @@ static void rtas_fadump_trigger(struct fadump_crash_info_header *fdh, rtas_os_term((char *)msg); } +/* Must return FADUMP_MAX_MEM_REGS or lower */ +static int rtas_fadump_max_boot_mem_rgns(void) +{ + /* + * Version 1 of the Kernel Assisted Dump Memory Structure (PAPR) supports 10 sections. + * With one section each taken for CPU state data and HPTE, 8 sections + * can be used for boot memory regions. + * + * If new region types are defined, the maximum number of boot memory regions + * decreases proportionally; with the parameters area in use, 7 remain + * (RTAS_FADUMP_MAX_BOOT_MEM_REGS). + */ + return RTAS_FADUMP_MAX_BOOT_MEM_REGS; +} + static struct fadump_ops rtas_fadump_ops = { .fadump_init_mem_struct = rtas_fadump_init_mem_struct, .fadump_get_bootmem_min = rtas_fadump_get_bootmem_min, @@ -492,6 +582,7 @@ static struct fadump_ops rtas_fadump_ops = { .fadump_process = rtas_fadump_process, .fadump_region_show = rtas_fadump_region_show, .fadump_trigger = rtas_fadump_trigger, + .fadump_max_boot_mem_rgns = rtas_fadump_max_boot_mem_rgns, }; void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) @@ -508,9 +599,10 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) if (!token) return; - fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token); - fadump_conf->ops = &rtas_fadump_ops; - fadump_conf->fadump_supported = 1; + fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token); + fadump_conf->ops = &rtas_fadump_ops; + fadump_conf->fadump_supported = 1; + fadump_conf->param_area_supported = 1; /* Firmware supports 64-bit value for size, align it to pagesize. */ fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE); diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.h b/arch/powerpc/platforms/pseries/rtas-fadump.h index fd59bd7ca9c3..c109abf6befd 100644 --- a/arch/powerpc/platforms/pseries/rtas-fadump.h +++ b/arch/powerpc/platforms/pseries/rtas-fadump.h @@ -23,12 +23,24 @@ #define RTAS_FADUMP_HPTE_REGION 0x0002 #define RTAS_FADUMP_REAL_MODE_REGION 0x0011 +/* OS defined sections */ +#define RTAS_FADUMP_PARAM_AREA 0x0100 + /* Dump request flag */ #define RTAS_FADUMP_REQUEST_FLAG 0x00000001 /* Dump status flag */ #define RTAS_FADUMP_ERROR_FLAG 0x2000 +/* + * The Firmware Assisted Dump Memory structure supports a maximum of + * 10 sections. Presently, three sections are used for CPU state data, + * HPTE and the parameters area, while the remaining seven sections + * can be used for boot memory regions. + */ +#define MAX_SECTIONS 10 +#define RTAS_FADUMP_MAX_BOOT_MEM_REGS 7 + /* Kernel Dump section info */ struct rtas_fadump_section { __be32 request_flag; @@ -61,20 +73,15 @@ struct rtas_fadump_section_header { * Firmware Assisted dump memory structure. This structure is required for * registering future kernel dump with power firmware through rtas call. * - * No disk dump option. Hence disk dump path string section is not included. + * In version 1, the platform permits one section header, a dump-disk path + * and ten sections. + * + * Note: No disk dump option. Hence the disk dump path string section is not + * included. */ struct rtas_fadump_mem_struct { struct rtas_fadump_section_header header; - - /* Kernel dump sections */ - struct rtas_fadump_section cpu_state_data; - struct rtas_fadump_section hpte_region; - - /* - * TODO: Extend multiple boot memory regions support in the kernel - * for this platform.
- */ - struct rtas_fadump_section rmr_region; + struct rtas_fadump_section rgn[MAX_SECTIONS]; }; /* diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 284a6fa04b0c..b10a25325238 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void) { void (*ctor)(void *) = get_dtl_cache_ctor(); - dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, - DISPATCH_LOG_BYTES, 0, ctor); + dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor); if (!dtl_cache) { pr_warn("Failed to create dispatch trace log buffer cache\n"); pr_warn("Stolen time statistics will be unreliable\n"); @@ -1159,7 +1159,6 @@ define_machine(pseries) { .machine_check_exception = pSeries_machine_check_exception, .machine_check_log_err = pSeries_machine_check_log_err, #ifdef CONFIG_KEXEC_CORE - .machine_kexec = pseries_machine_kexec, .kexec_cpu_down = pseries_kexec_cpu_down, #endif #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index c597711ef20a..db99725e752b 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -39,7 +39,7 @@ #include <asm/xive.h> #include <asm/dbell.h> #include <asm/plpar_wrappers.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/svm.h> #include <asm/kvm_guest.h> diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c index 3b4045d508ec..384c9dc1899a 100644 --- a/arch/powerpc/platforms/pseries/svm.c +++ b/arch/powerpc/platforms/pseries/svm.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/memblock.h> +#include <linux/mem_encrypt.h> #include <linux/cc_platform.h> #include <asm/machdep.h> #include <asm/svm.h> diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c index f9f682724e77..9e05a0e99cad 100644 --- a/arch/powerpc/platforms/pseries/vas-sysfs.c +++ b/arch/powerpc/platforms/pseries/vas-sysfs.c @@ -162,13 +162,13 @@ static const struct sysfs_ops vas_sysfs_ops = { .store = vas_type_store, }; -static struct kobj_type vas_def_attr_type = { +static const struct kobj_type vas_def_attr_type = { .release = vas_type_release, .sysfs_ops = &vas_sysfs_ops, .default_groups = vas_def_capab_groups, }; -static struct kobj_type vas_qos_attr_type = { +static const struct kobj_type vas_qos_attr_type = { .release = vas_type_release, .sysfs_ops = &vas_sysfs_ops, .default_groups = vas_qos_capab_groups, diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index 71d52a670d95..c25eb1a38185 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -38,7 +38,27 @@ static long hcall_return_busy_check(long rc) { /* Check if we are stalled for some time */ if (H_IS_LONG_BUSY(rc)) { - msleep(get_longbusy_msecs(rc)); + unsigned int ms; + /* + * The Allocate, Modify and Deallocate HCALLs return + * H_LONG_BUSY_ORDER_1_MSEC or H_LONG_BUSY_ORDER_10_MSEC + * for the long delay. So the sleep time should always + * be either 1 or 10 msecs, but if the HCALL returns + * a delay longer than 10 msecs, clamp the sleep + * time to 10 msecs. + */ + ms = clamp(get_longbusy_msecs(rc), 1, 10); + + /* + * msleep() will often sleep at least 20 msecs even + * though the hypervisor suggests that the OS reissue + * HCALLs after 1 or 10 msecs.
Also, the delay hint from + * the HCALL is just a suggestion, so it is OK to pause + * for less time than the hinted delay. Use usleep_range() + * to ensure we don't sleep much longer than actually + * needed; the range below spans from a tenth of the + * hinted delay up to the full hint. + */ + usleep_range(ms * (USEC_PER_MSEC / 10), ms * USEC_PER_MSEC); rc = H_BUSY; } else if (rc == H_BUSY) { cond_resched(); @@ -228,7 +248,7 @@ static irqreturn_t pseries_vas_irq_handler(int irq, void *data) struct pseries_vas_window *txwin = data; /* - * The thread hanlder will process this interrupt if it is + * The thread handler will process this interrupt if it is * already running. */ atomic_inc(&txwin->pending_faults); diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 90ff85c879bf..ac1d2d2c9a88 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; @@ -1576,10 +1576,10 @@ void vio_unregister_device(struct vio_dev *viodev) } EXPORT_SYMBOL(vio_unregister_device); -static int vio_bus_match(struct device *dev, struct device_driver *drv) +static int vio_bus_match(struct device *dev, const struct device_driver *drv) { const struct vio_dev *vio_dev = to_vio_dev(dev); - struct vio_driver *vio_drv = to_vio_driver(drv); + const struct vio_driver *vio_drv = to_vio_driver(drv); const struct vio_device_id *ids = vio_drv->id_table; return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); @@ -1592,13 +1592,9 @@ static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env) const char *cp; dn = dev->of_node; - if (!dn) - return -ENODEV; - cp = of_get_property(dn, "compatible", NULL); - if (!cp) - return -ENODEV; + if (dn && (cp = of_get_property(dn, "compatible", NULL))) + add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); - add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); return 0; } @@ -1693,7 +1689,7 @@ struct vio_dev *vio_find_node(struct device_node *vnode) /* construct the kobject name from the device node */ if (of_node_is_type(vnode_parent, "vdevice")) { const __be32 *prop; - + prop = of_get_property(vnode, "reg", NULL); if (!prop) goto out; diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile index 78473d69cd2b..e9890085953e 100644 --- a/arch/powerpc/purgatory/Makefile +++ b/arch/powerpc/purgatory/Makefile @@ -1,8 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -KASAN_SANITIZE := n -KCSAN_SANITIZE := n - targets += trampoline_$(BITS).o purgatory.ro # When profile-guided optimization is enabled, llvm emits two different diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index 5aa92ff3622d..18ff2c4a814a 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig @@ -5,12 +5,12 @@ config PPC4xx_PCI_EXPRESS bool - depends on PCI && 4xx + depends on PCI && 44x config PPC4xx_HSTA_MSI bool depends on PCI_MSI - depends on PCI && 4xx + depends on PCI && 44x config PPC_MSI_BITMAP bool diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 9cb1d029511a..0834a9a12600 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -1,7 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) - mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o
mpic_u3msi.o obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) obj-$(CONFIG_MPIC_TIMER) += mpic_timer.o @@ -14,7 +12,6 @@ obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o obj-$(CONFIG_PPC_MPC106) += grackle.o obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o -obj-$(CONFIG_PPC_PMI) += pmi.o obj-$(CONFIG_U3_DART) += dart_iommu.o obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o obj-$(CONFIG_FSL_SOC) += fsl_soc.o fsl_mpic_err.o diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index e14493685fe8..4a59ed1d62ce 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -207,7 +207,7 @@ unsigned int cpm2_get_irq(void) if (irq == 0) return(-1); - return irq_linear_revmap(cpm2_pic_host, irq); + return irq_find_mapping(cpm2_pic_host, irq); } static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq, @@ -259,7 +259,8 @@ void cpm2_pic_init(struct device_node *node) out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); /* create a legacy host */ - cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL); + cpm2_pic_host = irq_domain_create_linear(of_fwnode_handle(node), 64, + &cpm2_pic_host_ops, NULL); if (cpm2_pic_host == NULL) { printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); return; diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index 47db732981a8..e22fc638dbc7 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -138,7 +138,7 @@ static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, out_be32(&iop->dat, cpm2_gc->cpdata); } -static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc); @@ -150,6 +150,8 @@ static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) __cpm2_gpio32_set(mm_gc, pin_mask, value); spin_unlock_irqrestore(&cpm2_gc->lock, flags); + + return 0; } static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) @@ -208,7 +210,7 @@ int cpm2_gpiochip_add32(struct device *dev) gc->direction_input = cpm2_gpio32_dir_in; gc->direction_output = cpm2_gpio32_dir_out; gc->get = cpm2_gpio32_get; - gc->set = cpm2_gpio32_set; + gc->set_rv = cpm2_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 98096bbfd62e..c0d10c149661 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -24,7 +24,6 @@ #include <linux/suspend.h> #include <linux/memblock.h> #include <linux/gfp.h> -#include <linux/kmemleak.h> #include <linux/of_address.h> #include <asm/io.h> #include <asm/iommu.h> @@ -243,9 +242,6 @@ static void __init allocate_dart(void) if (!dart_tablebase) panic("Failed to allocate 16MB below 2GB for DART table\n"); - /* There is no point scanning the DART space for leaks*/ - kmemleak_no_scan((void *)dart_tablebase); - /* Allocate a spare page to map all invalid DART pages. 
We need to do * that to work around what looks like a problem with the HT bridge * prefetching into invalid pages and corrupting data diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c index 70ce66eadff1..cb44a69958e7 100644 --- a/arch/powerpc/sysdev/dcr.c +++ b/arch/powerpc/sysdev/dcr.c @@ -11,107 +11,6 @@ #include <linux/of_address.h> #include <asm/dcr.h> -#ifdef CONFIG_PPC_DCR_MMIO -static struct device_node *find_dcr_parent(struct device_node *node) -{ - struct device_node *par, *tmp; - const u32 *p; - - for (par = of_node_get(node); par;) { - if (of_property_read_bool(par, "dcr-controller")) - break; - p = of_get_property(par, "dcr-parent", NULL); - tmp = par; - if (p == NULL) - par = of_get_parent(par); - else - par = of_find_node_by_phandle(*p); - of_node_put(tmp); - } - return par; -} -#endif - -#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) - -bool dcr_map_ok_generic(dcr_host_t host) -{ - if (host.type == DCR_HOST_NATIVE) - return dcr_map_ok_native(host.host.native); - else if (host.type == DCR_HOST_MMIO) - return dcr_map_ok_mmio(host.host.mmio); - else - return false; -} -EXPORT_SYMBOL_GPL(dcr_map_ok_generic); - -dcr_host_t dcr_map_generic(struct device_node *dev, - unsigned int dcr_n, - unsigned int dcr_c) -{ - dcr_host_t host; - struct device_node *dp; - const char *prop; - - host.type = DCR_HOST_INVALID; - - dp = find_dcr_parent(dev); - if (dp == NULL) - return host; - - prop = of_get_property(dp, "dcr-access-method", NULL); - - pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop); - - if (!strcmp(prop, "native")) { - host.type = DCR_HOST_NATIVE; - host.host.native = dcr_map_native(dev, dcr_n, dcr_c); - } else if (!strcmp(prop, "mmio")) { - host.type = DCR_HOST_MMIO; - host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c); - } - - of_node_put(dp); - return host; -} -EXPORT_SYMBOL_GPL(dcr_map_generic); - -void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c) -{ - if (host.type == DCR_HOST_NATIVE) - dcr_unmap_native(host.host.native, dcr_c); - else if (host.type == DCR_HOST_MMIO) - dcr_unmap_mmio(host.host.mmio, dcr_c); - else /* host.type == DCR_HOST_INVALID */ - WARN_ON(true); -} -EXPORT_SYMBOL_GPL(dcr_unmap_generic); - -u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n) -{ - if (host.type == DCR_HOST_NATIVE) - return dcr_read_native(host.host.native, dcr_n); - else if (host.type == DCR_HOST_MMIO) - return dcr_read_mmio(host.host.mmio, dcr_n); - else /* host.type == DCR_HOST_INVALID */ - WARN_ON(true); - return 0; -} -EXPORT_SYMBOL_GPL(dcr_read_generic); - -void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value) -{ - if (host.type == DCR_HOST_NATIVE) - dcr_write_native(host.host.native, dcr_n, value); - else if (host.type == DCR_HOST_MMIO) - dcr_write_mmio(host.host.mmio, dcr_n, value); - else /* host.type == DCR_HOST_INVALID */ - WARN_ON(true); -} -EXPORT_SYMBOL_GPL(dcr_write_generic); - -#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ - unsigned int dcr_resource_start(const struct device_node *np, unsigned int index) { @@ -137,86 +36,5 @@ unsigned int dcr_resource_len(const struct device_node *np, unsigned int index) } EXPORT_SYMBOL_GPL(dcr_resource_len); -#ifdef CONFIG_PPC_DCR_MMIO - -static u64 of_translate_dcr_address(struct device_node *dev, - unsigned int dcr_n, - unsigned int *out_stride) -{ - struct device_node *dp; - const u32 *p; - unsigned int stride; - u64 ret = OF_BAD_ADDR; - - dp = find_dcr_parent(dev); - if (dp == NULL) - return OF_BAD_ADDR; - - /* Stride is not 
properly defined yet, default to 0x10 for Axon */ - p = of_get_property(dp, "dcr-mmio-stride", NULL); - stride = (p == NULL) ? 0x10 : *p; - - /* XXX FIXME: Which property name is to use of the 2 following ? */ - p = of_get_property(dp, "dcr-mmio-range", NULL); - if (p == NULL) - p = of_get_property(dp, "dcr-mmio-space", NULL); - if (p == NULL) - goto done; - - /* Maybe could do some better range checking here */ - ret = of_translate_address(dp, p); - if (ret != OF_BAD_ADDR) - ret += (u64)(stride) * (u64)dcr_n; - if (out_stride) - *out_stride = stride; - - done: - of_node_put(dp); - return ret; -} - -dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, - unsigned int dcr_n, - unsigned int dcr_c) -{ - dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n }; - u64 addr; - - pr_debug("dcr_map(%pOF, 0x%x, 0x%x)\n", - dev, dcr_n, dcr_c); - - addr = of_translate_dcr_address(dev, dcr_n, &ret.stride); - pr_debug("translates to addr: 0x%llx, stride: 0x%x\n", - (unsigned long long) addr, ret.stride); - if (addr == OF_BAD_ADDR) - return ret; - pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride); - ret.token = ioremap(addr, dcr_c * ret.stride); - if (ret.token == NULL) - return ret; - pr_debug("mapped at 0x%p -> base is 0x%p\n", - ret.token, ret.token - dcr_n * ret.stride); - ret.token -= dcr_n * ret.stride; - return ret; -} -EXPORT_SYMBOL_GPL(dcr_map_mmio); - -void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c) -{ - dcr_host_mmio_t h = host; - - if (h.token == NULL) - return; - h.token += host.base * h.stride; - iounmap(h.token); - h.token = NULL; -} -EXPORT_SYMBOL_GPL(dcr_unmap_mmio); - -#endif /* defined(CONFIG_PPC_DCR_MMIO) */ - -#ifdef CONFIG_PPC_DCR_NATIVE DEFINE_SPINLOCK(dcr_ind_lock); EXPORT_SYMBOL_GPL(dcr_ind_lock); -#endif /* defined(CONFIG_PPC_DCR_NATIVE) */ - diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index 040827671d21..b6f9774038e1 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c @@ -175,7 +175,7 @@ unsigned int ehv_pic_get_irq(void) * this will also setup revmap[] in the slow path for the first * time, next calls will always use fast path by indexing revmap */ - return irq_linear_revmap(global_ehv_pic->irqhost, irq); + return irq_find_mapping(global_ehv_pic->irqhost, irq); } static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -269,8 +269,9 @@ void __init ehv_pic_init(void) return; } - ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS, - &ehv_pic_host_ops, ehv_pic); + ehv_pic->irqhost = irq_domain_create_linear(of_fwnode_handle(np), + NR_EHV_PIC_INTS, + &ehv_pic_host_ops, ehv_pic); if (!ehv_pic->irqhost) { of_node_put(np); kfree(ehv_pic); @@ -291,5 +292,5 @@ void __init ehv_pic_init(void) ehv_pic->coreint_flag = of_property_read_bool(np, "has-external-proxy"); global_ehv_pic = ehv_pic; - irq_set_default_host(global_ehv_pic->irqhost); + irq_set_default_domain(global_ehv_pic->irqhost); } diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c index 39186ad6b3c3..3dabc9621810 100644 --- a/arch/powerpc/sysdev/fsl_gtm.c +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -77,7 +77,7 @@ struct gtm { static LIST_HEAD(gtms); /** - * gtm_get_timer - request GTM timer to use it with the rest of GTM API + * gtm_get_timer16 - request GTM timer to use it with the rest of GTM API * Context: non-IRQ * * This function reserves GTM timer for later use. 
It returns gtm_timer @@ -110,7 +110,7 @@ struct gtm_timer *gtm_get_timer16(void) EXPORT_SYMBOL(gtm_get_timer16); /** - * gtm_get_specific_timer - request specific GTM timer + * gtm_get_specific_timer16 - request specific GTM timer * @gtm: specific GTM, pass here GTM's device_node->data * @timer: specific timer number, Timer1 is 0. * Context: non-IRQ @@ -260,7 +260,7 @@ int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, bool reload) EXPORT_SYMBOL(gtm_set_timer16); /** - * gtm_set_exact_utimer16 - (re)set 16 bits timer + * gtm_set_exact_timer16 - (re)set 16 bits timer * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer * @usec: timer interval in microseconds * @reload: if set, the timer will reset upon expiry rather than diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 8e6c84df4ca1..4fe8a7b1b288 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -75,7 +75,7 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK; cascade_virq = msi_data->cascade_array[srs]->virq; - seq_printf(p, " fsl-msi-%d", cascade_virq); + seq_printf(p, "fsl-msi-%d", cascade_virq); } @@ -412,7 +412,7 @@ static int fsl_of_msi_probe(struct platform_device *dev) } platform_set_drvdata(dev, msi); - msi->irqhost = irq_domain_add_linear(dev->dev.of_node, + msi->irqhost = irq_domain_create_linear(of_fwnode_handle(dev->dev.of_node), NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi); if (msi->irqhost == NULL) { @@ -564,10 +564,12 @@ static const struct fsl_msi_feature ipic_msi_feature = { .msiir_offset = 0x38, }; +#ifdef CONFIG_EPAPR_PARAVIRT static const struct fsl_msi_feature vmpic_msi_feature = { .fsl_pic_ip = FSL_PIC_IP_VMPIC, .msiir_offset = 0, }; +#endif static const struct of_device_id fsl_of_msi_ids[] = { { @@ -601,7 +603,7 @@ static struct platform_driver fsl_of_msi_driver = { .of_match_table = fsl_of_msi_ids, }, .probe = fsl_of_msi_probe, - .remove_new = fsl_of_msi_remove, + .remove = fsl_of_msi_remove, }; static __init int fsl_of_msi_init(void) diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index a6c424680c37..0bc3f0b36528 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c @@ -214,8 +214,9 @@ void __init gef_pic_init(struct device_node *np) } /* Setup an irq_domain structure */ - gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS, - &gef_pic_host_ops, NULL); + gef_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(np), + GEF_PIC_NUM_IRQS, + &gef_pic_host_ops, NULL); if (gef_pic_irq_host == NULL) return; @@ -244,7 +245,7 @@ unsigned int gef_pic_get_irq(void) if (active & (0x1 << hwirq)) break; } - virq = irq_linear_revmap(gef_pic_irq_host, + virq = irq_find_mapping(gef_pic_irq_host, (irq_hw_number_t)hwirq); } diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 06e391485da7..99bb2b916949 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -260,8 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr) raw_spin_unlock_irqrestore(&i8259_lock, flags); /* create a legacy host */ - i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, - &i8259_host_ops, NULL); + i8259_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0, + &i8259_host_ops, NULL); if (i8259_host == NULL) { printk(KERN_ERR "i8259: failed to allocate irq host !\n"); return; diff --git a/arch/powerpc/sysdev/ipic.c 
b/arch/powerpc/sysdev/ipic.c index 5f69e2d50f26..70be2105865d 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -711,8 +711,9 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) if (ipic == NULL) return NULL; - ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS, - &ipic_host_ops, ipic); + ipic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), + NR_IPIC_INTS, + &ipic_host_ops, ipic); if (ipic->irqhost == NULL) { kfree(ipic); return NULL; @@ -757,13 +758,12 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) ipic_write(ipic->regs, IPIC_SEMSR, temp); primary_ipic = ipic; - irq_set_default_host(primary_ipic->irqhost); + irq_set_default_domain(primary_ipic->irqhost); ipic_write(ipic->regs, IPIC_SIMSR_H, 0); ipic_write(ipic->regs, IPIC_SIMSR_L, 0); - printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS, - primary_ipic->regs); + pr_info("IPIC (%d IRQ sources) at MMIO %pa\n", NR_IPIC_INTS, &res.start); return ipic; } @@ -801,7 +801,7 @@ unsigned int ipic_get_irq(void) if (irq == 0) /* 0 --> no irq is pending */ return 0; - return irq_linear_revmap(primary_ipic->irqhost, irq); + return irq_find_mapping(primary_ipic->irqhost, irq); } #ifdef CONFIG_SUSPEND diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index d94cf36b0f65..ad7310bba00b 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -27,6 +27,7 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/syscore_ops.h> #include <linux/ratelimit.h> #include <linux/pgtable.h> @@ -474,9 +475,9 @@ static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); } - printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", - PCI_SLOT(devfn), PCI_FUNC(devfn), - flags & HT_MSI_FLAGS_ENABLE ? 
"enabled" : "disabled", addr); + pr_debug("mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", + PCI_SLOT(devfn), PCI_FUNC(devfn), + str_enabled_disabled(flags & HT_MSI_FLAGS_ENABLE), addr); if (!(flags & HT_MSI_FLAGS_ENABLE)) writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); @@ -1483,9 +1484,9 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; - mpic->irqhost = irq_domain_add_linear(mpic->node, - intvec_top, - &mpic_host_ops, mpic); + mpic->irqhost = irq_domain_create_linear(of_fwnode_handle(mpic->node), + intvec_top, + &mpic_host_ops, mpic); /* * FIXME: The code leaks the MPIC object and mappings here; this @@ -1520,7 +1521,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, if (!(mpic->flags & MPIC_SECONDARY)) { mpic_primary = mpic; - irq_set_default_host(mpic->irqhost); + irq_set_default_domain(mpic->irqhost); } return mpic; @@ -1785,7 +1786,7 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) return 0; } - return irq_linear_revmap(mpic->irqhost, src); + return irq_find_mapping(mpic->irqhost, src); } unsigned int mpic_get_one_irq(struct mpic *mpic) @@ -1823,7 +1824,7 @@ unsigned int mpic_get_coreint_irq(void) return 0; } - return irq_linear_revmap(mpic->irqhost, src); + return irq_find_mapping(mpic->irqhost, src); #else return 0; #endif diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 0b6e37f3ffb8..456a4f64ae0a 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -124,10 +124,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, if (bmp->bitmap_from_slab) bmp->bitmap = kzalloc(size, GFP_KERNEL); else { - bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); - if (!bmp->bitmap) - panic("%s: Failed to allocate %u bytes\n", __func__, - size); + bmp->bitmap = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c deleted file mode 100644 index 737f97fd67d7..000000000000 --- a/arch/powerpc/sysdev/pmi.c +++ /dev/null @@ -1,267 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * pmi driver - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * PMI (Platform Management Interrupt) is a way to communicate - * with the BMC (Baseboard Management Controller) via interrupts. - * Unlike IPMI it is bidirectional and has a low latency. 
- * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/completion.h> -#include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/mod_devicetable.h> -#include <linux/workqueue.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> - -#include <asm/io.h> -#include <asm/pmi.h> - -struct pmi_data { - struct list_head handler; - spinlock_t handler_spinlock; - spinlock_t pmi_spinlock; - struct mutex msg_mutex; - pmi_message_t msg; - struct completion *completion; - struct platform_device *dev; - int irq; - u8 __iomem *pmi_reg; - struct work_struct work; -}; - -static struct pmi_data *data; - -static irqreturn_t pmi_irq_handler(int irq, void *dev_id) -{ - u8 type; - int rc; - - spin_lock(&data->pmi_spinlock); - - type = ioread8(data->pmi_reg + PMI_READ_TYPE); - pr_debug("pmi: got message of type %d\n", type); - - if (type & PMI_ACK && !data->completion) { - printk(KERN_WARNING "pmi: got unexpected ACK message.\n"); - rc = -EIO; - goto unlock; - } - - if (data->completion && !(type & PMI_ACK)) { - printk(KERN_WARNING "pmi: expected ACK, but got %d\n", type); - rc = -EIO; - goto unlock; - } - - data->msg.type = type; - data->msg.data0 = ioread8(data->pmi_reg + PMI_READ_DATA0); - data->msg.data1 = ioread8(data->pmi_reg + PMI_READ_DATA1); - data->msg.data2 = ioread8(data->pmi_reg + PMI_READ_DATA2); - rc = 0; -unlock: - spin_unlock(&data->pmi_spinlock); - - if (rc == -EIO) { - rc = IRQ_HANDLED; - goto out; - } - - if (data->msg.type & PMI_ACK) { - complete(data->completion); - rc = IRQ_HANDLED; - goto out; - } - - schedule_work(&data->work); - - rc = IRQ_HANDLED; -out: - return rc; -} - - -static const struct of_device_id pmi_match[] = { - { .type = "ibm,pmi", .name = "ibm,pmi" }, - { .type = "ibm,pmi" }, - {}, -}; - -MODULE_DEVICE_TABLE(of, pmi_match); - -static void pmi_notify_handlers(struct work_struct *work) -{ - struct pmi_handler *handler; - - spin_lock(&data->handler_spinlock); - list_for_each_entry(handler, &data->handler, node) { - pr_debug("pmi: notifying handler %p\n", handler); - if (handler->type == data->msg.type) - handler->handle_pmi_message(data->msg); - } - spin_unlock(&data->handler_spinlock); -} - -static int pmi_of_probe(struct platform_device *dev) -{ - struct device_node *np = dev->dev.of_node; - int rc; - - if (data) { - printk(KERN_ERR "pmi: driver has already been initialized.\n"); - rc = -EBUSY; - goto out; - } - - data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL); - if (!data) { - printk(KERN_ERR "pmi: could not allocate memory.\n"); - rc = -ENOMEM; - goto out; - } - - data->pmi_reg = of_iomap(np, 0); - if (!data->pmi_reg) { - printk(KERN_ERR "pmi: invalid register address.\n"); - rc = -EFAULT; - goto error_cleanup_data; - } - - INIT_LIST_HEAD(&data->handler); - - mutex_init(&data->msg_mutex); - spin_lock_init(&data->pmi_spinlock); - spin_lock_init(&data->handler_spinlock); - - INIT_WORK(&data->work, pmi_notify_handlers); - - data->dev = dev; - - data->irq = irq_of_parse_and_map(np, 0); - if (!data->irq) { - printk(KERN_ERR "pmi: invalid interrupt.\n"); - rc = -EFAULT; - goto error_cleanup_iomap; - } - - rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL); - if (rc) { - printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n", - data->irq, rc); - goto error_cleanup_iomap; - } - - printk(KERN_INFO "pmi: found pmi device at addr %p.\n", data->pmi_reg); - - goto out; - -error_cleanup_iomap: - iounmap(data->pmi_reg); - 
-error_cleanup_data: - kfree(data); - -out: - return rc; -} - -static void pmi_of_remove(struct platform_device *dev) -{ - struct pmi_handler *handler, *tmp; - - free_irq(data->irq, NULL); - iounmap(data->pmi_reg); - - spin_lock(&data->handler_spinlock); - - list_for_each_entry_safe(handler, tmp, &data->handler, node) - list_del(&handler->node); - - spin_unlock(&data->handler_spinlock); - - kfree(data); - data = NULL; -} - -static struct platform_driver pmi_of_platform_driver = { - .probe = pmi_of_probe, - .remove_new = pmi_of_remove, - .driver = { - .name = "pmi", - .of_match_table = pmi_match, - }, -}; -module_platform_driver(pmi_of_platform_driver); - -int pmi_send_message(pmi_message_t msg) -{ - unsigned long flags; - DECLARE_COMPLETION_ONSTACK(completion); - - if (!data) - return -ENODEV; - - mutex_lock(&data->msg_mutex); - - data->msg = msg; - pr_debug("pmi_send_message: msg is %08x\n", *(u32*)&msg); - - data->completion = &completion; - - spin_lock_irqsave(&data->pmi_spinlock, flags); - iowrite8(msg.data0, data->pmi_reg + PMI_WRITE_DATA0); - iowrite8(msg.data1, data->pmi_reg + PMI_WRITE_DATA1); - iowrite8(msg.data2, data->pmi_reg + PMI_WRITE_DATA2); - iowrite8(msg.type, data->pmi_reg + PMI_WRITE_TYPE); - spin_unlock_irqrestore(&data->pmi_spinlock, flags); - - pr_debug("pmi_send_message: wait for completion\n"); - - wait_for_completion_interruptible_timeout(data->completion, - PMI_TIMEOUT); - - data->completion = NULL; - - mutex_unlock(&data->msg_mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(pmi_send_message); - -int pmi_register_handler(struct pmi_handler *handler) -{ - if (!data) - return -ENODEV; - - spin_lock(&data->handler_spinlock); - list_add_tail(&handler->node, &data->handler); - spin_unlock(&data->handler_spinlock); - - return 0; -} -EXPORT_SYMBOL_GPL(pmi_register_handler); - -void pmi_unregister_handler(struct pmi_handler *handler) -{ - if (!data) - return; - - pr_debug("pmi: unregistering handler %p\n", handler); - - spin_lock(&data->handler_spinlock); - list_del(&handler->node); - spin_unlock(&data->handler_spinlock); -} -EXPORT_SYMBOL_GPL(pmi_unregister_handler); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); -MODULE_DESCRIPTION("IBM Platform Management Interrupt driver"); diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c index 47cc87bd6a33..9a232ae5e360 100644 --- a/arch/powerpc/sysdev/rtc_cmos_setup.c +++ b/arch/powerpc/sysdev/rtc_cmos_setup.c @@ -66,4 +66,5 @@ static int __init add_rtc(void) } fs_initcall(add_rtc); +MODULE_DESCRIPTION("PPC RTC CMOS driver"); MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 0e42f7bad7db..07d0f6a83879 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -404,8 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node) { DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); - pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, - &pci_irq_domain_ops, NULL); + pci_irq_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0, + &pci_irq_domain_ops, NULL); if (pci_irq_host == NULL) { printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); return; diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 700b67476a7d..4e89158a577c 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -145,27 +145,6 @@ static void icp_native_cause_ipi(int 
cpu) icp_native_set_qirr(cpu, IPI_PRIORITY); } -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -void icp_native_cause_ipi_rm(int cpu) -{ - /* - * Currently not used to send IPIs to another CPU - * on the same core. Only caller is KVM real mode. - * Need the physical address of the XICS to be - * previously saved in kvm_hstate in the paca. - */ - void __iomem *xics_phys; - - /* - * Just like the cause_ipi functions, it is required to - * include a full barrier before causing the IPI. - */ - xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys; - mb(); - __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); -} -#endif - /* * Called when an interrupt is received on an off-line CPU to * clear the interrupt, so that the CPU can go back to nap mode. diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index d3a4156e8788..c3fa539a9898 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -472,7 +472,7 @@ static int __init xics_allocate_domain(void) return -ENOMEM; } - irq_set_default_host(xics_host); + irq_set_default_domain(xics_host); return 0; } diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index a289cb97c1d7..f10592405024 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -383,7 +383,7 @@ static unsigned int xive_get_irq(void) * CPU. * * If we find that there is indeed more in there, we call - * force_external_irq_replay() to make Linux synthetize an + * force_external_irq_replay() to make Linux synthesize an * external interrupt on the next call to local_irq_restore(). */ static void xive_do_queue_eoi(struct xive_cpu *xc) @@ -726,7 +726,7 @@ static int xive_irq_set_affinity(struct irq_data *d, pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq); /* Is this valid ? */ - if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + if (!cpumask_intersects(cpumask, cpu_online_mask)) return -EINVAL; /* @@ -874,7 +874,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) * * This also tells us that it's in flight to a host queue * or has already been fetched but hasn't been EOIed yet - * by the host. This it's potentially using up a host + * by the host. Thus it's potentially using up a host * queue slot. This is important to know because as long * as this is the case, we must not hard-unmask it when * "returning" that interrupt to the host. 
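A side note on the cpumask change in xive_irq_set_affinity() above: the conversion to cpumask_intersects() is behavior-preserving. A minimal standalone sketch of why the two predicates agree (illustration only, not patch content):

	#include <linux/cpumask.h>

	/*
	 * cpumask_any_and() has to pick a CPU out of the intersection and
	 * returns a value >= nr_cpu_ids when the intersection is empty;
	 * cpumask_intersects() answers the yes/no question directly and can
	 * stop at the first overlapping bit.
	 */
	static bool affinity_valid_old(const struct cpumask *mask)
	{
		return cpumask_any_and(mask, cpu_online_mask) < nr_cpu_ids;
	}

	static bool affinity_valid_new(const struct cpumask *mask)
	{
		/* Same result for every input mask, with clearer intent. */
		return cpumask_intersects(mask, cpu_online_mask);
	}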
@@ -1464,10 +1464,10 @@ static const struct irq_domain_ops xive_irq_domain_ops = { static void __init xive_init_host(struct device_node *np) { - xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL); + xive_irq_domain = irq_domain_create_tree(of_fwnode_handle(np), &xive_irq_domain_ops, NULL); if (WARN_ON(xive_irq_domain == NULL)) return; - irq_set_default_host(xive_irq_domain); + irq_set_default_domain(xive_irq_domain); } static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index f1c0fa6ece21..a0934b516933 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -415,7 +415,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) return; } - /* Grab it's CAM value */ + /* Grab its CAM value */ rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL); if (rc) { pr_err("Failed to get pool VP info CPU %d\n", cpu); @@ -559,9 +559,7 @@ bool __init xive_native_init(void) struct device_node *np; struct resource r; void __iomem *tima; - struct property *prop; u8 max_prio = 7; - const __be32 *p; u32 val, cpu; s64 rc; @@ -592,7 +590,7 @@ bool __init xive_native_init(void) max_prio = val - 1; /* Iterate the EQ sizes and pick one */ - of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) { + of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) { xive_queue_shift = val; if (val == PAGE_SHIFT) break; diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index e45419264391..5aedbe3e8e6a 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/irq.h> +#include <linux/seq_file.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> @@ -814,7 +815,6 @@ bool __init xive_spapr_init(void) struct device_node *np; struct resource r; void __iomem *tima; - struct property *prop; u8 max_prio; u32 val; u32 len; @@ -866,7 +866,7 @@ bool __init xive_spapr_init(void) } /* Iterate the EQ sizes and pick one */ - of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) { + of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) { xive_queue_shift = val; if (val == PAGE_SHIFT) break; diff --git a/arch/powerpc/platforms/maple/Makefile b/arch/powerpc/tools/.gitignore index 19f35ab828a7..ec380a14a09a 100644 --- a/arch/powerpc/platforms/maple/Makefile +++ b/arch/powerpc/tools/.gitignore @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y += setup.o pci.o time.o +/vmlinux.arch.S diff --git a/arch/powerpc/tools/Makefile b/arch/powerpc/tools/Makefile new file mode 100644 index 000000000000..e1f7afcd9fdf --- /dev/null +++ b/arch/powerpc/tools/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +quiet_cmd_gen_ftrace_ool_stubs = GEN $@ + cmd_gen_ftrace_ool_stubs = $< "$(CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE)" "$(CONFIG_64BIT)" \ + "$(OBJDUMP)" vmlinux.o $@ + +$(obj)/vmlinux.arch.S: $(src)/ftrace-gen-ool-stubs.sh vmlinux.o FORCE + $(call if_changed,gen_ftrace_ool_stubs) + +targets += vmlinux.arch.S diff --git a/arch/powerpc/tools/ftrace-gen-ool-stubs.sh b/arch/powerpc/tools/ftrace-gen-ool-stubs.sh new file mode 100755 index 000000000000..bac186bdf64a --- /dev/null +++ b/arch/powerpc/tools/ftrace-gen-ool-stubs.sh @@ -0,0 +1,52 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-or-later + +# Error out if any command fails +set -e + +num_ool_stubs_text_builtin="$1" +is_64bit="$2"
+objdump="$3" +vmlinux_o="$4" +arch_vmlinux_S="$5" + +RELOCATION=R_PPC64_ADDR64 +if [ -z "$is_64bit" ]; then + RELOCATION=R_PPC_ADDR32 +fi + +num_ool_stubs_total=$($objdump -r -j __patchable_function_entries "$vmlinux_o" | + grep -c "$RELOCATION") +num_ool_stubs_inittext=$($objdump -r -j __patchable_function_entries "$vmlinux_o" | + grep -e ".init.text" -e ".text.startup" | grep -c "$RELOCATION") +num_ool_stubs_text=$((num_ool_stubs_total - num_ool_stubs_inittext)) + +if [ "$num_ool_stubs_text" -gt "$num_ool_stubs_text_builtin" ]; then + num_ool_stubs_text_end=$((num_ool_stubs_text - num_ool_stubs_text_builtin)) +else + num_ool_stubs_text_end=0 +fi + +cat > "$arch_vmlinux_S" <<EOF +#include <asm/asm-offsets.h> +#include <asm/ppc_asm.h> +#include <linux/linkage.h> + +.pushsection .tramp.ftrace.text,"aw" +SYM_DATA(ftrace_ool_stub_text_end_count, .long $num_ool_stubs_text_end) + +SYM_START(ftrace_ool_stub_text_end, SYM_L_GLOBAL, .balign SZL) +#if $num_ool_stubs_text_end + .space $num_ool_stubs_text_end * FTRACE_OOL_STUB_SIZE +#endif +SYM_CODE_END(ftrace_ool_stub_text_end) +.popsection + +.pushsection .tramp.ftrace.init,"aw" +SYM_DATA(ftrace_ool_stub_inittext_count, .long $num_ool_stubs_inittext) + +SYM_START(ftrace_ool_stub_inittext, SYM_L_GLOBAL, .balign SZL) + .space $num_ool_stubs_inittext * FTRACE_OOL_STUB_SIZE +SYM_CODE_END(ftrace_ool_stub_inittext) +.popsection +EOF diff --git a/arch/powerpc/tools/ftrace_check.sh b/arch/powerpc/tools/ftrace_check.sh new file mode 100755 index 000000000000..405e7e306617 --- /dev/null +++ b/arch/powerpc/tools/ftrace_check.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-or-later +# +# This script checks vmlinux to ensure that all functions can call ftrace_caller() either directly, +# or through the stub, ftrace_tramp_text, at the end of kernel text. + +# Error out if any command fails +set -e + +# Allow for verbose output +if [ "$V" = "1" ]; then + set -x +fi + +if [ $# -lt 2 ]; then + echo "$0 [path to nm] [path to vmlinux]" 1>&2 + exit 1 +fi + +# Have Kbuild supply the path to nm so we handle cross compilation. 
+nm="$1" +vmlinux="$2" + +stext_addr=$($nm "$vmlinux" | grep -e " [TA] _stext$" | \ + cut -d' ' -f1 | tr '[:lower:]' '[:upper:]') +ftrace_caller_addr=$($nm "$vmlinux" | grep -e " T ftrace_caller$" | \ + cut -d' ' -f1 | tr '[:lower:]' '[:upper:]') +ftrace_tramp_addr=$($nm "$vmlinux" | grep -e " T ftrace_tramp_text$" | \ + cut -d' ' -f1 | tr '[:lower:]' '[:upper:]') + +ftrace_caller_offset=$(echo "ibase=16;$ftrace_caller_addr - $stext_addr" | bc) +ftrace_tramp_offset=$(echo "ibase=16;$ftrace_tramp_addr - $ftrace_caller_addr" | bc) +sz_32m=$(printf "%d" 0x2000000) +sz_64m=$(printf "%d" 0x4000000) + +# ftrace_caller - _stext < 32M +if [ "$ftrace_caller_offset" -ge "$sz_32m" ]; then + echo "ERROR: ftrace_caller (0x$ftrace_caller_addr) is beyond 32MiB of _stext" 1>&2 + echo "ERROR: consider disabling CONFIG_FUNCTION_TRACER, or reducing the size \ + of kernel text" 1>&2 + exit 1 +fi + +# ftrace_tramp_text - ftrace_caller < 64M +if [ "$ftrace_tramp_offset" -ge "$sz_64m" ]; then + echo "ERROR: kernel text extends beyond 64MiB from ftrace_caller" 1>&2 + echo "ERROR: consider disabling CONFIG_FUNCTION_TRACER, or reducing the size \ + of kernel text" 1>&2 + exit 1 +fi diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 682c7c0a6f77..d74b147126b7 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -10,15 +10,10 @@ KCSAN_SANITIZE := n # Disable ftrace for the entire directory ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE) -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) - # Clang stores addresses on the stack causing the frame size to blow # out. See https://github.com/ClangBuiltLinux/linux/issues/252 ccflags-$(CONFIG_CC_IS_CLANG) += -Wframe-larger-than=4096 obj-y += xmon.o nonstdio.o spr_access.o xmon_bpts.o -ifdef CONFIG_XMON_DISASSEMBLY -obj-y += ppc-dis.o ppc-opc.o -obj-$(CONFIG_SPU_BASE) += spu-dis.o spu-opc.o -endif +obj-$(CONFIG_XMON_DISASSEMBLY) += ppc-dis.o ppc-opc.o diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c index 75fa98221d48..af105e1bc3fc 100644 --- a/arch/powerpc/xmon/ppc-dis.c +++ b/arch/powerpc/xmon/ppc-dis.c @@ -122,32 +122,21 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr) bool insn_is_short; ppc_cpu_t dialect; - dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON - | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC; + dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON; - if (cpu_has_feature(CPU_FTRS_POWER5)) - dialect |= PPC_OPCODE_POWER5; + if (IS_ENABLED(CONFIG_PPC64)) + dialect |= PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_CELL | + PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 | + PPC_OPCODE_POWER9; - if (cpu_has_feature(CPU_FTRS_CELL)) - dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC); + if (cpu_has_feature(CPU_FTR_TM)) + dialect |= PPC_OPCODE_HTM; - if (cpu_has_feature(CPU_FTRS_POWER6)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC); + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + dialect |= PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2; - if (cpu_has_feature(CPU_FTRS_POWER7)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 - | PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX); - - if (cpu_has_feature(CPU_FTRS_POWER8)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 - | PPC_OPCODE_POWER8 | PPC_OPCODE_HTM - | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX); - - if (cpu_has_feature(CPU_FTRS_POWER9)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 - | 
PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM - | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 - | PPC_OPCODE_VSX | PPC_OPCODE_VSX3); + if (cpu_has_feature(CPU_FTR_VSX)) + dialect |= PPC_OPCODE_VSX | PPC_OPCODE_VSX3; /* Get the major opcode of the insn. */ opcode = NULL; diff --git a/arch/powerpc/xmon/spu-dis.c b/arch/powerpc/xmon/spu-dis.c deleted file mode 100644 index 4b0a4e640f08..000000000000 --- a/arch/powerpc/xmon/spu-dis.c +++ /dev/null @@ -1,237 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Disassemble SPU instructions - - Copyright 2006 Free Software Foundation, Inc. - - This file is part of GDB, GAS, and the GNU binutils. - - */ - -#include <linux/string.h> -#include "nonstdio.h" -#include "ansidecl.h" -#include "spu.h" -#include "dis-asm.h" - -/* This file provides a disassembler function which uses - the disassembler interface defined in dis-asm.h. */ - -extern const struct spu_opcode spu_opcodes[]; -extern const int spu_num_opcodes; - -#define SPU_DISASM_TBL_SIZE (1 << 11) -static const struct spu_opcode *spu_disassemble_table[SPU_DISASM_TBL_SIZE]; - -static void -init_spu_disassemble (void) -{ - int i; - - /* If two instructions have the same opcode then we prefer the first - * one. In most cases it is just an alternate mnemonic. */ - for (i = 0; i < spu_num_opcodes; i++) - { - int o = spu_opcodes[i].opcode; - if (o >= SPU_DISASM_TBL_SIZE) - continue; /* abort (); */ - if (spu_disassemble_table[o] == 0) - spu_disassemble_table[o] = &spu_opcodes[i]; - } -} - -/* Determine the instruction from the 10 least significant bits. */ -static const struct spu_opcode * -get_index_for_opcode (unsigned int insn) -{ - const struct spu_opcode *index; - unsigned int opcode = insn >> (32-11); - - /* Init the table. This assumes that element 0/opcode 0 (currently - * NOP) is always used */ - if (spu_disassemble_table[0] == 0) - init_spu_disassemble (); - - if ((index = spu_disassemble_table[opcode & 0x780]) != 0 - && index->insn_type == RRR) - return index; - - if ((index = spu_disassemble_table[opcode & 0x7f0]) != 0 - && (index->insn_type == RI18 || index->insn_type == LBT)) - return index; - - if ((index = spu_disassemble_table[opcode & 0x7f8]) != 0 - && index->insn_type == RI10) - return index; - - if ((index = spu_disassemble_table[opcode & 0x7fc]) != 0 - && (index->insn_type == RI16)) - return index; - - if ((index = spu_disassemble_table[opcode & 0x7fe]) != 0 - && (index->insn_type == RI8)) - return index; - - if ((index = spu_disassemble_table[opcode & 0x7ff]) != 0) - return index; - - return NULL; -} - -/* Print a Spu instruction. */ - -int -print_insn_spu (unsigned long insn, unsigned long memaddr) -{ - int value; - int hex_value; - const struct spu_opcode *index; - enum spu_insns tag; - - index = get_index_for_opcode (insn); - - if (index == 0) - { - printf(".long 0x%lx", insn); - } - else - { - int i; - int paren = 0; - tag = (enum spu_insns)(index - spu_opcodes); - printf("%s", index->mnemonic); - if (tag == M_BI || tag == M_BISL || tag == M_IRET || tag == M_BISLED - || tag == M_BIHNZ || tag == M_BIHZ || tag == M_BINZ || tag == M_BIZ - || tag == M_SYNC || tag == M_HBR) - { - int fb = (insn >> (32-18)) & 0x7f; - if (fb & 0x40) - printf(tag == M_SYNC ? 
"c" : "p"); - if (fb & 0x20) - printf("d"); - if (fb & 0x10) - printf("e"); - } - if (index->arg[0] != 0) - printf("\t"); - hex_value = 0; - for (i = 1; i <= index->arg[0]; i++) - { - int arg = index->arg[i]; - if (arg != A_P && !paren && i > 1) - printf(","); - - switch (arg) - { - case A_T: - printf("$%lu", - DECODE_INSN_RT (insn)); - break; - case A_A: - printf("$%lu", - DECODE_INSN_RA (insn)); - break; - case A_B: - printf("$%lu", - DECODE_INSN_RB (insn)); - break; - case A_C: - printf("$%lu", - DECODE_INSN_RC (insn)); - break; - case A_S: - printf("$sp%lu", - DECODE_INSN_RA (insn)); - break; - case A_H: - printf("$ch%lu", - DECODE_INSN_RA (insn)); - break; - case A_P: - paren++; - printf("("); - break; - case A_U7A: - printf("%lu", - 173 - DECODE_INSN_U8 (insn)); - break; - case A_U7B: - printf("%lu", - 155 - DECODE_INSN_U8 (insn)); - break; - case A_S3: - case A_S6: - case A_S7: - case A_S7N: - case A_U3: - case A_U5: - case A_U6: - case A_U7: - hex_value = DECODE_INSN_I7 (insn); - printf("%d", hex_value); - break; - case A_S11: - print_address(memaddr + DECODE_INSN_I9a (insn) * 4); - break; - case A_S11I: - print_address(memaddr + DECODE_INSN_I9b (insn) * 4); - break; - case A_S10: - case A_S10B: - hex_value = DECODE_INSN_I10 (insn); - printf("%d", hex_value); - break; - case A_S14: - hex_value = DECODE_INSN_I10 (insn) * 16; - printf("%d", hex_value); - break; - case A_S16: - hex_value = DECODE_INSN_I16 (insn); - printf("%d", hex_value); - break; - case A_X16: - hex_value = DECODE_INSN_U16 (insn); - printf("%u", hex_value); - break; - case A_R18: - value = DECODE_INSN_I16 (insn) * 4; - if (value == 0) - printf("%d", value); - else - { - hex_value = memaddr + value; - print_address(hex_value & 0x3ffff); - } - break; - case A_S18: - value = DECODE_INSN_U16 (insn) * 4; - if (value == 0) - printf("%d", value); - else - print_address(value); - break; - case A_U18: - value = DECODE_INSN_U18 (insn); - if (value == 0 || 1) - { - hex_value = value; - printf("%u", value); - } - else - print_address(value); - break; - case A_U14: - hex_value = DECODE_INSN_U14 (insn); - printf("%u", hex_value); - break; - } - if (arg != A_P && paren) - { - printf(")"); - paren--; - } - } - if (hex_value > 16) - printf("\t# %x", hex_value); - } - return 4; -} diff --git a/arch/powerpc/xmon/spu-insns.h b/arch/powerpc/xmon/spu-insns.h deleted file mode 100644 index 7e1126a19909..000000000000 --- a/arch/powerpc/xmon/spu-insns.h +++ /dev/null @@ -1,399 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* SPU ELF support for BFD. - - Copyright 2006 Free Software Foundation, Inc. - - This file is part of BFD, the Binary File Descriptor library. 
- - */ - -/* SPU Opcode Table - --=-=-= FORMAT =-=-=- - - +----+-------+-------+-------+-------+ +------------+-------+-------+-------+ -RRR | op | RC | RB | RA | RT | RI7 | op | I7 | RA | RT | - +----+-------+-------+-------+-------+ +------------+-------+-------+-------+ - 0 3 1 1 2 3 0 1 1 2 3 - 0 7 4 1 0 7 4 1 - - +-----------+--------+-------+-------+ +---------+----------+-------+-------+ -RI8 | op | I8 | RA | RT | RI10 | op | I10 | RA | RT | - +-----------+--------+-------+-------+ +---------+----------+-------+-------+ - 0 9 1 2 3 0 7 1 2 3 - 7 4 1 7 4 1 - - +----------+-----------------+-------+ +--------+-------------------+-------+ -RI16 | op | I16 | RT | RI18 | op | I18 | RT | - +----------+-----------------+-------+ +--------+-------------------+-------+ - 0 8 2 3 0 6 2 3 - 4 1 4 1 - - +------------+-------+-------+-------+ +-------+--+-----------------+-------+ -RR | op | RB | RA | RT | LBT | op |RO| I16 | RO | - +------------+-------+-------+-------+ +-------+--+-----------------+-------+ - 0 1 1 2 3 0 6 8 2 3 - 0 7 4 1 4 1 - - +------------+----+--+-------+-------+ - LBTI | op | // |RO| RA | RO | - +------------+----+--+-------+-------+ - 0 1 1 1 2 3 - 0 5 7 4 1 - --=-=-= OPCODE =-=-=- - -OPCODE field specifies the most significant 11bit of the instruction. Some formats don't have 11bits for opcode field, and in this -case, bit field other than op are defined as 0s. For example, opcode of fma instruction which is RRR format is defined as 0x700, -since 0x700 -> 11'b11100000000, this means opcode is 4'b1110, and other 7bits are defined as 7'b0000000. - --=-=-= ASM_FORMAT =-=-=- - -RRR category RI7 category - ASM_RRR mnemonic RC, RA, RB, RT ASM_RI4 mnemonic RT, RA, I4 - ASM_RI7 mnemonic RT, RA, I7 - -RI8 category RI10 category - ASM_RUI8 mnemonic RT, RA, UI8 ASM_AI10 mnemonic RA, I10 - ASM_RI10 mnemonic RT, RA, R10 - ASM_RI10IDX mnemonic RT, I10(RA) - -RI16 category RI18 category - ASM_I16W mnemonic I16W ASM_RI18 mnemonic RT, I18 - ASM_RI16 mnemonic RT, I16 - ASM_RI16W mnemonic RT, I16W - -RR category LBT category - ASM_MFSPR mnemonic RT, SA ASM_LBT mnemonic brinst, brtarg - ASM_MTSPR mnemonic SA, RT - ASM_NOOP mnemonic LBTI category - ASM_RA mnemonic RA ASM_LBTI mnemonic brinst, RA - ASM_RAB mnemonic RA, RB - ASM_RDCH mnemonic RT, CA - ASM_RR mnemonic RT, RA, RB - ASM_RT mnemonic RT - ASM_RTA mnemonic RT, RA - ASM_WRCH mnemonic CA, RT - -Note that RRR instructions have the names for RC and RT reversed from -what's in the ISA, in order to put RT in the same position it appears -for other formats. - --=-=-= DEPENDENCY =-=-=- - -DEPENDENCY filed consists of 5 digits. This represents which register is used as source and which register is used as target. -The first(most significant) digit is always 0. Then it is followd by RC, RB, RA and RT digits. -If the digit is 0, this means the corresponding register is not used in the instruction. -If the digit is 1, this means the corresponding register is used as a source in the instruction. -If the digit is 2, this means the corresponding register is used as a target in the instruction. -If the digit is 3, this means the corresponding register is used as both source and target in the instruction. -For example, fms instruction has 00113 as the DEPENDENCY field. This means RC is not used in this operation, RB and RA are -used as sources and RT is the target. 
- --=-=-= PIPE =-=-=- - -This field shows which execution pipe is used for the instruction - -pipe0 execution pipelines: - FP6 SP floating pipeline - FP7 integer operations executed in SP floating pipeline - FPD DP floating pipeline - FX2 FXU pipeline - FX3 Rotate/Shift pipeline - FXB Byte pipeline - NOP No pipeline - -pipe1 execution pipelines: - BR Branch pipeline - LNOP No pipeline - LS Load/Store pipeline - SHUF Shuffle pipeline - SPR SPR/CH pipeline - -*/ - -#define _A0() {0} -#define _A1(a) {1,a} -#define _A2(a,b) {2,a,b} -#define _A3(a,b,c) {3,a,b,c} -#define _A4(a,b,c,d) {4,a,b,c,d} - -/* TAG FORMAT OPCODE MNEMONIC ASM_FORMAT DEPENDENCY PIPE COMMENT */ -/* 0[RC][RB][RA][RT] */ -/* 1:src, 2:target */ - -APUOP(M_BR, RI16, 0x190, "br", _A1(A_R18), 00000, BR) /* BRel IP<-IP+I16 */ -APUOP(M_BRSL, RI16, 0x198, "brsl", _A2(A_T,A_R18), 00002, BR) /* BRelSetLink RT,IP<-IP,IP+I16 */ -APUOP(M_BRA, RI16, 0x180, "bra", _A1(A_S18), 00000, BR) /* BRAbs IP<-I16 */ -APUOP(M_BRASL, RI16, 0x188, "brasl", _A2(A_T,A_S18), 00002, BR) /* BRAbsSetLink RT,IP<-IP,I16 */ -APUOP(M_FSMBI, RI16, 0x194, "fsmbi", _A2(A_T,A_X16), 00002, SHUF) /* FormSelMask%I RT<-fsm(I16) */ -APUOP(M_LQA, RI16, 0x184, "lqa", _A2(A_T,A_S18), 00002, LS) /* LoadQAbs RT<-M[I16] */ -APUOP(M_LQR, RI16, 0x19C, "lqr", _A2(A_T,A_R18), 00002, LS) /* LoadQRel RT<-M[IP+I16] */ -APUOP(M_STOP, RR, 0x000, "stop", _A0(), 00000, BR) /* STOP stop */ -APUOP(M_STOP2, RR, 0x000, "stop", _A1(A_U14), 00000, BR) /* STOP stop */ -APUOP(M_STOPD, RR, 0x140, "stopd", _A3(A_T,A_A,A_B), 00111, BR) /* STOPD stop (with register dependencies) */ -APUOP(M_LNOP, RR, 0x001, "lnop", _A0(), 00000, LNOP) /* LNOP no_operation */ -APUOP(M_SYNC, RR, 0x002, "sync", _A0(), 00000, BR) /* SYNC flush_pipe */ -APUOP(M_DSYNC, RR, 0x003, "dsync", _A0(), 00000, BR) /* DSYNC flush_store_queue */ -APUOP(M_MFSPR, RR, 0x00c, "mfspr", _A2(A_T,A_S), 00002, SPR) /* MFSPR RT<-SA */ -APUOP(M_RDCH, RR, 0x00d, "rdch", _A2(A_T,A_H), 00002, SPR) /* ReaDCHannel RT<-CA:data */ -APUOP(M_RCHCNT, RR, 0x00f, "rchcnt", _A2(A_T,A_H), 00002, SPR) /* ReaDCHanCouNT RT<-CA:count */ -APUOP(M_HBRA, LBT, 0x080, "hbra", _A2(A_S11,A_S18), 00000, LS) /* HBRA BTB[B9]<-M[I16] */ -APUOP(M_HBRR, LBT, 0x090, "hbrr", _A2(A_S11,A_R18), 00000, LS) /* HBRR BTB[B9]<-M[IP+I16] */ -APUOP(M_BRZ, RI16, 0x100, "brz", _A2(A_T,A_R18), 00001, BR) /* BRZ IP<-IP+I16_if(RT) */ -APUOP(M_BRNZ, RI16, 0x108, "brnz", _A2(A_T,A_R18), 00001, BR) /* BRNZ IP<-IP+I16_if(RT) */ -APUOP(M_BRHZ, RI16, 0x110, "brhz", _A2(A_T,A_R18), 00001, BR) /* BRHZ IP<-IP+I16_if(RT) */ -APUOP(M_BRHNZ, RI16, 0x118, "brhnz", _A2(A_T,A_R18), 00001, BR) /* BRHNZ IP<-IP+I16_if(RT) */ -APUOP(M_STQA, RI16, 0x104, "stqa", _A2(A_T,A_S18), 00001, LS) /* SToreQAbs M[I16]<-RT */ -APUOP(M_STQR, RI16, 0x11C, "stqr", _A2(A_T,A_R18), 00001, LS) /* SToreQRel M[IP+I16]<-RT */ -APUOP(M_MTSPR, RR, 0x10c, "mtspr", _A2(A_S,A_T), 00001, SPR) /* MTSPR SA<-RT */ -APUOP(M_WRCH, RR, 0x10d, "wrch", _A2(A_H,A_T), 00001, SPR) /* ChanWRite CA<-RT */ -APUOP(M_LQD, RI10, 0x1a0, "lqd", _A4(A_T,A_S14,A_P,A_A), 00012, LS) /* LoadQDisp RT<-M[Ra+I10] */ -APUOP(M_BI, RR, 0x1a8, "bi", _A1(A_A), 00010, BR) /* BI IP<-RA */ -APUOP(M_BISL, RR, 0x1a9, "bisl", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */ -APUOP(M_IRET, RR, 0x1aa, "iret", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */ -APUOP(M_IRET2, RR, 0x1aa, "iret", _A0(), 00010, BR) /* IRET IP<-SRR0 */ -APUOP(M_BISLED, RR, 0x1ab, "bisled", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */ -APUOP(M_HBR, LBTI, 0x1ac, "hbr", _A2(A_S11I,A_A), 00010, 
LS) /* HBR BTB[B9]<-M[Ra] */ -APUOP(M_FREST, RR, 0x1b8, "frest", _A2(A_T,A_A), 00012, SHUF) /* FREST RT<-recip(RA) */ -APUOP(M_FRSQEST, RR, 0x1b9, "frsqest", _A2(A_T,A_A), 00012, SHUF) /* FRSQEST RT<-rsqrt(RA) */ -APUOP(M_FSM, RR, 0x1b4, "fsm", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */ -APUOP(M_FSMH, RR, 0x1b5, "fsmh", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */ -APUOP(M_FSMB, RR, 0x1b6, "fsmb", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */ -APUOP(M_GB, RR, 0x1b0, "gb", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */ -APUOP(M_GBH, RR, 0x1b1, "gbh", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */ -APUOP(M_GBB, RR, 0x1b2, "gbb", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */ -APUOP(M_CBD, RI7, 0x1f4, "cbd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */ -APUOP(M_CHD, RI7, 0x1f5, "chd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */ -APUOP(M_CWD, RI7, 0x1f6, "cwd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */ -APUOP(M_CDD, RI7, 0x1f7, "cdd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */ -APUOP(M_ROTQBII, RI7, 0x1f8, "rotqbii", _A3(A_T,A_A,A_U3), 00012, SHUF) /* ROTQBII RT<-RA<<<I7 */ -APUOP(M_ROTQBYI, RI7, 0x1fc, "rotqbyi", _A3(A_T,A_A,A_S7N), 00012, SHUF) /* ROTQBYI RT<-RA<<<(I7*8) */ -APUOP(M_ROTQMBII, RI7, 0x1f9, "rotqmbii", _A3(A_T,A_A,A_S3), 00012, SHUF) /* ROTQMBII RT<-RA<<I7 */ -APUOP(M_ROTQMBYI, RI7, 0x1fd, "rotqmbyi", _A3(A_T,A_A,A_S6), 00012, SHUF) /* ROTQMBYI RT<-RA<<I7 */ -APUOP(M_SHLQBII, RI7, 0x1fb, "shlqbii", _A3(A_T,A_A,A_U3), 00012, SHUF) /* SHLQBII RT<-RA<<I7 */ -APUOP(M_SHLQBYI, RI7, 0x1ff, "shlqbyi", _A3(A_T,A_A,A_U5), 00012, SHUF) /* SHLQBYI RT<-RA<<I7 */ -APUOP(M_STQD, RI10, 0x120, "stqd", _A4(A_T,A_S14,A_P,A_A), 00011, LS) /* SToreQDisp M[Ra+I10]<-RT */ -APUOP(M_BIHNZ, RR, 0x12b, "bihnz", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */ -APUOP(M_BIHZ, RR, 0x12a, "bihz", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */ -APUOP(M_BINZ, RR, 0x129, "binz", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */ -APUOP(M_BIZ, RR, 0x128, "biz", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */ -APUOP(M_CBX, RR, 0x1d4, "cbx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */ -APUOP(M_CHX, RR, 0x1d5, "chx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */ -APUOP(M_CWX, RR, 0x1d6, "cwx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */ -APUOP(M_CDX, RR, 0x1d7, "cdx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */ -APUOP(M_LQX, RR, 0x1c4, "lqx", _A3(A_T,A_A,A_B), 00112, LS) /* LoadQindeX RT<-M[Ra+Rb] */ -APUOP(M_ROTQBI, RR, 0x1d8, "rotqbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBI RT<-RA<<<Rb */ -APUOP(M_ROTQMBI, RR, 0x1d9, "rotqmbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBI RT<-RA<<Rb */ -APUOP(M_SHLQBI, RR, 0x1db, "shlqbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBI RT<-RA<<Rb */ -APUOP(M_ROTQBY, RR, 0x1dc, "rotqby", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBY RT<-RA<<<(Rb*8) */ -APUOP(M_ROTQMBY, RR, 0x1dd, "rotqmby", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBY RT<-RA<<Rb */ -APUOP(M_SHLQBY, RR, 0x1df, "shlqby", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBY RT<-RA<<Rb */ -APUOP(M_ROTQBYBI, RR, 0x1cc, "rotqbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBYBI RT<-RA<<Rb */ -APUOP(M_ROTQMBYBI, RR, 0x1cd, "rotqmbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBYBI RT<-RA<<Rb */ -APUOP(M_SHLQBYBI, RR, 0x1cf, "shlqbybi", 
_A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBYBI RT<-RA<<Rb */ -APUOP(M_STQX, RR, 0x144, "stqx", _A3(A_T,A_A,A_B), 00111, LS) /* SToreQindeX M[Ra+Rb]<-RT */ -APUOP(M_SHUFB, RRR, 0x580, "shufb", _A4(A_C,A_A,A_B,A_T), 02111, SHUF) /* SHUFfleBytes RC<-f(RA,RB,RT) */ -APUOP(M_IL, RI16, 0x204, "il", _A2(A_T,A_S16), 00002, FX2) /* ImmLoad RT<-sxt(I16) */ -APUOP(M_ILH, RI16, 0x20c, "ilh", _A2(A_T,A_X16), 00002, FX2) /* ImmLoadH RT<-I16 */ -APUOP(M_ILHU, RI16, 0x208, "ilhu", _A2(A_T,A_X16), 00002, FX2) /* ImmLoadHUpper RT<-I16<<16 */ -APUOP(M_ILA, RI18, 0x210, "ila", _A2(A_T,A_U18), 00002, FX2) /* ImmLoadAddr RT<-zxt(I18) */ -APUOP(M_NOP, RR, 0x201, "nop", _A1(A_T), 00000, NOP) /* XNOP no_operation */ -APUOP(M_NOP2, RR, 0x201, "nop", _A0(), 00000, NOP) /* XNOP no_operation */ -APUOP(M_IOHL, RI16, 0x304, "iohl", _A2(A_T,A_X16), 00003, FX2) /* ImmOrHLower RT<-RT|zxt(I16) */ -APUOP(M_ANDBI, RI10, 0x0b0, "andbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* AND%I RT<-RA&I10 */ -APUOP(M_ANDHI, RI10, 0x0a8, "andhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* AND%I RT<-RA&I10 */ -APUOP(M_ANDI, RI10, 0x0a0, "andi", _A3(A_T,A_A,A_S10), 00012, FX2) /* AND%I RT<-RA&I10 */ -APUOP(M_ORBI, RI10, 0x030, "orbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* OR%I RT<-RA|I10 */ -APUOP(M_ORHI, RI10, 0x028, "orhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* OR%I RT<-RA|I10 */ -APUOP(M_ORI, RI10, 0x020, "ori", _A3(A_T,A_A,A_S10), 00012, FX2) /* OR%I RT<-RA|I10 */ -APUOP(M_ORX, RR, 0x1f0, "orx", _A2(A_T,A_A), 00012, BR) /* ORX RT<-RA.w0|RA.w1|RA.w2|RA.w3 */ -APUOP(M_XORBI, RI10, 0x230, "xorbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* XOR%I RT<-RA^I10 */ -APUOP(M_XORHI, RI10, 0x228, "xorhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* XOR%I RT<-RA^I10 */ -APUOP(M_XORI, RI10, 0x220, "xori", _A3(A_T,A_A,A_S10), 00012, FX2) /* XOR%I RT<-RA^I10 */ -APUOP(M_AHI, RI10, 0x0e8, "ahi", _A3(A_T,A_A,A_S10), 00012, FX2) /* Add%Immed RT<-RA+I10 */ -APUOP(M_AI, RI10, 0x0e0, "ai", _A3(A_T,A_A,A_S10), 00012, FX2) /* Add%Immed RT<-RA+I10 */ -APUOP(M_SFHI, RI10, 0x068, "sfhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* SubFrom%Imm RT<-I10-RA */ -APUOP(M_SFI, RI10, 0x060, "sfi", _A3(A_T,A_A,A_S10), 00012, FX2) /* SubFrom%Imm RT<-I10-RA */ -APUOP(M_CGTBI, RI10, 0x270, "cgtbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CGT%I RT<-(RA>I10) */ -APUOP(M_CGTHI, RI10, 0x268, "cgthi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CGT%I RT<-(RA>I10) */ -APUOP(M_CGTI, RI10, 0x260, "cgti", _A3(A_T,A_A,A_S10), 00012, FX2) /* CGT%I RT<-(RA>I10) */ -APUOP(M_CLGTBI, RI10, 0x2f0, "clgtbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CLGT%I RT<-(RA>I10) */ -APUOP(M_CLGTHI, RI10, 0x2e8, "clgthi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CLGT%I RT<-(RA>I10) */ -APUOP(M_CLGTI, RI10, 0x2e0, "clgti", _A3(A_T,A_A,A_S10), 00012, FX2) /* CLGT%I RT<-(RA>I10) */ -APUOP(M_CEQBI, RI10, 0x3f0, "ceqbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CEQ%I RT<-(RA=I10) */ -APUOP(M_CEQHI, RI10, 0x3e8, "ceqhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CEQ%I RT<-(RA=I10) */ -APUOP(M_CEQI, RI10, 0x3e0, "ceqi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CEQ%I RT<-(RA=I10) */ -APUOP(M_HGTI, RI10, 0x278, "hgti", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltGTI halt_if(RA>I10) */ -APUOP(M_HGTI2, RI10, 0x278, "hgti", _A2(A_A,A_S10), 00010, FX2) /* HaltGTI halt_if(RA>I10) */ -APUOP(M_HLGTI, RI10, 0x2f8, "hlgti", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltLGTI halt_if(RA>I10) */ -APUOP(M_HLGTI2, RI10, 0x2f8, "hlgti", _A2(A_A,A_S10), 00010, FX2) /* HaltLGTI halt_if(RA>I10) */ -APUOP(M_HEQI, RI10, 0x3f8, "heqi", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltEQImm halt_if(RA=I10) */ -APUOP(M_HEQI2, RI10, 0x3f8, "heqi",
_A2(A_A,A_S10), 00010, FX2) /* HaltEQImm halt_if(RA=I10) */ -APUOP(M_MPYI, RI10, 0x3a0, "mpyi", _A3(A_T,A_A,A_S10), 00012, FP7) /* MPYI RT<-RA*I10 */ -APUOP(M_MPYUI, RI10, 0x3a8, "mpyui", _A3(A_T,A_A,A_S10), 00012, FP7) /* MPYUI RT<-RA*I10 */ -APUOP(M_CFLTS, RI8, 0x3b0, "cflts", _A3(A_T,A_A,A_U7A), 00012, FP7) /* CFLTS RT<-int(RA,I8) */ -APUOP(M_CFLTU, RI8, 0x3b2, "cfltu", _A3(A_T,A_A,A_U7A), 00012, FP7) /* CFLTU RT<-int(RA,I8) */ -APUOP(M_CSFLT, RI8, 0x3b4, "csflt", _A3(A_T,A_A,A_U7B), 00012, FP7) /* CSFLT RT<-flt(RA,I8) */ -APUOP(M_CUFLT, RI8, 0x3b6, "cuflt", _A3(A_T,A_A,A_U7B), 00012, FP7) /* CUFLT RT<-flt(RA,I8) */ -APUOP(M_FESD, RR, 0x3b8, "fesd", _A2(A_T,A_A), 00012, FPD) /* FESD RT<-double(RA) */ -APUOP(M_FRDS, RR, 0x3b9, "frds", _A2(A_T,A_A), 00012, FPD) /* FRDS RT<-single(RA) */ -APUOP(M_FSCRRD, RR, 0x398, "fscrrd", _A1(A_T), 00002, FPD) /* FSCRRD RT<-FP_status */ -APUOP(M_FSCRWR, RR, 0x3ba, "fscrwr", _A2(A_T,A_A), 00010, FP7) /* FSCRWR FP_status<-RA */ -APUOP(M_FSCRWR2, RR, 0x3ba, "fscrwr", _A1(A_A), 00010, FP7) /* FSCRWR FP_status<-RA */ -APUOP(M_CLZ, RR, 0x2a5, "clz", _A2(A_T,A_A), 00012, FX2) /* CLZ RT<-clz(RA) */ -APUOP(M_CNTB, RR, 0x2b4, "cntb", _A2(A_T,A_A), 00012, FXB) /* CNT RT<-pop(RA) */ -APUOP(M_XSBH, RR, 0x2b6, "xsbh", _A2(A_T,A_A), 00012, FX2) /* eXtSignBtoH RT<-sign_ext(RA) */ -APUOP(M_XSHW, RR, 0x2ae, "xshw", _A2(A_T,A_A), 00012, FX2) /* eXtSignHtoW RT<-sign_ext(RA) */ -APUOP(M_XSWD, RR, 0x2a6, "xswd", _A2(A_T,A_A), 00012, FX2) /* eXtSignWtoD RT<-sign_ext(RA) */ -APUOP(M_ROTI, RI7, 0x078, "roti", _A3(A_T,A_A,A_S7N), 00012, FX3) /* ROT%I RT<-RA<<<I7 */ -APUOP(M_ROTMI, RI7, 0x079, "rotmi", _A3(A_T,A_A,A_S7), 00012, FX3) /* ROT%MI RT<-RA<<I7 */ -APUOP(M_ROTMAI, RI7, 0x07a, "rotmai", _A3(A_T,A_A,A_S7), 00012, FX3) /* ROTMA%I RT<-RA<<I7 */ -APUOP(M_SHLI, RI7, 0x07b, "shli", _A3(A_T,A_A,A_U6), 00012, FX3) /* SHL%I RT<-RA<<I7 */ -APUOP(M_ROTHI, RI7, 0x07c, "rothi", _A3(A_T,A_A,A_S7N), 00012, FX3) /* ROT%I RT<-RA<<<I7 */ -APUOP(M_ROTHMI, RI7, 0x07d, "rothmi", _A3(A_T,A_A,A_S6), 00012, FX3) /* ROT%MI RT<-RA<<I7 */ -APUOP(M_ROTMAHI, RI7, 0x07e, "rotmahi", _A3(A_T,A_A,A_S6), 00012, FX3) /* ROTMA%I RT<-RA<<I7 */ -APUOP(M_SHLHI, RI7, 0x07f, "shlhi", _A3(A_T,A_A,A_U5), 00012, FX3) /* SHL%I RT<-RA<<I7 */ -APUOP(M_A, RR, 0x0c0, "a", _A3(A_T,A_A,A_B), 00112, FX2) /* Add% RT<-RA+RB */ -APUOP(M_AH, RR, 0x0c8, "ah", _A3(A_T,A_A,A_B), 00112, FX2) /* Add% RT<-RA+RB */ -APUOP(M_SF, RR, 0x040, "sf", _A3(A_T,A_A,A_B), 00112, FX2) /* SubFrom% RT<-RB-RA */ -APUOP(M_SFH, RR, 0x048, "sfh", _A3(A_T,A_A,A_B), 00112, FX2) /* SubFrom% RT<-RB-RA */ -APUOP(M_CGT, RR, 0x240, "cgt", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */ -APUOP(M_CGTB, RR, 0x250, "cgtb", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */ -APUOP(M_CGTH, RR, 0x248, "cgth", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */ -APUOP(M_CLGT, RR, 0x2c0, "clgt", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */ -APUOP(M_CLGTB, RR, 0x2d0, "clgtb", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */ -APUOP(M_CLGTH, RR, 0x2c8, "clgth", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */ -APUOP(M_CEQ, RR, 0x3c0, "ceq", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */ -APUOP(M_CEQB, RR, 0x3d0, "ceqb", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */ -APUOP(M_CEQH, RR, 0x3c8, "ceqh", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */ -APUOP(M_HGT, RR, 0x258, "hgt", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltGT halt_if(RA>RB) */ -APUOP(M_HGT2, RR, 0x258, "hgt", _A2(A_A,A_B), 00110, FX2) /* HaltGT halt_if(RA>RB) */ 
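
Aside: every SPU instruction in this table is a single 32-bit word. RT sits in the low seven bits, with RA, RB and RC at bit offsets 7, 14 and 21, the same positions the DECODE_INSN_* macros in the spu.h removal further down rely on; the 11-bit RR-form opcode occupies the remaining high bits. Below is a minimal, self-contained sketch of packing and unpacking such a word; the helper names are illustrative, not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	/* Same field positions as the DECODE_INSN_* macros in spu.h below. */
	#define RT(insn)   ((insn) & 0x7f)
	#define RA(insn)   (((insn) >> 7) & 0x7f)
	#define RB(insn)   (((insn) >> 14) & 0x7f)
	#define OP11(insn) ((insn) >> 21)		/* 11-bit RR-form opcode */

	static uint32_t encode_rr(uint32_t op, uint32_t rb, uint32_t ra, uint32_t rt)
	{
		return (op << 21) | (rb << 14) | (ra << 7) | rt;
	}

	int main(void)
	{
		uint32_t insn = encode_rr(0x0c0, 2, 1, 3);	/* "a" rt=3,ra=1,rb=2 */

		printf("op=0x%x rb=%u ra=%u rt=%u\n",
		       (unsigned)OP11(insn), (unsigned)RB(insn),
		       (unsigned)RA(insn), (unsigned)RT(insn));
		return 0;
	}

Fed the "a" row from this table (opcode 0x0c0), it prints op=0xc0 rb=2 ra=1 rt=3.
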
-APUOP(M_HLGT, RR, 0x2d8, "hlgt", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltLGT halt_if(RA>RB) */ -APUOP(M_HLGT2, RR, 0x2d8, "hlgt", _A2(A_A,A_B), 00110, FX2) /* HaltLGT halt_if(RA>RB) */ -APUOP(M_HEQ, RR, 0x3d8, "heq", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltEQ halt_if(RA=RB) */ -APUOP(M_HEQ2, RR, 0x3d8, "heq", _A2(A_A,A_B), 00110, FX2) /* HaltEQ halt_if(RA=RB) */ -APUOP(M_FCEQ, RR, 0x3c2, "fceq", _A3(A_T,A_A,A_B), 00112, FX2) /* FCEQ RT<-(RA=RB) */ -APUOP(M_FCMEQ, RR, 0x3ca, "fcmeq", _A3(A_T,A_A,A_B), 00112, FX2) /* FCMEQ RT<-(|RA|=|RB|) */ -APUOP(M_FCGT, RR, 0x2c2, "fcgt", _A3(A_T,A_A,A_B), 00112, FX2) /* FCGT RT<-(RA>RB) */ -APUOP(M_FCMGT, RR, 0x2ca, "fcmgt", _A3(A_T,A_A,A_B), 00112, FX2) /* FCMGT RT<-(|RA|>|RB|) */ -APUOP(M_AND, RR, 0x0c1, "and", _A3(A_T,A_A,A_B), 00112, FX2) /* AND RT<-RA&RB */ -APUOP(M_NAND, RR, 0x0c9, "nand", _A3(A_T,A_A,A_B), 00112, FX2) /* NAND RT<-!(RA&RB) */ -APUOP(M_OR, RR, 0x041, "or", _A3(A_T,A_A,A_B), 00112, FX2) /* OR RT<-RA|RB */ -APUOP(M_NOR, RR, 0x049, "nor", _A3(A_T,A_A,A_B), 00112, FX2) /* NOR RT<-!(RA|RB) */ -APUOP(M_XOR, RR, 0x241, "xor", _A3(A_T,A_A,A_B), 00112, FX2) /* XOR RT<-RA^RB */ -APUOP(M_EQV, RR, 0x249, "eqv", _A3(A_T,A_A,A_B), 00112, FX2) /* EQuiValent RT<-!(RA^RB) */ -APUOP(M_ANDC, RR, 0x2c1, "andc", _A3(A_T,A_A,A_B), 00112, FX2) /* ANDComplement RT<-RA&!RB */ -APUOP(M_ORC, RR, 0x2c9, "orc", _A3(A_T,A_A,A_B), 00112, FX2) /* ORComplement RT<-RA|!RB */ -APUOP(M_ABSDB, RR, 0x053, "absdb", _A3(A_T,A_A,A_B), 00112, FXB) /* ABSoluteDiff RT<-|RA-RB| */ -APUOP(M_AVGB, RR, 0x0d3, "avgb", _A3(A_T,A_A,A_B), 00112, FXB) /* AVG% RT<-(RA+RB+1)/2 */ -APUOP(M_SUMB, RR, 0x253, "sumb", _A3(A_T,A_A,A_B), 00112, FXB) /* SUM% RT<-f(RA,RB) */ -APUOP(M_DFA, RR, 0x2cc, "dfa", _A3(A_T,A_A,A_B), 00112, FPD) /* DFAdd RT<-RA+RB */ -APUOP(M_DFM, RR, 0x2ce, "dfm", _A3(A_T,A_A,A_B), 00112, FPD) /* DFMul RT<-RA*RB */ -APUOP(M_DFS, RR, 0x2cd, "dfs", _A3(A_T,A_A,A_B), 00112, FPD) /* DFSub RT<-RA-RB */ -APUOP(M_FA, RR, 0x2c4, "fa", _A3(A_T,A_A,A_B), 00112, FP6) /* FAdd RT<-RA+RB */ -APUOP(M_FM, RR, 0x2c6, "fm", _A3(A_T,A_A,A_B), 00112, FP6) /* FMul RT<-RA*RB */ -APUOP(M_FS, RR, 0x2c5, "fs", _A3(A_T,A_A,A_B), 00112, FP6) /* FSub RT<-RA-RB */ -APUOP(M_MPY, RR, 0x3c4, "mpy", _A3(A_T,A_A,A_B), 00112, FP7) /* MPY RT<-RA*RB */ -APUOP(M_MPYH, RR, 0x3c5, "mpyh", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYH RT<-(RAh*RB)<<16 */ -APUOP(M_MPYHH, RR, 0x3c6, "mpyhh", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYHH RT<-RAh*RBh */ -APUOP(M_MPYHHU, RR, 0x3ce, "mpyhhu", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYHHU RT<-RAh*RBh */ -APUOP(M_MPYS, RR, 0x3c7, "mpys", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYS RT<-(RA*RB)>>16 */ -APUOP(M_MPYU, RR, 0x3cc, "mpyu", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYU RT<-RA*RB */ -APUOP(M_FI, RR, 0x3d4, "fi", _A3(A_T,A_A,A_B), 00112, FP7) /* FInterpolate RT<-f(RA,RB) */ -APUOP(M_ROT, RR, 0x058, "rot", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT% RT<-RA<<<RB */ -APUOP(M_ROTM, RR, 0x059, "rotm", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT%M RT<-RA<<Rb */ -APUOP(M_ROTMA, RR, 0x05a, "rotma", _A3(A_T,A_A,A_B), 00112, FX3) /* ROTMA% RT<-RA<<Rb */ -APUOP(M_SHL, RR, 0x05b, "shl", _A3(A_T,A_A,A_B), 00112, FX3) /* SHL% RT<-RA<<Rb */ -APUOP(M_ROTH, RR, 0x05c, "roth", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT% RT<-RA<<<RB */ -APUOP(M_ROTHM, RR, 0x05d, "rothm", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT%M RT<-RA<<Rb */ -APUOP(M_ROTMAH, RR, 0x05e, "rotmah", _A3(A_T,A_A,A_B), 00112, FX3) /* ROTMA% RT<-RA<<Rb */ -APUOP(M_SHLH, RR, 0x05f, "shlh", _A3(A_T,A_A,A_B), 00112, FX3) /* SHL% RT<-RA<<Rb */ -APUOP(M_MPYHHA, RR, 0x346, "mpyhha",
_A3(A_T,A_A,A_B), 00113, FP7) /* MPYHHA RT<-RAh*RBh+RT */ -APUOP(M_MPYHHAU, RR, 0x34e, "mpyhhau", _A3(A_T,A_A,A_B), 00113, FP7) /* MPYHHAU RT<-RAh*RBh+RT */ -APUOP(M_DFMA, RR, 0x35c, "dfma", _A3(A_T,A_A,A_B), 00113, FPD) /* DFMAdd RT<-RT+RA*RB */ -APUOP(M_DFMS, RR, 0x35d, "dfms", _A3(A_T,A_A,A_B), 00113, FPD) /* DFMSub RT<-RA*RB-RT */ -APUOP(M_DFNMS, RR, 0x35e, "dfnms", _A3(A_T,A_A,A_B), 00113, FPD) /* DFNMSub RT<-RT-RA*RB */ -APUOP(M_DFNMA, RR, 0x35f, "dfnma", _A3(A_T,A_A,A_B), 00113, FPD) /* DFNMAdd RT<-(-RT)-RA*RB */ -APUOP(M_FMA, RRR, 0x700, "fma", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FMAdd RC<-RT+RA*RB */ -APUOP(M_FMS, RRR, 0x780, "fms", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FMSub RC<-RA*RB-RT */ -APUOP(M_FNMS, RRR, 0x680, "fnms", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FNMSub RC<-RT-RA*RB */ -APUOP(M_MPYA, RRR, 0x600, "mpya", _A4(A_C,A_A,A_B,A_T), 02111, FP7) /* MPYA RC<-RA*RB+RT */ -APUOP(M_SELB, RRR, 0x400, "selb", _A4(A_C,A_A,A_B,A_T), 02111, FX2) /* SELectBits RC<-RA&!RT|RB&RT */ -/* for system function call, this uses op-code of mtspr */ -APUOP(M_SYSCALL, RI7, 0x10c, "syscall", _A3(A_T,A_A,A_S7N), 00002, SPR) /* System Call */ -/* -pseudo instruction: -system call -value of I9 operation -0 halt -1 rt[0] = open(MEM[ra[0]], ra[1]) -2 rt[0] = close(ra[0]) -3 rt[0] = read(ra[0], MEM[ra[1]], ra[2]) -4 rt[0] = write(ra[0], MEM[ra[1]], ra[2]) -5 printf(MEM[ra[0]], ra[1], ra[2], ra[3]) -42 rt[0] = clock() -52 rt[0] = lseek(ra0, ra1, ra2) - -*/ - - -/* new multiprecision add/sub */ -APUOP(M_ADDX, RR, 0x340, "addx", _A3(A_T,A_A,A_B), 00113, FX2) /* Add_eXtended RT<-RA+RB+RT */ -APUOP(M_CG, RR, 0x0c2, "cg", _A3(A_T,A_A,A_B), 00112, FX2) /* CarryGenerate RT<-cout(RA+RB) */ -APUOP(M_CGX, RR, 0x342, "cgx", _A3(A_T,A_A,A_B), 00113, FX2) /* CarryGen_eXtd RT<-cout(RA+RB+RT) */ -APUOP(M_SFX, RR, 0x341, "sfx", _A3(A_T,A_A,A_B), 00113, FX2) /* SubFrom_eXtd RT<-RB-RA-!RT */ -APUOP(M_BG, RR, 0x042, "bg", _A3(A_T,A_A,A_B), 00112, FX2) /* BorrowGenerate RT<-bout(RB-RA) */ -APUOP(M_BGX, RR, 0x343, "bgx", _A3(A_T,A_A,A_B), 00113, FX2) /* BorrowGen_eXtd RT<-bout(RB-RA-!RT) */ - -/* - -The following ops are a subset of above except with feature bits set.
-Feature bits are bits 11-17 of the instruction: - - 11 - C & P feature bit - 12 - disable interrupts - 13 - enable interrupts - -*/ -APUOPFB(M_BID, RR, 0x1a8, 0x20, "bid", _A1(A_A), 00010, BR) /* BI IP<-RA */ -APUOPFB(M_BIE, RR, 0x1a8, 0x10, "bie", _A1(A_A), 00010, BR) /* BI IP<-RA */ -APUOPFB(M_BISLD, RR, 0x1a9, 0x20, "bisld", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */ -APUOPFB(M_BISLE, RR, 0x1a9, 0x10, "bisle", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */ -APUOPFB(M_IRETD, RR, 0x1aa, 0x20, "iretd", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */ -APUOPFB(M_IRETD2, RR, 0x1aa, 0x20, "iretd", _A0(), 00010, BR) /* IRET IP<-SRR0 */ -APUOPFB(M_IRETE, RR, 0x1aa, 0x10, "irete", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */ -APUOPFB(M_IRETE2, RR, 0x1aa, 0x10, "irete", _A0(), 00010, BR) /* IRET IP<-SRR0 */ -APUOPFB(M_BISLEDD, RR, 0x1ab, 0x20, "bisledd", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */ -APUOPFB(M_BISLEDE, RR, 0x1ab, 0x10, "bislede", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */ -APUOPFB(M_BIHNZD, RR, 0x12b, 0x20, "bihnzd", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */ -APUOPFB(M_BIHNZE, RR, 0x12b, 0x10, "bihnze", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */ -APUOPFB(M_BIHZD, RR, 0x12a, 0x20, "bihzd", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */ -APUOPFB(M_BIHZE, RR, 0x12a, 0x10, "bihze", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */ -APUOPFB(M_BINZD, RR, 0x129, 0x20, "binzd", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */ -APUOPFB(M_BINZE, RR, 0x129, 0x10, "binze", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */ -APUOPFB(M_BIZD, RR, 0x128, 0x20, "bizd", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */ -APUOPFB(M_BIZE, RR, 0x128, 0x10, "bize", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */ -APUOPFB(M_SYNCC, RR, 0x002, 0x40, "syncc", _A0(), 00000, BR) /* SYNCC flush_pipe */ -APUOPFB(M_HBRP, LBTI, 0x1ac, 0x40, "hbrp", _A0(), 00010, LS) /* HBR BTB[B9]<-M[Ra] */ - -/* Synonyms required by the AS manual. */ -APUOP(M_LR, RI10, 0x020, "lr", _A2(A_T,A_A), 00012, FX2) /* OR%I RT<-RA|I10 */ -APUOP(M_BIHT, RR, 0x12b, "biht", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */ -APUOP(M_BIHF, RR, 0x12a, "bihf", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */ -APUOP(M_BIT, RR, 0x129, "bit", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */ -APUOP(M_BIF, RR, 0x128, "bif", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */ -APUOPFB(M_BIHTD, RR, 0x12b, 0x20, "bihtd", _A2(A_T,A_A), 00011, BR) /* BIHNF IP<-RA_if(RT) */ -APUOPFB(M_BIHTE, RR, 0x12b, 0x10, "bihte", _A2(A_T,A_A), 00011, BR) /* BIHNF IP<-RA_if(RT) */ -APUOPFB(M_BIHFD, RR, 0x12a, 0x20, "bihfd", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */ -APUOPFB(M_BIHFE, RR, 0x12a, 0x10, "bihfe", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */ -APUOPFB(M_BITD, RR, 0x129, 0x20, "bitd", _A2(A_T,A_A), 00011, BR) /* BINF IP<-RA_if(RT) */ -APUOPFB(M_BITE, RR, 0x129, 0x10, "bite", _A2(A_T,A_A), 00011, BR) /* BINF IP<-RA_if(RT) */ -APUOPFB(M_BIFD, RR, 0x128, 0x20, "bifd", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */ -APUOPFB(M_BIFE, RR, 0x128, 0x10, "bife", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */ - -#undef _A0 -#undef _A1 -#undef _A2 -#undef _A3 -#undef _A4 diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c deleted file mode 100644 index 6d8197cc540b..000000000000 --- a/arch/powerpc/xmon/spu-opc.c +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* SPU opcode list - - Copyright 2006 Free Software Foundation, Inc. 
- - This file is part of GDB, GAS, and the GNU binutils. - - */ - -#include <linux/kernel.h> -#include <linux/bug.h> -#include "spu.h" - -/* This file holds the Spu opcode table */ - - -/* - Example contents of spu-insn.h - id_tag mode mode type opcode mnemonic asmtype dependency FPU L/S? branch? instruction - QUAD WORD (0,RC,RB,RA,RT) latency - APUOP(M_LQD, 1, 0, RI9, 0x1f8, "lqd", ASM_RI9IDX, 00012, FXU, 1, 0) Load Quadword d-form - */ - -const struct spu_opcode spu_opcodes[] = { -#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \ - { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT }, -#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \ - { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT }, -#include "spu-insns.h" -#undef APUOP -#undef APUOPFB -}; - -const int spu_num_opcodes = ARRAY_SIZE(spu_opcodes); diff --git a/arch/powerpc/xmon/spu.h b/arch/powerpc/xmon/spu.h deleted file mode 100644 index 2d13b1a5fa87..000000000000 --- a/arch/powerpc/xmon/spu.h +++ /dev/null @@ -1,115 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* SPU ELF support for BFD. - - Copyright 2006 Free Software Foundation, Inc. - - This file is part of GDB, GAS, and the GNU binutils. - - */ - - -/* These two enums are from rel_apu/common/spu_asm_format.h */ -/* definition of instruction format */ -typedef enum { - RRR, - RI18, - RI16, - RI10, - RI8, - RI7, - RR, - LBT, - LBTI, - IDATA, - UNKNOWN_IFORMAT -} spu_iformat; - -/* These values describe assembly instruction arguments. They indicate - * how to encode, range checking and which relocation to use. */ -typedef enum { - A_T, /* register at pos 0 */ - A_A, /* register at pos 7 */ - A_B, /* register at pos 14 */ - A_C, /* register at pos 21 */ - A_S, /* special purpose register at pos 7 */ - A_H, /* channel register at pos 7 */ - A_P, /* parenthesis, this has to separate regs from immediates */ - A_S3, - A_S6, - A_S7N, - A_S7, - A_U7A, - A_U7B, - A_S10B, - A_S10, - A_S11, - A_S11I, - A_S14, - A_S16, - A_S18, - A_R18, - A_U3, - A_U5, - A_U6, - A_U7, - A_U14, - A_X16, - A_U18, - A_MAX -} spu_aformat; - -enum spu_insns { -#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \ - TAG, -#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \ - TAG, -#include "spu-insns.h" -#undef APUOP -#undef APUOPFB - M_SPU_MAX -}; - -struct spu_opcode -{ - spu_iformat insn_type; - unsigned int opcode; - char *mnemonic; - int arg[5]; -}; - -#define SIGNED_EXTRACT(insn,size,pos) (((int)((insn) << (32-size-pos))) >> (32-size)) -#define UNSIGNED_EXTRACT(insn,size,pos) (((insn) >> pos) & ((1 << size)-1)) - -#define DECODE_INSN_RT(insn) (insn & 0x7f) -#define DECODE_INSN_RA(insn) ((insn >> 7) & 0x7f) -#define DECODE_INSN_RB(insn) ((insn >> 14) & 0x7f) -#define DECODE_INSN_RC(insn) ((insn >> 21) & 0x7f) - -#define DECODE_INSN_I10(insn) SIGNED_EXTRACT(insn,10,14) -#define DECODE_INSN_U10(insn) UNSIGNED_EXTRACT(insn,10,14) - -/* For branching, immediate loads, hbr and lqa/stqa. 
*/ -#define DECODE_INSN_I16(insn) SIGNED_EXTRACT(insn,16,7) -#define DECODE_INSN_U16(insn) UNSIGNED_EXTRACT(insn,16,7) - -/* for stop */ -#define DECODE_INSN_U14(insn) UNSIGNED_EXTRACT(insn,14,0) - -/* For ila */ -#define DECODE_INSN_I18(insn) SIGNED_EXTRACT(insn,18,7) -#define DECODE_INSN_U18(insn) UNSIGNED_EXTRACT(insn,18,7) - -/* For rotate and shift and generate control mask */ -#define DECODE_INSN_I7(insn) SIGNED_EXTRACT(insn,7,14) -#define DECODE_INSN_U7(insn) UNSIGNED_EXTRACT(insn,7,14) - -/* For float <-> int conversion */ -#define DECODE_INSN_I8(insn) SIGNED_EXTRACT(insn,8,14) -#define DECODE_INSN_U8(insn) UNSIGNED_EXTRACT(insn,8,14) - -/* For hbr */ -#define DECODE_INSN_I9a(insn) ((SIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0)) -#define DECODE_INSN_I9b(insn) ((SIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0)) -#define DECODE_INSN_U9a(insn) ((UNSIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0)) -#define DECODE_INSN_U9b(insn) ((UNSIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0)) - diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index d79d6633f333..cb3a3244ae6f 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -41,8 +41,6 @@ #include <asm/rtas.h> #include <asm/sstep.h> #include <asm/irq_regs.h> -#include <asm/spu.h> -#include <asm/spu_priv1.h> #include <asm/setjmp.h> #include <asm/reg.h> #include <asm/debug.h> @@ -50,7 +48,7 @@ #include <asm/xive.h> #include <asm/opal.h> #include <asm/firmware.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <asm/sections.h> #include <asm/inst.h> #include <asm/interrupt.h> @@ -188,8 +186,6 @@ static void xmon_print_symbol(unsigned long address, const char *mid, const char *after); static const char *getvecname(unsigned long vec); -static int do_spu_cmd(void); - #ifdef CONFIG_44x static void dump_tlb_44x(void); #endif @@ -272,13 +268,6 @@ Commands:\n\ P list processes/tasks\n\ r print registers\n\ s single step\n" -#ifdef CONFIG_SPU_BASE -" ss stop execution on all spus\n\ - sr restore execution on stopped spus\n\ - sf # dump spu fields for spu # (in hex)\n\ - sd # dump spu local store for spu # (in hex)\n\ - sdi # disassemble spu local store for spu # (in hex)\n" -#endif " S print special registers\n\ Sa print all SPRs\n\ Sr # read SPR #\n\ @@ -1112,8 +1101,6 @@ cmds(struct pt_regs *excp) cacheflush(); break; case 's': - if (do_spu_cmd() == 0) - break; if (do_step(excp)) return cmd; break; @@ -1271,11 +1258,7 @@ static int xmon_batch_next_cpu(void) { unsigned long cpu; - while (!cpumask_empty(&xmon_batch_cpus)) { - cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus, - xmon_batch_start_cpu, true); - if (cpu >= nr_cpu_ids) - break; + for_each_cpu_wrap(cpu, &xmon_batch_cpus, xmon_batch_start_cpu) { if (xmon_batch_start_cpu == -1) xmon_batch_start_cpu = cpu; if (xmon_switch_cpu(cpu)) @@ -1350,7 +1333,7 @@ static int cpu_cmd(void) } termch = cpu; - if (!scanhex(&cpu)) { + if (!scanhex(&cpu) || cpu >= num_possible_cpus()) { /* print cpus waiting or in xmon */ printf("cpus stopped:"); last_cpu = first_cpu = NR_CPUS; @@ -1787,7 +1770,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, sp + STACK_INT_FRAME_REGS); break; } - printf("--- Exception: %lx %s at ", regs.trap, + printf("---- Exception: %lx %s at ", regs.trap, getvecname(TRAP(®s))); pc = regs.nip; lr = regs.link; @@ -2623,9 +2606,9 @@ static void dump_one_paca(int cpu) printf("paca for cpu 0x%x @ %px:\n", cpu, p); - printf(" %-*s = %s\n", 25, "possible", 
cpu_possible(cpu) ? "yes" : "no"); - printf(" %-*s = %s\n", 25, "present", cpu_present(cpu) ? "yes" : "no"); - printf(" %-*s = %s\n", 25, "online", cpu_online(cpu) ? "yes" : "no"); + printf(" %-*s = %s\n", 25, "possible", str_yes_no(cpu_possible(cpu))); + printf(" %-*s = %s\n", 25, "present", str_yes_no(cpu_present(cpu))); + printf(" %-*s = %s\n", 25, "online", str_yes_no(cpu_online(cpu))); #define DUMP(paca, name, format) \ printf(" %-*s = "format"\t(0x%lx)\n", 25, #name, 18, paca->name, \ @@ -2772,7 +2755,7 @@ static void dump_pacas(void) termch = c; /* Put c back, it wasn't 'a' */ - if (scanhex(&num)) + if (scanhex(&num) && num < num_possible_cpus()) dump_one_paca(num); else dump_one_paca(xmon_owner); @@ -2845,7 +2828,7 @@ static void dump_xives(void) termch = c; /* Put c back, it wasn't 'a' */ - if (scanhex(&num)) + if (scanhex(&num) && num < num_possible_cpus()) dump_one_xive(num); else dump_one_xive(xmon_owner); @@ -3543,7 +3526,7 @@ scanhex(unsigned long *vp) } } else if (c == '$') { int i; - for (i=0; i<63; i++) { + for (i = 0; i < (KSYM_NAME_LEN - 1); i++) { c = inchar(); if (isspace(c) || c == '\0') { termch = c; @@ -3662,7 +3645,7 @@ symbol_lookup(void) int type = inchar(); unsigned long addr, cpu; void __percpu *ptr = NULL; - static char tmp[64]; + static char tmp[KSYM_NAME_LEN]; switch (type) { case 'a': @@ -3671,7 +3654,7 @@ symbol_lookup(void) termch = 0; break; case 's': - getstring(tmp, 64); + getstring(tmp, KSYM_NAME_LEN); if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); @@ -3686,7 +3669,7 @@ symbol_lookup(void) termch = 0; break; case 'p': - getstring(tmp, 64); + getstring(tmp, KSYM_NAME_LEN); if (setjmp(bus_error_jmp) == 0) { catch_memory_errors = 1; sync(); @@ -4107,263 +4090,3 @@ void __init xmon_setup(void) if (xmon_early) debugger(NULL); } - -#ifdef CONFIG_SPU_BASE - -struct spu_info { - struct spu *spu; - u64 saved_mfc_sr1_RW; - u32 saved_spu_runcntl_RW; - unsigned long dump_addr; - u8 stopped_ok; -}; - -#define XMON_NUM_SPUS 16 /* Enough for current hardware */ - -static struct spu_info spu_info[XMON_NUM_SPUS]; - -void __init xmon_register_spus(struct list_head *list) -{ - struct spu *spu; - - list_for_each_entry(spu, list, full_list) { - if (spu->number >= XMON_NUM_SPUS) { - WARN_ON(1); - continue; - } - - spu_info[spu->number].spu = spu; - spu_info[spu->number].stopped_ok = 0; - spu_info[spu->number].dump_addr = (unsigned long) - spu_info[spu->number].spu->local_store; - } -} - -static void stop_spus(void) -{ - struct spu *spu; - volatile int i; - u64 tmp; - - for (i = 0; i < XMON_NUM_SPUS; i++) { - if (!spu_info[i].spu) - continue; - - if (setjmp(bus_error_jmp) == 0) { - catch_memory_errors = 1; - sync(); - - spu = spu_info[i].spu; - - spu_info[i].saved_spu_runcntl_RW = - in_be32(&spu->problem->spu_runcntl_RW); - - tmp = spu_mfc_sr1_get(spu); - spu_info[i].saved_mfc_sr1_RW = tmp; - - tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; - spu_mfc_sr1_set(spu, tmp); - - sync(); - __delay(200); - - spu_info[i].stopped_ok = 1; - - printf("Stopped spu %.2d (was %s)\n", i, - spu_info[i].saved_spu_runcntl_RW ? 
- "running" : "stopped"); - } else { - catch_memory_errors = 0; - printf("*** Error stopping spu %.2d\n", i); - } - catch_memory_errors = 0; - } -} - -static void restart_spus(void) -{ - struct spu *spu; - volatile int i; - - for (i = 0; i < XMON_NUM_SPUS; i++) { - if (!spu_info[i].spu) - continue; - - if (!spu_info[i].stopped_ok) { - printf("*** Error, spu %d was not successfully stopped" - ", not restarting\n", i); - continue; - } - - if (setjmp(bus_error_jmp) == 0) { - catch_memory_errors = 1; - sync(); - - spu = spu_info[i].spu; - spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW); - out_be32(&spu->problem->spu_runcntl_RW, - spu_info[i].saved_spu_runcntl_RW); - - sync(); - __delay(200); - - printf("Restarted spu %.2d\n", i); - } else { - catch_memory_errors = 0; - printf("*** Error restarting spu %.2d\n", i); - } - catch_memory_errors = 0; - } -} - -#define DUMP_WIDTH 23 -#define DUMP_VALUE(format, field, value) \ -do { \ - if (setjmp(bus_error_jmp) == 0) { \ - catch_memory_errors = 1; \ - sync(); \ - printf(" %-*s = "format"\n", DUMP_WIDTH, \ - #field, value); \ - sync(); \ - __delay(200); \ - } else { \ - catch_memory_errors = 0; \ - printf(" %-*s = *** Error reading field.\n", \ - DUMP_WIDTH, #field); \ - } \ - catch_memory_errors = 0; \ -} while (0) - -#define DUMP_FIELD(obj, format, field) \ - DUMP_VALUE(format, field, obj->field) - -static void dump_spu_fields(struct spu *spu) -{ - printf("Dumping spu fields at address %p:\n", spu); - - DUMP_FIELD(spu, "0x%x", number); - DUMP_FIELD(spu, "%s", name); - DUMP_FIELD(spu, "0x%lx", local_store_phys); - DUMP_FIELD(spu, "0x%p", local_store); - DUMP_FIELD(spu, "0x%lx", ls_size); - DUMP_FIELD(spu, "0x%x", node); - DUMP_FIELD(spu, "0x%lx", flags); - DUMP_FIELD(spu, "%llu", class_0_pending); - DUMP_FIELD(spu, "0x%llx", class_0_dar); - DUMP_FIELD(spu, "0x%llx", class_1_dar); - DUMP_FIELD(spu, "0x%llx", class_1_dsisr); - DUMP_FIELD(spu, "0x%x", irqs[0]); - DUMP_FIELD(spu, "0x%x", irqs[1]); - DUMP_FIELD(spu, "0x%x", irqs[2]); - DUMP_FIELD(spu, "0x%x", slb_replace); - DUMP_FIELD(spu, "%d", pid); - DUMP_FIELD(spu, "0x%p", mm); - DUMP_FIELD(spu, "0x%p", ctx); - DUMP_FIELD(spu, "0x%p", rq); - DUMP_FIELD(spu, "0x%llx", timestamp); - DUMP_FIELD(spu, "0x%lx", problem_phys); - DUMP_FIELD(spu, "0x%p", problem); - DUMP_VALUE("0x%x", problem->spu_runcntl_RW, - in_be32(&spu->problem->spu_runcntl_RW)); - DUMP_VALUE("0x%x", problem->spu_status_R, - in_be32(&spu->problem->spu_status_R)); - DUMP_VALUE("0x%x", problem->spu_npc_RW, - in_be32(&spu->problem->spu_npc_RW)); - DUMP_FIELD(spu, "0x%p", priv2); - DUMP_FIELD(spu, "0x%p", pdata); -} - -static int spu_inst_dump(unsigned long adr, long count, int praddr) -{ - return generic_inst_dump(adr, count, praddr, print_insn_spu); -} - -static void dump_spu_ls(unsigned long num, int subcmd) -{ - unsigned long offset, addr, ls_addr; - - if (setjmp(bus_error_jmp) == 0) { - catch_memory_errors = 1; - sync(); - ls_addr = (unsigned long)spu_info[num].spu->local_store; - sync(); - __delay(200); - } else { - catch_memory_errors = 0; - printf("*** Error: accessing spu info for spu %ld\n", num); - return; - } - catch_memory_errors = 0; - - if (scanhex(&offset)) - addr = ls_addr + offset; - else - addr = spu_info[num].dump_addr; - - if (addr >= ls_addr + LS_SIZE) { - printf("*** Error: address outside of local store\n"); - return; - } - - switch (subcmd) { - case 'i': - addr += spu_inst_dump(addr, 16, 1); - last_cmd = "sdi\n"; - break; - default: - prdump(addr, 64); - addr += 64; - last_cmd = "sd\n"; - break; - } - - 
spu_info[num].dump_addr = addr; -} - -static int do_spu_cmd(void) -{ - static unsigned long num = 0; - int cmd, subcmd = 0; - - cmd = inchar(); - switch (cmd) { - case 's': - stop_spus(); - break; - case 'r': - restart_spus(); - break; - case 'd': - subcmd = inchar(); - if (isxdigit(subcmd) || subcmd == '\n') - termch = subcmd; - fallthrough; - case 'f': - scanhex(&num); - if (num >= XMON_NUM_SPUS || !spu_info[num].spu) { - printf("*** Error: invalid spu number\n"); - return 0; - } - - switch (cmd) { - case 'f': - dump_spu_fields(spu_info[num].spu); - break; - default: - dump_spu_ls(num, subcmd); - break; - } - - break; - default: - return -1; - } - - return 0; -} -#else /* ! CONFIG_SPU_BASE */ -static int do_spu_cmd(void) -{ - return -1; -} -#endif
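
The three deleted files above hang together through a classic X-macro pattern: spu-insns.h is included twice, once with APUOP/APUOPFB expanded into enum tags (spu.h) and once into table rows (spu-opc.c), so the tag enum and spu_opcodes[] cannot drift out of step. Here is a condensed, self-contained model of that technique, with the instruction list inlined rather than kept in a separate header and only three opcodes carried over from the table above; all names are illustrative.

	#include <stdio.h>

	/* One X(...) row per opcode; in the kernel this list lived in
	 * spu-insns.h and was #included twice with different macros. */
	#define SPU_INSNS(X)		\
		X(M_BR, 0x190, "br")	\
		X(M_OR, 0x041, "or")	\
		X(M_A,  0x0c0, "a")

	/* Expansion 1: the enum of tags (what spu.h's enum spu_insns did). */
	enum demo_insns {
	#define AS_TAG(tag, op, mnem) tag,
		SPU_INSNS(AS_TAG)
	#undef AS_TAG
		M_DEMO_MAX
	};

	/* Expansion 2: the opcode table (what spu-opc.c built). */
	struct demo_opcode {
		unsigned int opcode;
		const char *mnemonic;
	};

	static const struct demo_opcode demo_opcodes[] = {
	#define AS_ROW(tag, op, mnem) { op, mnem },
		SPU_INSNS(AS_ROW)
	#undef AS_ROW
	};

	int main(void)
	{
		printf("%s -> 0x%x\n", demo_opcodes[M_A].mnemonic,
		       demo_opcodes[M_A].opcode);
		return 0;
	}

Because both expansions walk the same list in the same order, a tag such as M_A doubles as an index into the table, which is what lets a table built this way be indexed by tag.
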