-rw-r--r--Documentation/arm64/perf.txt85
-rw-r--r--Documentation/arm64/pointer-authentication.txt22
-rw-r--r--Documentation/device-mapper/dm-dust.txt272
-rw-r--r--Documentation/device-mapper/dm-integrity.txt32
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt51
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt476
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml977
-rw-r--r--Documentation/media/uapi/v4l/field-order.rst16
-rw-r--r--Documentation/networking/rxrpc.txt21
-rw-r--r--Documentation/virtual/kvm/api.txt225
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt3
-rw-r--r--Documentation/virtual/kvm/devices/xive.txt197
-rw-r--r--Documentation/x86/mds.rst44
-rw-r--r--MAINTAINERS2
-rw-r--r--arch/Kconfig2
-rw-r--r--arch/alpha/include/asm/segment.h6
-rw-r--r--arch/alpha/kernel/smc37c669.c1
-rw-r--r--arch/alpha/kernel/smc37c93x.c1
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/arc/include/asm/uaccess.h1
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/configs/mini2440_defconfig2
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/domain.h6
-rw-r--r--arch/arm/include/asm/futex.h3
-rw-r--r--arch/arm/include/asm/kvm_emulate.h2
-rw-r--r--arch/arm/include/asm/kvm_host.h26
-rw-r--r--arch/arm/include/asm/limits.h12
-rw-r--r--arch/arm/include/asm/processor.h4
-rw-r--r--arch/arm/include/asm/uaccess.h3
-rw-r--r--arch/arm/mach-davinci/da830.c1
-rw-r--r--arch/arm/mach-davinci/da850.c1
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c1
-rw-r--r--arch/arm/mach-davinci/dm355.c1
-rw-r--r--arch/arm/mach-davinci/dm365.c1
-rw-r--r--arch/arm/mach-davinci/dm644x.c1
-rw-r--r--arch/arm/mach-davinci/dm646x.c1
-rw-r--r--arch/arm/mach-dove/common.c1
-rw-r--r--arch/arm/mach-mediatek/mediatek.c1
-rw-r--r--arch/arm/mach-mv78xx0/common.c1
-rw-r--r--arch/arm/mach-orion5x/common.c1
-rw-r--r--arch/arm/mach-rockchip/rockchip.c1
-rw-r--r--arch/arm/mach-zynq/common.c1
-rw-r--r--arch/arm/mm/init.c17
-rw-r--r--arch/arm/tools/syscall.tbl6
-rw-r--r--arch/arm/vdso/Makefile21
-rw-r--r--arch/arm64/Kconfig6
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/fpsimd.h29
-rw-r--r--arch/arm64/include/asm/kvm_asm.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h16
-rw-r--r--arch/arm64/include/asm/kvm_host.h101
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm64/include/asm/kvm_ptrauth.h111
-rw-r--r--arch/arm64/include/asm/sysreg.h3
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h12
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h43
-rw-r--r--arch/arm64/kernel/asm-offsets.c7
-rw-r--r--arch/arm64/kernel/cpufeature.c2
-rw-r--r--arch/arm64/kernel/fpsimd.c179
-rw-r--r--arch/arm64/kernel/perf_event.c50
-rw-r--r--arch/arm64/kernel/signal.c5
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/fpsimd.c17
-rw-r--r--arch/arm64/kvm/guest.c415
-rw-r--r--arch/arm64/kvm/handle_exit.c36
-rw-r--r--arch/arm64/kvm/hyp/entry.S15
-rw-r--r--arch/arm64/kvm/hyp/switch.c80
-rw-r--r--arch/arm64/kvm/pmu.c239
-rw-r--r--arch/arm64/kvm/reset.c167
-rw-r--r--arch/arm64/kvm/sys_regs.c183
-rw-r--r--arch/arm64/kvm/sys_regs.h25
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/uaccess.h55
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/uaccess.h1
-rw-r--r--arch/ia64/include/asm/segment.h6
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/mips/Kconfig6
-rw-r--r--arch/mips/alchemy/common/platform.c22
-rw-r--r--arch/mips/ath79/clock.c1
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/configs/ip22_defconfig2
-rw-r--r--arch/mips/configs/ip27_defconfig2
-rw-r--r--arch/mips/generic/init.c4
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h11
-rw-r--r--arch/mips/include/asm/pci/bridge.h14
-rw-r--r--arch/mips/include/asm/sn/irq_alloc.h11
-rw-r--r--arch/mips/include/asm/xtalk/xtalk.h9
-rw-r--r--arch/mips/kernel/cpu-probe.c8
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c21
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl6
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl6
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl6
-rw-r--r--arch/mips/pci/Makefile3
-rw-r--r--arch/mips/pci/ops-bridge.c302
-rw-r--r--arch/mips/pci/pci-ip27.c181
-rw-r--r--arch/mips/pci/pci-xtalk-bridge.c610
-rw-r--r--arch/mips/sgi-ip22/ip22-platform.c13
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c190
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c61
-rw-r--r--arch/mips/txx9/generic/setup.c1
-rw-r--r--arch/nds32/Kconfig16
-rw-r--r--arch/nds32/include/asm/Kbuild2
-rw-r--r--arch/nds32/include/asm/assembler.h2
-rw-r--r--arch/nds32/include/asm/barrier.h2
-rw-r--r--arch/nds32/include/asm/bitfield.h2
-rw-r--r--arch/nds32/include/asm/cache.h2
-rw-r--r--arch/nds32/include/asm/cache_info.h2
-rw-r--r--arch/nds32/include/asm/cacheflush.h2
-rw-r--r--arch/nds32/include/asm/current.h2
-rw-r--r--arch/nds32/include/asm/delay.h2
-rw-r--r--arch/nds32/include/asm/elf.h2
-rw-r--r--arch/nds32/include/asm/fixmap.h2
-rw-r--r--arch/nds32/include/asm/futex.h2
-rw-r--r--arch/nds32/include/asm/highmem.h2
-rw-r--r--arch/nds32/include/asm/io.h2
-rw-r--r--arch/nds32/include/asm/irqflags.h2
-rw-r--r--arch/nds32/include/asm/l2_cache.h2
-rw-r--r--arch/nds32/include/asm/linkage.h2
-rw-r--r--arch/nds32/include/asm/memory.h10
-rw-r--r--arch/nds32/include/asm/mmu.h2
-rw-r--r--arch/nds32/include/asm/mmu_context.h2
-rw-r--r--arch/nds32/include/asm/module.h2
-rw-r--r--arch/nds32/include/asm/nds32.h2
-rw-r--r--arch/nds32/include/asm/page.h2
-rw-r--r--arch/nds32/include/asm/pgalloc.h2
-rw-r--r--arch/nds32/include/asm/pgtable.h2
-rw-r--r--arch/nds32/include/asm/proc-fns.h2
-rw-r--r--arch/nds32/include/asm/processor.h2
-rw-r--r--arch/nds32/include/asm/ptrace.h2
-rw-r--r--arch/nds32/include/asm/shmparam.h2
-rw-r--r--arch/nds32/include/asm/string.h2
-rw-r--r--arch/nds32/include/asm/swab.h2
-rw-r--r--arch/nds32/include/asm/syscall.h2
-rw-r--r--arch/nds32/include/asm/syscalls.h2
-rw-r--r--arch/nds32/include/asm/thread_info.h4
-rw-r--r--arch/nds32/include/asm/tlb.h2
-rw-r--r--arch/nds32/include/asm/tlbflush.h2
-rw-r--r--arch/nds32/include/asm/uaccess.h2
-rw-r--r--arch/nds32/include/asm/unistd.h2
-rw-r--r--arch/nds32/include/asm/vdso.h2
-rw-r--r--arch/nds32/include/asm/vdso_datapage.h3
-rw-r--r--arch/nds32/include/asm/vdso_timer_info.h2
-rw-r--r--arch/nds32/include/uapi/asm/auxvec.h2
-rw-r--r--arch/nds32/include/uapi/asm/byteorder.h2
-rw-r--r--arch/nds32/include/uapi/asm/cachectl.h2
-rw-r--r--arch/nds32/include/uapi/asm/param.h2
-rw-r--r--arch/nds32/include/uapi/asm/ptrace.h2
-rw-r--r--arch/nds32/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h2
-rw-r--r--arch/nds32/kernel/.gitignore1
-rw-r--r--arch/nds32/kernel/cacheinfo.c2
-rw-r--r--arch/nds32/kernel/ex-exit.S4
-rw-r--r--arch/nds32/kernel/nds32_ksyms.c6
-rw-r--r--arch/nds32/kernel/vdso.c1
-rw-r--r--arch/nds32/kernel/vdso/.gitignore1
-rw-r--r--arch/nds32/kernel/vdso/Makefile14
-rw-r--r--arch/nds32/kernel/vdso/gettimeofday.c4
-rw-r--r--arch/nds32/mm/init.c2
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/kernel/ptrace.c1
-rw-r--r--arch/openrisc/kernel/setup.c1
-rw-r--r--arch/openrisc/kernel/traps.c1
-rw-r--r--arch/openrisc/mm/init.c1
-rw-r--r--arch/openrisc/mm/tlb.c1
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h6
-rw-r--r--arch/powerpc/include/asm/kvm_host.h11
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h41
-rw-r--r--arch/powerpc/include/asm/xive.h3
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h46
-rw-r--r--arch/powerpc/kernel/cacheinfo.c1
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/kvm/Makefile2
-rw-r--r--arch/powerpc/kvm/book3s.c42
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c96
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c105
-rw-r--r--arch/powerpc/kvm/book3s_hv.c152
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c57
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c144
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S86
-rw-r--r--arch/powerpc/kvm/book3s_xive.c250
-rw-r--r--arch/powerpc/kvm/book3s_xive.h37
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c1249
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c78
-rw-r--r--arch/powerpc/kvm/powerpc.c40
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S3
-rw-r--r--arch/powerpc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/sysdev/xive/native.c11
-rw-r--r--arch/riscv/Kconfig6
-rw-r--r--arch/riscv/Makefile5
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/bug.h35
-rw-r--r--arch/riscv/include/asm/cacheflush.h2
-rw-r--r--arch/riscv/include/asm/csr.h123
-rw-r--r--arch/riscv/include/asm/elf.h6
-rw-r--r--arch/riscv/include/asm/futex.h13
-rw-r--r--arch/riscv/include/asm/irqflags.h10
-rw-r--r--arch/riscv/include/asm/mmu_context.h59
-rw-r--r--arch/riscv/include/asm/ptrace.h21
-rw-r--r--arch/riscv/include/asm/sbi.h19
-rw-r--r--arch/riscv/include/asm/sifive_l2_cache.h16
-rw-r--r--arch/riscv/include/asm/thread_info.h4
-rw-r--r--arch/riscv/include/asm/uaccess.h28
-rw-r--r--arch/riscv/kernel/asm-offsets.c3
-rw-r--r--arch/riscv/kernel/cpu.c3
-rw-r--r--arch/riscv/kernel/entry.S22
-rw-r--r--arch/riscv/kernel/head.S33
-rw-r--r--arch/riscv/kernel/irq.c19
-rw-r--r--arch/riscv/kernel/perf_event.c4
-rw-r--r--arch/riscv/kernel/reset.c15
-rw-r--r--arch/riscv/kernel/setup.c6
-rw-r--r--arch/riscv/kernel/signal.c6
-rw-r--r--arch/riscv/kernel/smp.c61
-rw-r--r--arch/riscv/kernel/smpboot.c22
-rw-r--r--arch/riscv/kernel/stacktrace.c14
-rw-r--r--arch/riscv/kernel/traps.c30
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/riscv/mm/Makefile2
-rw-r--r--arch/riscv/mm/cacheflush.c61
-rw-r--r--arch/riscv/mm/context.c69
-rw-r--r--arch/riscv/mm/fault.c9
-rw-r--r--arch/riscv/mm/sifive_l2_cache.c175
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/boot/Makefile1
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S2
-rw-r--r--arch/s390/configs/defconfig (renamed from arch/s390/defconfig)0
-rw-r--r--arch/s390/include/asm/cpacf.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/segment.h5
-rw-r--r--arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--arch/s390/kernel/ptrace.c1
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/s390/kvm/interrupt.c11
-rw-r--r--arch/s390/kvm/kvm-s390.c120
-rw-r--r--arch/s390/kvm/vsie.c13
-rw-r--r--arch/s390/mm/kasan_init.c2
-rw-r--r--arch/s390/tools/gen_facilities.c3
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/unicore32/configs/unicore32_defconfig2
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl12
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl12
-rw-r--r--arch/x86/entry/vdso/vdso2c.c3
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/intel/core.c8
-rw-r--r--arch/x86/events/perf_event.h4
-rw-r--r--arch/x86/include/asm/arch_hweight.h2
-rw-r--r--arch/x86/include/asm/e820/api.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h7
-rw-r--r--arch/x86/include/asm/msr-index.h8
-rw-r--r--arch/x86/include/asm/vdso.h1
-rw-r--r--arch/x86/kernel/e820.c18
-rw-r--r--arch/x86/kernel/kprobes/core.c2
-rw-r--r--arch/x86/kernel/traps.c8
-rw-r--r--arch/x86/kvm/cpuid.c12
-rw-r--r--arch/x86/kvm/hyperv.c24
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h42
-rw-r--r--arch/x86/kvm/lapic.c38
-rw-r--r--arch/x86/kvm/mmu.c23
-rw-r--r--arch/x86/kvm/mtrr.c10
-rw-r--r--arch/x86/kvm/paging_tmpl.h38
-rw-r--r--arch/x86/kvm/svm.c128
-rw-r--r--arch/x86/kvm/vmx/capabilities.h2
-rw-r--r--arch/x86/kvm/vmx/nested.c348
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c8
-rw-r--r--arch/x86/kvm/vmx/vmx.c90
-rw-r--r--arch/x86/kvm/vmx/vmx.h11
-rw-r--r--arch/x86/kvm/x86.c199
-rw-r--r--arch/x86/kvm/x86.h10
-rw-r--r--arch/x86/mm/init_64.c144
-rw-r--r--arch/x86/mm/mem_encrypt.c10
-rw-r--r--arch/x86/mm/mm_internal.h3
-rw-r--r--arch/xtensa/include/asm/segment.h16
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c1
-rw-r--r--block/bio-integrity.c3
-rw-r--r--drivers/acpi/acpi_apd.c1
-rw-r--r--drivers/ata/sata_rcar.c1
-rw-r--r--drivers/block/brd.c6
-rw-r--r--drivers/block/rbd.c24
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c1
-rw-r--r--drivers/clk/axs10x/pll_clock.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1
-rw-r--r--drivers/clk/bcm/clk-kona.c3
-rw-r--r--drivers/clk/berlin/berlin2-div.c1
-rw-r--r--drivers/clk/berlin/bg2.c1
-rw-r--r--drivers/clk/berlin/bg2q.c1
-rw-r--r--drivers/clk/clk-fixed-mmio.c3
-rw-r--r--drivers/clk/clk-fractional-divider.c1
-rw-r--r--drivers/clk/clk-hsdk-pll.c1
-rw-r--r--drivers/clk/clk-multiplier.c1
-rw-r--r--drivers/clk/davinci/pll-da850.c1
-rw-r--r--drivers/clk/h8300/clk-div.c1
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c3
-rw-r--r--drivers/clk/hisilicon/clk-hi3660-stub.c1
-rw-r--r--drivers/clk/imx/clk-composite-8m.c3
-rw-r--r--drivers/clk/imx/clk-frac-pll.c1
-rw-r--r--drivers/clk/imx/clk-imx21.c1
-rw-r--r--drivers/clk/imx/clk-imx27.c1
-rw-r--r--drivers/clk/imx/clk-pfdv2.c1
-rw-r--r--drivers/clk/imx/clk-pllv4.c1
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c1
-rw-r--r--drivers/clk/ingenic/cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c1
-rw-r--r--drivers/clk/loongson1/clk-loongson1c.c1
-rw-r--r--drivers/clk/microchip/clk-core.c1
-rw-r--r--drivers/clk/microchip/clk-pic32mzda.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c1
-rw-r--r--drivers/clk/mvebu/clk-corediv.c1
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c1
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c1
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c1
-rw-r--r--drivers/clk/pxa/clk-pxa.c1
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c1
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c1
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c1
-rw-r--r--drivers/clk/renesas/clk-rz.c1
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c1
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c1
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c1
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c1
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c3
-rw-r--r--drivers/clk/rockchip/clk-px30.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c1
-rw-r--r--drivers/clk/rockchip/clk-rv1108.c1
-rw-r--r--drivers/clk/rockchip/clk.c1
-rw-r--r--drivers/clk/samsung/clk-cpu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c1
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c1
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c1
-rw-r--r--drivers/clk/samsung/clk-pll.c3
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c1
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c1
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c1
-rw-r--r--drivers/clk/samsung/clk.c1
-rw-r--r--drivers/clk/sifive/fu540-prci.c1
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c1
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c1
-rw-r--r--drivers/clk/st/clkgen-mux.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mmc_timing.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_phase.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-mod1.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-pll2.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c1
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c1
-rw-r--r--drivers/clk/sunxi/clk-mod0.c1
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-display.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-pll3.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-tcon-ch1.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-cpus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c1
-rw-r--r--drivers/clk/sunxi/clk-usb.c1
-rw-r--r--drivers/clk/tegra/clk-emc.c1
-rw-r--r--drivers/clk/tegra/clk-periph-fixed.c1
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c1
-rw-r--r--drivers/clk/tegra/clk.c1
-rw-r--r--drivers/clk/ti/adpll.c1
-rw-r--r--drivers/clk/ti/clk.c1
-rw-r--r--drivers/clk/ti/fapll.c1
-rw-r--r--drivers/clk/versatile/clk-sp810.c1
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c1
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c1
-rw-r--r--drivers/edac/Kconfig4
-rw-r--r--drivers/edac/edac_mc.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c1
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/lightnvm/core.c82
-rw-r--r--drivers/lightnvm/pblk-cache.c8
-rw-r--r--drivers/lightnvm/pblk-core.c65
-rw-r--r--drivers/lightnvm/pblk-gc.c52
-rw-r--r--drivers/lightnvm/pblk-init.c65
-rw-r--r--drivers/lightnvm/pblk-map.c1
-rw-r--r--drivers/lightnvm/pblk-rb.c13
-rw-r--r--drivers/lightnvm/pblk-read.c394
-rw-r--r--drivers/lightnvm/pblk-recovery.c74
-rw-r--r--drivers/lightnvm/pblk-write.c1
-rw-r--r--drivers/lightnvm/pblk.h28
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c1
-rw-r--r--drivers/md/Kconfig9
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-crypt.c26
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-dust.c515
-rw-r--r--drivers/md/dm-exception-store.h3
-rw-r--r--drivers/md/dm-init.c8
-rw-r--r--drivers/md/dm-integrity.c717
-rw-r--r--drivers/md/dm-ioctl.c6
-rw-r--r--drivers/md/dm-mpath.c19
-rw-r--r--drivers/md/dm-rq.c8
-rw-r--r--drivers/md/dm-snap.c359
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin-metadata.c139
-rw-r--r--drivers/md/dm-writecache.c29
-rw-r--r--drivers/md/dm-zoned-metadata.c5
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c12
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c2
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h19
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c46
-rw-r--r--drivers/media/platform/coda/coda-common.c10
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/omap/omap_vout.c15
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c68
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c4
-rw-r--r--drivers/memory/tegra/tegra124-emc.c1
-rw-r--r--drivers/mfd/intel-lpss.c1
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ieee802154/ca8210.c1
-rw-r--r--drivers/nvme/host/core.c79
-rw-r--r--drivers/nvme/host/fabrics.c4
-rw-r--r--drivers/nvme/host/fc.c14
-rw-r--r--drivers/nvme/host/lightnvm.c1
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/pci.c4
-rw-r--r--drivers/nvme/host/rdma.c34
-rw-r--r--drivers/nvme/host/trace.h1
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/cio/qdio_main.c19
-rw-r--r--drivers/s390/cio/trace.c1
-rw-r--r--drivers/s390/cio/trace.h23
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c2
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c6
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2
-rw-r--r--drivers/staging/media/imx/imx-media.h3
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c2
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c14
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c3
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/intel/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c13
-rw-r--r--drivers/thermal/qcom/Kconfig1
-rw-r--r--drivers/thermal/thermal_core.c31
-rw-r--r--drivers/tty/hvc/hvc_riscv_sbi.c1
-rw-r--r--fs/afs/addr_list.c2
-rw-r--r--fs/afs/afs.h16
-rw-r--r--fs/afs/callback.c29
-rw-r--r--fs/afs/cell.c187
-rw-r--r--fs/afs/cmservice.c14
-rw-r--r--fs/afs/dir.c375
-rw-r--r--fs/afs/dir_silly.c35
-rw-r--r--fs/afs/dynroot.c5
-rw-r--r--fs/afs/file.c29
-rw-r--r--fs/afs/flock.c49
-rw-r--r--fs/afs/fs_probe.c4
-rw-r--r--fs/afs/fsclient.c702
-rw-r--r--fs/afs/inode.c453
-rw-r--r--fs/afs/internal.h199
-rw-r--r--fs/afs/proc.c8
-rw-r--r--fs/afs/rotate.c47
-rw-r--r--fs/afs/rxrpc.c20
-rw-r--r--fs/afs/security.c19
-rw-r--r--fs/afs/server.c17
-rw-r--r--fs/afs/super.c22
-rw-r--r--fs/afs/vl_list.c20
-rw-r--r--fs/afs/vl_probe.c4
-rw-r--r--fs/afs/vl_rotate.c28
-rw-r--r--fs/afs/vlclient.c38
-rw-r--r--fs/afs/write.c100
-rw-r--r--fs/afs/xattr.c202
-rw-r--r--fs/afs/yfsclient.c714
-rw-r--r--fs/ceph/caps.c93
-rw-r--r--fs/ceph/debugfs.c40
-rw-r--r--fs/ceph/export.c356
-rw-r--r--fs/ceph/file.c2
-rw-r--r--fs/ceph/inode.c85
-rw-r--r--fs/ceph/locks.c13
-rw-r--r--fs/ceph/mds_client.c205
-rw-r--r--fs/ceph/mds_client.h33
-rw-r--r--fs/ceph/mdsmap.c2
-rw-r--r--fs/ceph/quota.c177
-rw-r--r--fs/ceph/super.c7
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/cifs/dns_resolve.c2
-rw-r--r--fs/configfs/dir.c17
-rw-r--r--fs/fsopen.c2
-rw-r--r--fs/io_uring.c88
-rw-r--r--fs/nfs/dns_resolve.c2
-rw-r--r--include/asm-generic/segment.h9
-rw-r--r--include/asm-generic/uaccess.h58
-rw-r--r--include/linux/ceph/ceph_fs.h6
-rw-r--r--include/linux/ceph/messenger.h3
-rw-r--r--include/linux/ceph/osdmap.h13
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/dns_resolver.h3
-rw-r--r--include/linux/kvm_host.h48
-rw-r--r--include/linux/lightnvm.h2
-rw-r--r--include/linux/list.h2
-rw-r--r--include/linux/list_bl.h26
-rw-r--r--include/linux/nvme.h4
-rw-r--r--include/linux/overflow.h8
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/platform_data/xtalk-bridge.h22
-rw-r--r--include/linux/random.h2
-rw-r--r--include/linux/slab_def.h3
-rw-r--r--include/linux/thermal.h6
-rw-r--r--include/linux/wait_bit.h13
-rw-r--r--include/media/davinci/vpbe.h2
-rw-r--r--include/net/af_rxrpc.h3
-rw-r--r--include/sound/hdaudio.h1
-rw-r--r--include/uapi/asm-generic/unistd.h14
-rw-r--r--include/uapi/linux/kvm.h15
-rw-r--r--kernel/locking/rwsem-xadd.c46
-rw-r--r--kernel/signal.c1
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--lib/Kconfig4
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/hweight.c4
-rw-r--r--mm/slab.c226
-rw-r--r--net/ceph/cls_lock_client.c2
-rw-r--r--net/ceph/debugfs.c4
-rw-r--r--net/ceph/messenger.c123
-rw-r--r--net/ceph/mon_client.c6
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/dns_resolver/dns_query.c6
-rw-r--r--net/rxrpc/af_rxrpc.c28
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/conn_client.c8
-rw-r--r--net/rxrpc/sendmsg.c4
-rw-r--r--sound/hda/hdac_device.c7
-rw-r--r--sound/hda/hdac_sysfs.c3
-rw-r--r--sound/pci/hda/patch_realtek.c96
-rw-r--r--sound/soc/mxs/mxs-saif.c1
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h3
-rw-r--r--tools/objtool/Documentation/stack-validation.txt2
-rw-r--r--tools/objtool/check.c11
-rw-r--r--tools/testing/selftests/.gitignore1
-rw-r--r--tools/testing/selftests/Makefile39
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c15
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c3
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c8
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c6
-rw-r--r--tools/testing/selftests/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c1
-rw-r--r--tools/testing/selftests/kselftest.h17
-rwxr-xr-xtools/testing/selftests/kselftest/prefix.pl23
-rw-r--r--tools/testing/selftests/kselftest/runner.sh86
-rw-r--r--tools/testing/selftests/kvm/.gitignore7
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c4
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h4
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c32
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c70
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c280
-rw-r--r--tools/testing/selftests/lib.mk76
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test.c1
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c1
-rw-r--r--tools/testing/selftests/rseq/Makefile8
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h132
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h74
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h115
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h90
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h69
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h264
-rw-r--r--tools/testing/selftests/rseq/rseq.c55
-rw-r--r--tools/testing/selftests/rseq/rseq.h1
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c1
-rw-r--r--tools/testing/selftests/sync/sync_test.c1
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/arm/arm.c43
-rw-r--r--virt/kvm/kvm_main.c103
632 files changed, 14829 insertions, 6331 deletions
diff --git a/Documentation/arm64/perf.txt b/Documentation/arm64/perf.txt
new file mode 100644
index 000000000000..0d6a7d87d49e
--- /dev/null
+++ b/Documentation/arm64/perf.txt
@@ -0,0 +1,85 @@
+Perf Event Attributes
+=====================
+
+Author: Andrew Murray <andrew.murray@arm.com>
+Date: 2019-03-06
+
+exclude_user
+------------
+
+This attribute excludes userspace.
+
+Userspace always runs at EL0 and thus this attribute will exclude EL0.
+
+
+exclude_kernel
+--------------
+
+This attribute excludes the kernel.
+
+The kernel runs at EL2 with VHE and at EL1 without it. Guest kernels
+always run at EL1.
+
+For the host this attribute will exclude EL1 and additionally EL2 on a VHE
+system.
+
+For the guest this attribute will exclude EL1. Please note that EL2 is
+never counted within a guest.
+
+
+exclude_hv
+----------
+
+This attribute excludes the hypervisor.
+
+For a VHE host this attribute is ignored as we consider the host kernel to
+be the hypervisor.
+
+For a non-VHE host this attribute will exclude EL2 as we consider the
+hypervisor to be any code that runs at EL2 which is predominantly used for
+guest/host transitions.
+
+For the guest this attribute has no effect. Please note that EL2 is
+never counted within a guest.
+
+
+exclude_host / exclude_guest
+----------------------------
+
+These attributes exclude the KVM host and guest, respectively.
+
+The KVM host may run at EL0 (userspace), EL1 (non-VHE kernel) and EL2 (VHE
+kernel or non-VHE hypervisor).
+
+The KVM guest may run at EL0 (userspace) and EL1 (kernel).
+
+Due to the overlapping exception levels between host and guests we cannot
+rely exclusively on the PMU's hardware exception filtering - therefore we
+must enable/disable counting on entry to and exit from the guest. This is
+performed differently on VHE and non-VHE systems.
+
+For non-VHE systems we exclude EL2 for exclude_host - upon entering and
+exiting the guest we disable/enable the event as appropriate based on the
+exclude_host and exclude_guest attributes.
+
+For VHE systems we exclude EL1 for exclude_guest and exclude both EL0 and
+EL2 for exclude_host. Upon entering and exiting the guest we modify the
+event to include/exclude EL0 as appropriate based on the exclude_host and
+exclude_guest attributes.
+
+The statements above also apply when these attributes are used within a
+non-VHE guest; however, please note that EL2 is never counted within a guest.
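+
+For example, to count only guest cycles from the host, userspace might open
+an event with exclude_host set (a minimal sketch using the generic
+perf_event_open(2) interface; error handling omitted):
+
+  #include <linux/perf_event.h>
+  #include <string.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  static int open_guest_cycles(pid_t pid)
+  {
+          struct perf_event_attr attr;
+
+          memset(&attr, 0, sizeof(attr));
+          attr.type = PERF_TYPE_HARDWARE;
+          attr.size = sizeof(attr);
+          attr.config = PERF_COUNT_HW_CPU_CYCLES;
+          attr.exclude_host = 1;  /* do not count while the host runs */
+
+          /* Counts guest EL0/EL1; EL2 is never counted within a guest. */
+          return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
+  }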
+
+
+Accuracy
+--------
+
+On non-VHE hosts we enable/disable counters at EL2 around the host/guest
+transition - however there is a period of time between
+enabling/disabling the counters and entering/exiting the guest.
+able to eliminate counters counting host events on the boundaries of guest
+entry/exit when counting guest events by filtering out EL2 for
+exclude_host. However when using !exclude_hv there is a small blackout
+window at the guest entry/exit where host events are not captured.
+
+On VHE systems there are no blackout windows.
diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt
index 5baca42ba146..fc71b33de87e 100644
--- a/Documentation/arm64/pointer-authentication.txt
+++ b/Documentation/arm64/pointer-authentication.txt
@@ -87,7 +87,21 @@ used to get and set the keys for a thread.
Virtualization
--------------
-Pointer authentication is not currently supported in KVM guests. KVM
-will mask the feature bits from ID_AA64ISAR1_EL1, and attempted use of
-the feature will result in an UNDEFINED exception being injected into
-the guest.
+Pointer authentication is enabled in a KVM guest when each virtual cpu is
+initialised by passing the flags KVM_ARM_VCPU_PTRAUTH_[ADDRESS/GENERIC] and
+requesting these two separate cpu features to be enabled. The current KVM
+guest implementation works by enabling both features together, so both of
+these userspace flags are checked before enabling pointer authentication.
+Having separate userspace flags means that no userspace ABI changes are
+needed if support is added in the future to allow these two features to be
+enabled independently of one another.
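+
+For example, userspace might request both features when initialising a vcpu
+(a minimal sketch; vm_fd and vcpu_fd are assumed to be file descriptors for
+an already-created VM and vcpu, and error handling is omitted):
+
+  #include <linux/kvm.h>
+  #include <sys/ioctl.h>
+
+  struct kvm_vcpu_init init;
+
+  ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
+  init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS) |
+                      (1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
+  ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);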
+
+As the Arm architecture specifies that the Pointer Authentication feature
+is implemented along with the VHE feature, the KVM arm64 ptrauth code
+relies on VHE mode being present.
+
+Additionally, when these vcpu feature flags are not set, KVM will filter
+out the Pointer Authentication system key registers from the
+KVM_GET/SET_REG_* ioctls and mask those features from the cpufeature ID
+register. Any attempt to use the Pointer Authentication instructions will
+result in an UNDEFINED exception being injected into the guest.
diff --git a/Documentation/device-mapper/dm-dust.txt b/Documentation/device-mapper/dm-dust.txt
new file mode 100644
index 000000000000..954d402a1f6a
--- /dev/null
+++ b/Documentation/device-mapper/dm-dust.txt
@@ -0,0 +1,272 @@
+dm-dust
+=======
+
+This target emulates the behavior of bad sectors at arbitrary
+locations, and allows the emulation of those failures to be enabled
+at an arbitrary time.
+
+This target behaves similarly to a linear target. At a given time,
+the user can send a message to the target to start failing read
+requests on specific blocks (to emulate the behavior of a hard disk
+drive with bad sectors).
+
+When the failure behavior is enabled (i.e.: when the output of
+"dmsetup status" displays "fail_read_on_bad_block"), reads of blocks
+in the "bad block list" will fail with EIO ("Input/output error").
+
+Writes of blocks in the "bad block list" will result in the following:
+
+1. Remove the block from the "bad block list".
+2. Successfully complete the write.
+
+This emulates the "remapped sector" behavior of a drive with bad
+sectors.
+
+Normally, a drive that is encountering bad sectors will most likely
+encounter more bad sectors, at an unknown time or location.
+With dm-dust, the user can use the "addbadblock" and "removebadblock"
+messages to add arbitrary bad blocks at new locations, and the
+"enable" and "disable" messages to modulate the state of whether the
+configured "bad blocks" will be treated as bad, or bypassed.
+This allows the pre-writing of test data and metadata prior to
+simulating a "failure" event where bad sectors start to appear.
+
+Table parameters:
+-----------------
+<device_path> <offset> <blksz>
+
+Mandatory parameters:
+ <device_path>: path to the block device.
+ <offset>: offset to data area from start of device_path
+ <blksz>: block size in bytes
+ (minimum 512, maximum 1073741824, must be a power of 2)
+
+Usage instructions:
+-------------------
+
+First, find the size (in 512-byte sectors) of the device to be used:
+
+$ sudo blockdev --getsz /dev/vdb1
+33552384
+
+Create the dm-dust device:
+(For a device with a block size of 512 bytes)
+$ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
+
+(For a device with a block size of 4096 bytes)
+$ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 4096'
+
+Check the status of the read behavior ("bypass" indicates that all I/O
+will be passed through to the underlying device):
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 bypass
+
+$ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=128 iflag=direct
+128+0 records in
+128+0 records out
+
+$ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
+128+0 records in
+128+0 records out
+
+Adding and removing bad blocks:
+-------------------------------
+
+At any time (i.e.: whether the device has the "bad block" emulation
+enabled or disabled), bad blocks may be added or removed from the
+device via the "addbadblock" and "removebadblock" messages:
+
+$ sudo dmsetup message dust1 0 addbadblock 60
+kernel: device-mapper: dust: badblock added at block 60
+
+$ sudo dmsetup message dust1 0 addbadblock 67
+kernel: device-mapper: dust: badblock added at block 67
+
+$ sudo dmsetup message dust1 0 addbadblock 72
+kernel: device-mapper: dust: badblock added at block 72
+
+These bad blocks will be stored in the "bad block list".
+While the device is in "bypass" mode, reads and writes will succeed:
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 bypass
+
+Enabling block read failures:
+-----------------------------
+
+To enable the "fail read on bad block" behavior, send the "enable" message:
+
+$ sudo dmsetup message dust1 0 enable
+kernel: device-mapper: dust: enabling read failures on bad sectors
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 fail_read_on_bad_block
+
+With the device in "fail read on bad block" mode, attempting to read a
+block will encounter an "Input/output error":
+
+$ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=1 skip=67 iflag=direct
+dd: error reading '/dev/mapper/dust1': Input/output error
+0+0 records in
+0+0 records out
+0 bytes copied, 0.00040651 s, 0.0 kB/s
+
+...and writing to the bad blocks will remove the blocks from the list,
+therefore emulating the "remap" behavior of hard disk drives:
+
+$ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
+128+0 records in
+128+0 records out
+
+kernel: device-mapper: dust: block 60 removed from badblocklist by write
+kernel: device-mapper: dust: block 67 removed from badblocklist by write
+kernel: device-mapper: dust: block 72 removed from badblocklist by write
+kernel: device-mapper: dust: block 87 removed from badblocklist by write
+
+Bad block add/remove error handling:
+------------------------------------
+
+Attempting to add a bad block that already exists in the list will
+result in an "Invalid argument" error, as well as a helpful message:
+
+$ sudo dmsetup message dust1 0 addbadblock 88
+device-mapper: message ioctl on dust1 failed: Invalid argument
+kernel: device-mapper: dust: block 88 already in badblocklist
+
+Attempting to remove a bad block that doesn't exist in the list will
+result in an "Invalid argument" error, as well as a helpful message:
+
+$ sudo dmsetup message dust1 0 removebadblock 87
+device-mapper: message ioctl on dust1 failed: Invalid argument
+kernel: device-mapper: dust: block 87 not found in badblocklist
+
+Counting the number of bad blocks in the bad block list:
+--------------------------------------------------------
+
+To count the number of bad blocks configured in the device, run the
+following message command:
+
+$ sudo dmsetup message dust1 0 countbadblocks
+
+A message will print with the number of bad blocks currently
+configured on the device:
+
+kernel: device-mapper: dust: countbadblocks: 895 badblock(s) found
+
+Querying for specific bad blocks:
+---------------------------------
+
+To find out if a specific block is in the bad block list, run the
+following message command:
+
+$ sudo dmsetup message dust1 0 queryblock 72
+
+The following message will print if the block is in the list:
+device-mapper: dust: queryblock: block 72 found in badblocklist
+
+The following message will print if the block is not in the list:
+device-mapper: dust: queryblock: block 72 not found in badblocklist
+
+The "queryblock" message command will work in both the "enabled"
+and "disabled" modes, allowing the verification of whether a block
+will be treated as "bad" without having to issue I/O to the device,
+or having to "enable" the bad block emulation.
+
+Clearing the bad block list:
+----------------------------
+
+To clear the bad block list (without needing to individually run
+a "removebadblock" message command for every block), run the
+following message command:
+
+$ sudo dmsetup message dust1 0 clearbadblocks
+
+After clearing the bad block list, the following message will appear:
+
+kernel: device-mapper: dust: clearbadblocks: badblocks cleared
+
+If there were no bad blocks to clear, the following message will
+appear:
+
+kernel: device-mapper: dust: clearbadblocks: no badblocks found
+
+Message commands list:
+----------------------
+
+Below is a list of the messages that can be sent to a dust device:
+
+Operations on blocks (requires a <blknum> argument):
+
+addbadblock <blknum>
+queryblock <blknum>
+removebadblock <blknum>
+
+...where <blknum> is a block number within range of the device
+ (corresponding to the block size of the device).
+
+Single argument message commands:
+
+countbadblocks
+clearbadblocks
+disable
+enable
+quiet
+
+Device removal:
+---------------
+
+When finished, remove the device via the "dmsetup remove" command:
+
+$ sudo dmsetup remove dust1
+
+Quiet mode:
+-----------
+
+On test runs with many bad blocks, it may be desirable to avoid
+excessive logging (from bad blocks added, removed, or "remapped").
+This can be done by enabling "quiet mode" via the following message:
+
+$ sudo dmsetup message dust1 0 quiet
+
+This will suppress log messages from add / remove / removed by write
+operations. Log messages from "countbadblocks" or "queryblock"
+message commands will still print in quiet mode.
+
+The status of quiet mode can be seen by running "dmsetup status":
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 fail_read_on_bad_block quiet
+
+To disable quiet mode, send the "quiet" message again:
+
+$ sudo dmsetup message dust1 0 quiet
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 fail_read_on_bad_block verbose
+
+(The presence of "verbose" indicates normal logging.)
+
+"Why not...?"
+-------------
+
+scsi_debug has a "medium error" mode that can fail reads on one
+specified sector (sector 0x1234, hardcoded in the source code), but
+it uses RAM for the persistent storage, which drastically decreases
+the potential device size.
+
+dm-flakey fails all I/O from all block locations on a periodic time
+interval, rather than at a given point in time.
+
+When a bad sector occurs on a hard disk drive, reads to that sector
+are failed by the device, usually resulting in an error code of EIO
+("I/O error") or ENODATA ("No data available"). However, a write to
+the sector may succeed, and result in the sector becoming readable
+after the device controller no longer experiences errors reading the
+sector (or after a reallocation of the sector). Further, more bad
+sectors may occur on the device in the future, in different,
+unpredictable locations.
+
+This target seeks to provide a device that can exhibit the behavior
+of a bad sector at a known sector location, at a known time, based
+on a large storage device (at least tens of gigabytes, not occupying
+system memory).
diff --git a/Documentation/device-mapper/dm-integrity.txt b/Documentation/device-mapper/dm-integrity.txt
index 297251b0d2d5..d63d78ffeb73 100644
--- a/Documentation/device-mapper/dm-integrity.txt
+++ b/Documentation/device-mapper/dm-integrity.txt
@@ -21,6 +21,13 @@ mode it calculates and verifies the integrity tag internally. In this
mode, the dm-integrity target can be used to detect silent data
corruption on the disk or in the I/O path.
+There's an alternate mode of operation where dm-integrity uses a bitmap
+instead of a journal. If a bit in the bitmap is 1, the corresponding
+region's data and integrity tags are not synchronized - if the machine
+crashes, the unsynchronized regions will be recalculated. The bitmap mode
+is faster than the journal mode, because we don't have to write the data
+twice, but it is also less reliable, because if data corruption happens
+when the machine crashes, it may not be detected.
When loading the target for the first time, the kernel driver will format
the device. But it will only format the device if the superblock contains
@@ -59,6 +66,10 @@ Target arguments:
either both data and tag or none of them are written. The
journaled mode degrades write throughput twice because the
data have to be written twice.
+ B - bitmap mode - data and metadata are written without any
+ synchronization; the driver maintains a bitmap of dirty
+ regions where data and metadata don't match. This mode can
+ only be used with internal hash.
R - recovery mode - in this mode, journal is not replayed,
checksums are not checked and writes to the device are not
allowed. This mode is useful for data recovery if the
@@ -79,6 +90,10 @@ interleave_sectors:number
a power of two. If the device is already formatted, the value from
the superblock is used.
+meta_device:device
+ Don't interleave the data and metadata on the same device. Use a
+ separate device for metadata.
+
buffer_sectors:number
The number of sectors in one buffer. The value is rounded down to
a power of two.
@@ -146,6 +161,15 @@ block_size:number
Supported values are 512, 1024, 2048 and 4096 bytes. If not
specified the default block size is 512 bytes.
+sectors_per_bit:number
+ In the bitmap mode, this parameter specifies the number of
+ 512-byte sectors that corresponds to one bitmap bit.
+
+bitmap_flush_interval:number
+ The bitmap flush interval in milliseconds. The metadata buffers
+ are synchronized when this interval expires.
+
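+As a hypothetical example, a bitmap-mode integrity target over /dev/sdb
+might be created with a table line like the following (argument names as
+described above; the device path, sizes and values are illustrative only):
+
+  dmsetup create integ1 --table \
+    "0 2097152 integrity /dev/sdb 0 4 B 3 internal_hash:crc32c \
+     sectors_per_bit:4096 bitmap_flush_interval:10000"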
+
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
be changed when reloading the target (load an inactive table and swap the
tables with suspend and resume). The other arguments should not be changed
@@ -167,7 +191,13 @@ The layout of the formatted block device:
provides (i.e. the size of the device minus the size of all
metadata and padding). The user of this target should not send
bios that access data beyond the "provided data sectors" limit.
- * flags - a flag is set if journal_mac is used
+ * flags
+ SB_FLAG_HAVE_JOURNAL_MAC - a flag is set if journal_mac is used
+ SB_FLAG_RECALCULATING - recalculating is in progress
+ SB_FLAG_DIRTY_BITMAP - journal area contains the bitmap of dirty
+ blocks
+ * log2(sectors per block)
+ * a position where recalculating finished
* journal
The journal is divided into sections, each section contains:
* metadata area (4kiB), it contains journal entries
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
new file mode 100644
index 000000000000..73d8f19c3bd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
@@ -0,0 +1,51 @@
+SiFive L2 Cache Controller
+--------------------------
+The SiFive Level 2 Cache Controller is used to provide access to fast copies
+of memory for masters in a Core Complex. The Level 2 Cache Controller also
+acts as a directory-based coherency manager.
+All the properties in the ePAPR/DeviceTree specification apply to this platform.
+
+Required Properties:
+--------------------
+- compatible: Should be "sifive,fu540-c000-ccache" and "cache"
+
+- cache-block-size: Specifies the block size in bytes of the cache.
+ Should be 64
+
+- cache-level: Should be set to 2 for a level 2 cache
+
+- cache-sets: Specifies the number of associativity sets of the cache.
+ Should be 1024
+
+- cache-size: Specifies the size in bytes of the cache. Should be 2097152
+
+- cache-unified: Specifies the cache is a unified cache
+
+- interrupts: Must contain 3 entries (DirError, DataError and DataFail signals)
+
+- reg: Physical base address and size of L2 cache controller registers map
+
+Optional Properties:
+--------------------
+- next-level-cache: phandle to the next level cache if present.
+
+- memory-region: reference to the reserved-memory for the L2 Loosely Integrated
+ Memory region. The reserved memory node should be defined as per the bindings
+ in reserved-memory.txt
+
+
+Example:
+
+ cache-controller@2010000 {
+ compatible = "sifive,fu540-c000-ccache", "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-sets = <1024>;
+ cache-size = <2097152>;
+ cache-unified;
+ interrupt-parent = <&plic0>;
+ interrupts = <1 2 3>;
+ reg = <0x0 0x2010000 0x0 0x1000>;
+ next-level-cache = <&L25 &L40 &L36>;
+ memory-region = <&l2_lim>;
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
deleted file mode 100644
index e9034a6c003a..000000000000
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ /dev/null
@@ -1,476 +0,0 @@
-Device tree binding vendor prefix registry. Keep list in alphabetical order.
-
-This isn't an exhaustive list, but you should add new prefixes to it before
-using them to avoid name-space collisions.
-
-abilis Abilis Systems
-abracon Abracon Corporation
-actions Actions Semiconductor Co., Ltd.
-active-semi Active-Semi International Inc
-ad Avionic Design GmbH
-adafruit Adafruit Industries, LLC
-adapteva Adapteva, Inc.
-adaptrum Adaptrum, Inc.
-adh AD Holdings Plc.
-adi Analog Devices, Inc.
-advantech Advantech Corporation
-aeroflexgaisler Aeroflex Gaisler AB
-al Annapurna Labs
-allo Allo.com
-allwinner Allwinner Technology Co., Ltd.
-alphascale AlphaScale Integrated Circuits Systems, Inc.
-altr Altera Corp.
-amarula Amarula Solutions
-amazon Amazon.com, Inc.
-amcc Applied Micro Circuits Corporation (APM, formally AMCC)
-amd Advanced Micro Devices (AMD), Inc.
-amediatech Shenzhen Amediatech Technology Co., Ltd
-amlogic Amlogic, Inc.
-ampire Ampire Co., Ltd.
-ams AMS AG
-amstaos AMS-Taos Inc.
-analogix Analogix Semiconductor, Inc.
-andestech Andes Technology Corporation
-apm Applied Micro Circuits Corporation (APM)
-aptina Aptina Imaging
-arasan Arasan Chip Systems
-archermind ArcherMind Technology (Nanjing) Co., Ltd.
-arctic Arctic Sand
-arcx arcx Inc. / Archronix Inc.
-aries Aries Embedded GmbH
-arm ARM Ltd.
-armadeus ARMadeus Systems SARL
-arrow Arrow Electronics
-artesyn Artesyn Embedded Technologies Inc.
-asahi-kasei Asahi Kasei Corp.
-aspeed ASPEED Technology Inc.
-asus AsusTek Computer Inc.
-atlas Atlas Scientific LLC
-atmel Atmel Corporation
-auo AU Optronics Corporation
-auvidea Auvidea GmbH
-avago Avago Technologies
-avia avia semiconductor
-avic Shanghai AVIC Optoelectronics Co., Ltd.
-avnet Avnet, Inc.
-axentia Axentia Technologies AB
-axis Axis Communications AB
-azoteq Azoteq (Pty) Ltd
-azw Shenzhen AZW Technology Co., Ltd.
-bananapi BIPAI KEJI LIMITED
-bhf Beckhoff Automation GmbH & Co. KG
-bitmain Bitmain Technologies
-boe BOE Technology Group Co., Ltd.
-bosch Bosch Sensortec GmbH
-boundary Boundary Devices Inc.
-brcm Broadcom Corporation
-buffalo Buffalo, Inc.
-bticino Bticino International
-calxeda Calxeda
-capella Capella Microsystems, Inc
-cascoda Cascoda, Ltd.
-catalyst Catalyst Semiconductor, Inc.
-cavium Cavium, Inc.
-cdns Cadence Design Systems Inc.
-cdtech CDTech(H.K.) Electronics Limited
-ceva Ceva, Inc.
-chipidea Chipidea, Inc
-chipone ChipOne
-chipspark ChipSPARK
-chrp Common Hardware Reference Platform
-chunghwa Chunghwa Picture Tubes Ltd.
-ciaa Computadora Industrial Abierta Argentina
-cirrus Cirrus Logic, Inc.
-cloudengines Cloud Engines, Inc.
-cnm Chips&Media, Inc.
-cnxt Conexant Systems, Inc.
-compulab CompuLab Ltd.
-cortina Cortina Systems, Inc.
-cosmic Cosmic Circuits
-crane Crane Connectivity Solutions
-creative Creative Technology Ltd
-crystalfontz Crystalfontz America, Inc.
-csky Hangzhou C-SKY Microsystems Co., Ltd
-cubietech Cubietech, Ltd.
-cypress Cypress Semiconductor Corporation
-cznic CZ.NIC, z.s.p.o.
-dallas Maxim Integrated Products (formerly Dallas Semiconductor)
-dataimage DataImage, Inc.
-davicom DAVICOM Semiconductor, Inc.
-delta Delta Electronics, Inc.
-denx Denx Software Engineering
-devantech Devantech, Ltd.
-dh DH electronics GmbH
-digi Digi International Inc.
-digilent Diglent, Inc.
-dioo Dioo Microcircuit Co., Ltd
-dlc DLC Display Co., Ltd.
-dlg Dialog Semiconductor
-dlink D-Link Corporation
-dmo Data Modul AG
-domintech Domintech Co., Ltd.
-dongwoon Dongwoon Anatech
-dptechnics DPTechnics
-dragino Dragino Technology Co., Limited
-ea Embedded Artists AB
-ebs-systart EBS-SYSTART GmbH
-ebv EBV Elektronik
-eckelmann Eckelmann AG
-edt Emerging Display Technologies
-eeti eGalax_eMPIA Technology Inc
-elan Elan Microelectronic Corp.
-elgin Elgin S/A.
-embest Shenzhen Embest Technology Co., Ltd.
-emlid Emlid, Ltd.
-emmicro EM Microelectronic
-emtrion emtrion GmbH
-endless Endless Mobile, Inc.
-energymicro Silicon Laboratories (formerly Energy Micro AS)
-engicam Engicam S.r.l.
-epcos EPCOS AG
-epfl Ecole Polytechnique Fédérale de Lausanne
-epson Seiko Epson Corp.
-est ESTeem Wireless Modems
-ettus NI Ettus Research
-eukrea Eukréa Electromatique
-everest Everest Semiconductor Co. Ltd.
-everspin Everspin Technologies, Inc.
-exar Exar Corporation
-excito Excito
-ezchip EZchip Semiconductor
-facebook Facebook
-fairphone Fairphone B.V.
-faraday Faraday Technology Corporation
-fastrax Fastrax Oy
-fcs Fairchild Semiconductor
-feiyang Shenzhen Fly Young Technology Co.,LTD.
-firefly Firefly
-focaltech FocalTech Systems Co.,Ltd
-friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd
-fsl Freescale Semiconductor
-fujitsu Fujitsu Ltd.
-gateworks Gateworks Corporation
-gcw Game Consoles Worldwide
-ge General Electric Company
-geekbuying GeekBuying
-gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
-GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
-geniatech Geniatech, Inc.
-giantec Giantec Semiconductor, Inc.
-giantplus Giantplus Technology Co., Ltd.
-globalscale Globalscale Technologies, Inc.
-globaltop GlobalTop Technology, Inc.
-gmt Global Mixed-mode Technology, Inc.
-goodix Shenzhen Huiding Technology Co., Ltd.
-google Google, Inc.
-grinn Grinn
-grmn Garmin Limited
-gumstix Gumstix, Inc.
-gw Gateworks Corporation
-hannstar HannStar Display Corporation
-haoyu Haoyu Microelectronic Co. Ltd.
-hardkernel Hardkernel Co., Ltd
-hideep HiDeep Inc.
-himax Himax Technologies, Inc.
-hisilicon Hisilicon Limited.
-hit Hitachi Ltd.
-hitex Hitex Development Tools
-holt Holt Integrated Circuits, Inc.
-honeywell Honeywell
-hp Hewlett Packard
-holtek Holtek Semiconductor, Inc.
-hwacom HwaCom Systems Inc.
-i2se I2SE GmbH
-ibm International Business Machines (IBM)
-icplus IC Plus Corp.
-idt Integrated Device Technologies, Inc.
-ifi Ingenieurburo Fur Ic-Technologie (I/F/I)
-ilitek ILI Technology Corporation (ILITEK)
-img Imagination Technologies Ltd.
-infineon Infineon Technologies
-inforce Inforce Computing
-ingenic Ingenic Semiconductor
-innolux Innolux Corporation
-inside-secure INSIDE Secure
-intel Intel Corporation
-intercontrol Inter Control Group
-invensense InvenSense Inc.
-inversepath Inverse Path
-iom Iomega Corporation
-isee ISEE 2007 S.L.
-isil Intersil
-issi Integrated Silicon Solutions Inc.
-itead ITEAD Intelligent Systems Co.Ltd
-iwave iWave Systems Technologies Pvt. Ltd.
-jdi Japan Display Inc.
-jedec JEDEC Solid State Technology Association
-jianda Jiandangjing Technology Co., Ltd.
-karo Ka-Ro electronics GmbH
-keithkoep Keith & Koep GmbH
-keymile Keymile GmbH
-khadas Khadas
-kiebackpeter Kieback & Peter GmbH
-kinetic Kinetic Technologies
-kingdisplay King & Display Technology Co., Ltd.
-kingnovel Kingnovel Technology Co., Ltd.
-kionix Kionix, Inc.
-kobo Rakuten Kobo Inc.
-koe Kaohsiung Opto-Electronics Inc.
-kosagi Sutajio Ko-Usagi PTE Ltd.
-kyo Kyocera Corporation
-lacie LaCie
-laird Laird PLC
-lantiq Lantiq Semiconductor
-lattice Lattice Semiconductor
-lego LEGO Systems A/S
-lemaker Shenzhen LeMaker Technology Co., Ltd.
-lenovo Lenovo Group Ltd.
-lg LG Corporation
-libretech Shenzhen Libre Technology Co., Ltd
-licheepi Lichee Pi
-linaro Linaro Limited
-linksys Belkin International, Inc. (Linksys)
-linux Linux-specific binding
-linx Linx Technologies
-lltc Linear Technology Corporation
-logicpd Logic PD, Inc.
-lsi LSI Corp. (LSI Logic)
-lwn Liebherr-Werk Nenzing GmbH
-macnica Macnica Americas
-marvell Marvell Technology Group Ltd.
-maxbotix MaxBotix Inc.
-maxim Maxim Integrated Products
-mbvl Mobiveil Inc.
-mcube mCube
-meas Measurement Specialties
-mediatek MediaTek Inc.
-megachips MegaChips
-mele Shenzhen MeLE Digital Technology Ltd.
-melexis Melexis N.V.
-melfas MELFAS Inc.
-mellanox Mellanox Technologies
-memsic MEMSIC Inc.
-menlo Menlo Systems GmbH
-merrii Merrii Technology Co., Ltd.
-micrel Micrel Inc.
-microchip Microchip Technology Inc.
-microcrystal Micro Crystal AG
-micron Micron Technology Inc.
-mikroe MikroElektronika d.o.o.
-minix MINIX Technology Ltd.
-miramems MiraMEMS Sensing Technology Co., Ltd.
-mitsubishi Mitsubishi Electric Corporation
-mosaixtech Mosaix Technologies, Inc.
-motorola Motorola, Inc.
-moxa Moxa Inc.
-mpl MPL AG
-mqmaker mqmaker Inc.
-mscc Microsemi Corporation
-msi Micro-Star International Co. Ltd.
-mti Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
-multi-inno Multi-Inno Technology Co.,Ltd
-mundoreader Mundo Reader S.L.
-murata Murata Manufacturing Co., Ltd.
-mxicy Macronix International Co., Ltd.
-myir MYIR Tech Limited
-national National Semiconductor
-nec NEC LCD Technologies, Ltd.
-neonode Neonode Inc.
-netgear NETGEAR
-netlogic Broadcom Corporation (formerly NetLogic Microsystems)
-netron-dy Netron DY
-netxeon Shenzhen Netxeon Technology CO., LTD
-nexbox Nexbox
-nextthing Next Thing Co.
-newhaven Newhaven Display International
-ni National Instruments
-nintendo Nintendo
-nlt NLT Technologies, Ltd.
-nokia Nokia
-nordic Nordic Semiconductor
-novtech NovTech, Inc.
-nutsboard NutsBoard
-nuvoton Nuvoton Technology Corporation
-nvd New Vision Display
-nvidia NVIDIA
-nxp NXP Semiconductors
-oceanic Oceanic Systems (UK) Ltd.
-okaya Okaya Electric America, Inc.
-oki Oki Electric Industry Co., Ltd.
-olimex OLIMEX Ltd.
-olpc One Laptop Per Child
-onion Onion Corporation
-onnn ON Semiconductor Corp.
-ontat On Tat Industrial Company
-opalkelly Opal Kelly Incorporated
-opencores OpenCores.org
-openrisc OpenRISC.io
-option Option NV
-oranth Shenzhen Oranth Technology Co., Ltd.
-ORCL Oracle Corporation
-orisetech Orise Technology
-ortustech Ortus Technology Co., Ltd.
-osddisplays OSD Displays
-ovti OmniVision Technologies
-oxsemi Oxford Semiconductor, Ltd.
-panasonic Panasonic Corporation
-parade Parade Technologies Inc.
-pda Precision Design Associates, Inc.
-pericom Pericom Technology Inc.
-pervasive Pervasive Displays, Inc.
-phicomm PHICOMM Co., Ltd.
-phytec PHYTEC Messtechnik GmbH
-picochip Picochip Ltd
-pine64 Pine64
-pixcir PIXCIR MICROELECTRONICS Co., Ltd
-plantower Plantower Co., Ltd
-plathome Plat'Home Co., Ltd.
-plda PLDA
-plx Broadcom Corporation (formerly PLX Technology)
-pni PNI Sensor Corporation
-portwell Portwell Inc.
-poslab Poslab Technology Co., Ltd.
-powervr PowerVR (deprecated, use img)
-probox2 PROBOX2 (by W2COMP Co., Ltd.)
-pulsedlight PulsedLight, Inc
-qca Qualcomm Atheros, Inc.
-qcom Qualcomm Technologies, Inc
-qemu QEMU, a generic and open source machine emulator and virtualizer
-qi Qi Hardware
-qiaodian QiaoDian XianShi Corporation
-qnap QNAP Systems, Inc.
-radxa Radxa
-raidsonic RaidSonic Technology GmbH
-ralink Mediatek/Ralink Technology Corp.
-ramtron Ramtron International
-raspberrypi Raspberry Pi Foundation
-raydium Raydium Semiconductor Corp.
-rda Unisoc Communications, Inc.
-realtek Realtek Semiconductor Corp.
-renesas Renesas Electronics Corporation
-richtek Richtek Technology Corporation
-ricoh Ricoh Co. Ltd.
-rikomagic Rikomagic Tech Corp. Ltd
-riscv RISC-V Foundation
-rockchip Fuzhou Rockchip Electronics Co., Ltd
-rocktech ROCKTECH DISPLAYS LIMITED
-rohm ROHM Semiconductor Co., Ltd
-ronbo Ronbo Electronics
-roofull Shenzhen Roofull Technology Co, Ltd
-samsung Samsung Semiconductor
-samtec Samtec/Softing company
-sancloud Sancloud Ltd
-sandisk Sandisk Corporation
-sbs Smart Battery System
-schindler Schindler
-seagate Seagate Technology PLC
-seirobotics Shenzhen SEI Robotics Co., Ltd
-semtech Semtech Corporation
-sensirion Sensirion AG
-sff Small Form Factor Committee
-sgd Solomon Goldentek Display Corporation
-sgx SGX Sensortech
-sharp Sharp Corporation
-shimafuji Shimafuji Electric, Inc.
-si-en Si-En Technology Ltd.
-si-linux Silicon Linux Corporation
-sifive SiFive, Inc.
-sigma Sigma Designs, Inc.
-sii Seiko Instruments, Inc.
-sil Silicon Image
-silabs Silicon Laboratories
-silead Silead Inc.
-silergy Silergy Corp.
-siliconmitus Silicon Mitus, Inc.
-simtek
-sirf SiRF Technology, Inc.
-sis Silicon Integrated Systems Corp.
-sitronix Sitronix Technology Corporation
-skyworks Skyworks Solutions, Inc.
-smsc Standard Microsystems Corporation
-snps Synopsys, Inc.
-socionext Socionext Inc.
-solidrun SolidRun
-solomon Solomon Systech Limited
-sony Sony Corporation
-spansion Spansion Inc.
-sprd Spreadtrum Communications Inc.
-sst Silicon Storage Technology, Inc.
-st STMicroelectronics
-starry Starry Electronic Technology (ShenZhen) Co., LTD
-startek Startek
-ste ST-Ericsson
-stericsson ST-Ericsson
-summit Summit microelectronics
-sunchip Shenzhen Sunchip Technology Co., Ltd
-SUNW Sun Microsystems, Inc
-swir Sierra Wireless
-syna Synaptics Inc.
-synology Synology, Inc.
-tbs TBS Technologies
-tbs-biometrics Touchless Biometric Systems AG
-tcg Trusted Computing Group
-tcl Toby Churchill Ltd.
-technexion TechNexion
-technologic Technologic Systems
-tempo Tempo Semiconductor
-techstar Shenzhen Techstar Electronics Co., Ltd.
-terasic Terasic Inc.
-thine THine Electronics, Inc.
-ti Texas Instruments
-tianma Tianma Micro-electronics Co., Ltd.
-tlm Trusted Logic Mobility
-tmt Tecon Microprocessor Technologies, LLC.
-topeet Topeet
-toradex Toradex AG
-toshiba Toshiba Corporation
-toumaz Toumaz
-tpk TPK U.S.A. LLC
-tplink TP-LINK Technologies Co., Ltd.
-tpo TPO
-tq TQ Systems GmbH
-tronfy Tronfy
-tronsmart Tronsmart
-truly Truly Semiconductors Limited
-tsd Theobroma Systems Design und Consulting GmbH
-tyan Tyan Computer Corporation
-u-blox u-blox
-ucrobotics uCRobotics
-ubnt Ubiquiti Networks
-udoo Udoo
-uniwest United Western Technologies Corp (UniWest)
-upisemi uPI Semiconductor Corp.
-urt United Radiant Technology Corporation
-usi Universal Scientific Industrial Co., Ltd.
-v3 V3 Semiconductor
-vamrs Vamrs Ltd.
-variscite Variscite Ltd.
-via VIA Technologies, Inc.
-virtio Virtual I/O Device Specification, developed by the OASIS consortium
-vishay Vishay Intertechnology, Inc
-vitesse Vitesse Semiconductor Corporation
-vivante Vivante Corporation
-vocore VoCore Studio
-voipac Voipac Technologies s.r.o.
-vot Vision Optical Technology Co., Ltd.
-wd Western Digital Corp.
-wetek WeTek Electronics, limited.
-wexler Wexler
-whwave Shenzhen whwave Electronics, Inc.
-wi2wi Wi2Wi, Inc.
-winbond Winbond Electronics corp.
-winstar Winstar Display Corp.
-wlf Wolfson Microelectronics
-wm Wondermedia Technologies, Inc.
-x-powers X-Powers
-xes Extreme Engineering Solutions (X-ES)
-xillybus Xillybus Ltd.
-xlnx Xilinx
-xunlong Shenzhen Xunlong Software CO.,Limited
-ysoft Y Soft Corporation a.s.
-zarlink Zarlink Semiconductor
-zeitec ZEITEC Semiconductor Co., LTD.
-zidoo Shenzhen Zidoo Technology Co., Ltd.
-zii Zodiac Inflight Innovations
-zte ZTE Corp.
-zyxel ZyXEL Communications Corp.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
new file mode 100644
index 000000000000..33a65a45e319
--- /dev/null
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -0,0 +1,977 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/vendor-prefixes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Devicetree Vendor Prefix Registry
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+select: true
+
+properties: {}
+
+patternProperties:
+ # Prefixes which are not vendors, but followed the pattern
+ # DO NOT ADD NEW PROPERTIES TO THIS LIST
+ "^(at25|devbus|dmacap|dsa|exynos|gpio-fan|gpio|gpmc|hdmi|i2c-gpio),.*": true
+ "^(keypad|m25p|max8952|max8997|max8998|mpmc),.*": true
+ "^(pinctrl-single|#pinctrl-single|PowerPC),.*": true
+ "^(pl022|pxa-mmc|rcar_sound|rotary-encoder|s5m8767|sdhci),.*": true
+ "^(simple-audio-card|simple-graph-card|st-plgpio|st-spics|ts),.*": true
+
+ # Keep list in alphabetical order.
+ "^abilis,.*":
+ description: Abilis Systems
+ "^abracon,.*":
+ description: Abracon Corporation
+ "^actions,.*":
+ description: Actions Semiconductor Co., Ltd.
+ "^active-semi,.*":
+ description: Active-Semi International Inc
+ "^ad,.*":
+ description: Avionic Design GmbH
+ "^adafruit,.*":
+ description: Adafruit Industries, LLC
+ "^adapteva,.*":
+ description: Adapteva, Inc.
+ "^adaptrum,.*":
+ description: Adaptrum, Inc.
+ "^adh,.*":
+ description: AD Holdings Plc.
+ "^adi,.*":
+ description: Analog Devices, Inc.
+ "^advantech,.*":
+ description: Advantech Corporation
+ "^aeroflexgaisler,.*":
+ description: Aeroflex Gaisler AB
+ "^al,.*":
+ description: Annapurna Labs
+ "^allo,.*":
+ description: Allo.com
+ "^allwinner,.*":
+ description: Allwinner Technology Co., Ltd.
+ "^alphascale,.*":
+ description: AlphaScale Integrated Circuits Systems, Inc.
+ "^altr,.*":
+ description: Altera Corp.
+ "^amarula,.*":
+ description: Amarula Solutions
+ "^amazon,.*":
+ description: Amazon.com, Inc.
+ "^amcc,.*":
+ description: Applied Micro Circuits Corporation (APM, formally AMCC)
+ "^amd,.*":
+ description: Advanced Micro Devices (AMD), Inc.
+ "^amediatech,.*":
+ description: Shenzhen Amediatech Technology Co., Ltd
+ "^amlogic,.*":
+ description: Amlogic, Inc.
+ "^ampire,.*":
+ description: Ampire Co., Ltd.
+ "^ams,.*":
+ description: AMS AG
+ "^amstaos,.*":
+ description: AMS-Taos Inc.
+ "^analogix,.*":
+ description: Analogix Semiconductor, Inc.
+ "^andestech,.*":
+ description: Andes Technology Corporation
+ "^apm,.*":
+ description: Applied Micro Circuits Corporation (APM)
+ "^aptina,.*":
+ description: Aptina Imaging
+ "^arasan,.*":
+ description: Arasan Chip Systems
+ "^archermind,.*":
+ description: ArcherMind Technology (Nanjing) Co., Ltd.
+ "^arctic,.*":
+ description: Arctic Sand
+ "^arcx,.*":
+ description: arcx Inc. / Archronix Inc.
+ "^aries,.*":
+ description: Aries Embedded GmbH
+ "^arm,.*":
+ description: ARM Ltd.
+ "^armadeus,.*":
+ description: ARMadeus Systems SARL
+ "^arrow,.*":
+ description: Arrow Electronics
+ "^artesyn,.*":
+ description: Artesyn Embedded Technologies Inc.
+ "^asahi-kasei,.*":
+ description: Asahi Kasei Corp.
+ "^aspeed,.*":
+ description: ASPEED Technology Inc.
+ "^asus,.*":
+ description: AsusTek Computer Inc.
+ "^atlas,.*":
+ description: Atlas Scientific LLC
+ "^atmel,.*":
+ description: Atmel Corporation
+ "^auo,.*":
+ description: AU Optronics Corporation
+ "^auvidea,.*":
+ description: Auvidea GmbH
+ "^avago,.*":
+ description: Avago Technologies
+ "^avia,.*":
+ description: avia semiconductor
+ "^avic,.*":
+ description: Shanghai AVIC Optoelectronics Co., Ltd.
+ "^avnet,.*":
+ description: Avnet, Inc.
+ "^axentia,.*":
+ description: Axentia Technologies AB
+ "^axis,.*":
+ description: Axis Communications AB
+ "^azoteq,.*":
+ description: Azoteq (Pty) Ltd
+ "^azw,.*":
+ description: Shenzhen AZW Technology Co., Ltd.
+ "^bananapi,.*":
+ description: BIPAI KEJI LIMITED
+ "^bhf,.*":
+ description: Beckhoff Automation GmbH & Co. KG
+ "^bitmain,.*":
+ description: Bitmain Technologies
+ "^boe,.*":
+ description: BOE Technology Group Co., Ltd.
+ "^bosch,.*":
+ description: Bosch Sensortec GmbH
+ "^boundary,.*":
+ description: Boundary Devices Inc.
+ "^brcm,.*":
+ description: Broadcom Corporation
+ "^buffalo,.*":
+ description: Buffalo, Inc.
+ "^bticino,.*":
+ description: Bticino International
+ "^calxeda,.*":
+ description: Calxeda
+ "^capella,.*":
+ description: Capella Microsystems, Inc
+ "^cascoda,.*":
+ description: Cascoda, Ltd.
+ "^catalyst,.*":
+ description: Catalyst Semiconductor, Inc.
+ "^cavium,.*":
+ description: Cavium, Inc.
+ "^cdns,.*":
+ description: Cadence Design Systems Inc.
+ "^cdtech,.*":
+ description: CDTech(H.K.) Electronics Limited
+ "^ceva,.*":
+ description: Ceva, Inc.
+ "^chipidea,.*":
+ description: Chipidea, Inc
+ "^chipone,.*":
+ description: ChipOne
+ "^chipspark,.*":
+ description: ChipSPARK
+ "^chrp,.*":
+ description: Common Hardware Reference Platform
+ "^chunghwa,.*":
+ description: Chunghwa Picture Tubes Ltd.
+ "^ciaa,.*":
+ description: Computadora Industrial Abierta Argentina
+ "^cirrus,.*":
+ description: Cirrus Logic, Inc.
+ "^cloudengines,.*":
+ description: Cloud Engines, Inc.
+ "^cnm,.*":
+ description: Chips&Media, Inc.
+ "^cnxt,.*":
+ description: Conexant Systems, Inc.
+ "^compulab,.*":
+ description: CompuLab Ltd.
+ "^cortina,.*":
+ description: Cortina Systems, Inc.
+ "^cosmic,.*":
+ description: Cosmic Circuits
+ "^crane,.*":
+ description: Crane Connectivity Solutions
+ "^creative,.*":
+ description: Creative Technology Ltd
+ "^crystalfontz,.*":
+ description: Crystalfontz America, Inc.
+ "^csky,.*":
+ description: Hangzhou C-SKY Microsystems Co., Ltd
+ "^cubietech,.*":
+ description: Cubietech, Ltd.
+ "^cypress,.*":
+ description: Cypress Semiconductor Corporation
+ "^cznic,.*":
+ description: CZ.NIC, z.s.p.o.
+ "^dallas,.*":
+ description: Maxim Integrated Products (formerly Dallas Semiconductor)
+ "^dataimage,.*":
+ description: DataImage, Inc.
+ "^davicom,.*":
+ description: DAVICOM Semiconductor, Inc.
+ "^delta,.*":
+ description: Delta Electronics, Inc.
+ "^denx,.*":
+ description: Denx Software Engineering
+ "^devantech,.*":
+ description: Devantech, Ltd.
+ "^dh,.*":
+ description: DH electronics GmbH
+ "^digi,.*":
+ description: Digi International Inc.
+ "^digilent,.*":
+ description: Digilent, Inc.
+ "^dioo,.*":
+ description: Dioo Microcircuit Co., Ltd
+ "^dlc,.*":
+ description: DLC Display Co., Ltd.
+ "^dlg,.*":
+ description: Dialog Semiconductor
+ "^dlink,.*":
+ description: D-Link Corporation
+ "^dmo,.*":
+ description: Data Modul AG
+ "^domintech,.*":
+ description: Domintech Co., Ltd.
+ "^dongwoon,.*":
+ description: Dongwoon Anatech
+ "^dptechnics,.*":
+ description: DPTechnics
+ "^dragino,.*":
+ description: Dragino Technology Co., Limited
+ "^ea,.*":
+ description: Embedded Artists AB
+ "^ebs-systart,.*":
+ description: EBS-SYSTART GmbH
+ "^ebv,.*":
+ description: EBV Elektronik
+ "^eckelmann,.*":
+ description: Eckelmann AG
+ "^edt,.*":
+ description: Emerging Display Technologies
+ "^eeti,.*":
+ description: eGalax_eMPIA Technology Inc
+ "^elan,.*":
+ description: Elan Microelectronic Corp.
+ "^elgin,.*":
+ description: Elgin S/A.
+ "^embest,.*":
+ description: Shenzhen Embest Technology Co., Ltd.
+ "^emlid,.*":
+ description: Emlid, Ltd.
+ "^emmicro,.*":
+ description: EM Microelectronic
+ "^emtrion,.*":
+ description: emtrion GmbH
+ "^endless,.*":
+ description: Endless Mobile, Inc.
+ "^energymicro,.*":
+ description: Silicon Laboratories (formerly Energy Micro AS)
+ "^engicam,.*":
+ description: Engicam S.r.l.
+ "^epcos,.*":
+ description: EPCOS AG
+ "^epfl,.*":
+ description: Ecole Polytechnique Fédérale de Lausanne
+ "^epson,.*":
+ description: Seiko Epson Corp.
+ "^est,.*":
+ description: ESTeem Wireless Modems
+ "^ettus,.*":
+ description: NI Ettus Research
+ "^eukrea,.*":
+ description: Eukréa Electromatique
+ "^everest,.*":
+ description: Everest Semiconductor Co. Ltd.
+ "^everspin,.*":
+ description: Everspin Technologies, Inc.
+ "^exar,.*":
+ description: Exar Corporation
+ "^excito,.*":
+ description: Excito
+ "^ezchip,.*":
+ description: EZchip Semiconductor
+ "^facebook,.*":
+ description: Facebook
+ "^fairphone,.*":
+ description: Fairphone B.V.
+ "^faraday,.*":
+ description: Faraday Technology Corporation
+ "^fastrax,.*":
+ description: Fastrax Oy
+ "^fcs,.*":
+ description: Fairchild Semiconductor
+ "^feiyang,.*":
+ description: Shenzhen Fly Young Technology Co.,LTD.
+ "^firefly,.*":
+ description: Firefly
+ "^focaltech,.*":
+ description: FocalTech Systems Co.,Ltd
+ "^friendlyarm,.*":
+ description: Guangzhou FriendlyARM Computer Tech Co., Ltd
+ "^fsl,.*":
+ description: Freescale Semiconductor
+ "^fujitsu,.*":
+ description: Fujitsu Ltd.
+ "^gateworks,.*":
+ description: Gateworks Corporation
+ "^gcw,.*":
+ description: Game Consoles Worldwide
+ "^ge,.*":
+ description: General Electric Company
+ "^geekbuying,.*":
+ description: GeekBuying
+ "^gef,.*":
+ description: GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ "^GEFanuc,.*":
+ description: GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ "^geniatech,.*":
+ description: Geniatech, Inc.
+ "^giantec,.*":
+ description: Giantec Semiconductor, Inc.
+ "^giantplus,.*":
+ description: Giantplus Technology Co., Ltd.
+ "^globalscale,.*":
+ description: Globalscale Technologies, Inc.
+ "^globaltop,.*":
+ description: GlobalTop Technology, Inc.
+ "^gmt,.*":
+ description: Global Mixed-mode Technology, Inc.
+ "^goodix,.*":
+ description: Shenzhen Huiding Technology Co., Ltd.
+ "^google,.*":
+ description: Google, Inc.
+ "^grinn,.*":
+ description: Grinn
+ "^grmn,.*":
+ description: Garmin Limited
+ "^gumstix,.*":
+ description: Gumstix, Inc.
+ "^gw,.*":
+ description: Gateworks Corporation
+ "^hannstar,.*":
+ description: HannStar Display Corporation
+ "^haoyu,.*":
+ description: Haoyu Microelectronic Co. Ltd.
+ "^hardkernel,.*":
+ description: Hardkernel Co., Ltd
+ "^hideep,.*":
+ description: HiDeep Inc.
+ "^himax,.*":
+ description: Himax Technologies, Inc.
+ "^hisilicon,.*":
+ description: Hisilicon Limited.
+ "^hit,.*":
+ description: Hitachi Ltd.
+ "^hitex,.*":
+ description: Hitex Development Tools
+ "^holt,.*":
+ description: Holt Integrated Circuits, Inc.
+ "^honeywell,.*":
+ description: Honeywell
+ "^hp,.*":
+ description: Hewlett Packard
+ "^holtek,.*":
+ description: Holtek Semiconductor, Inc.
+ "^hwacom,.*":
+ description: HwaCom Systems Inc.
+ "^i2se,.*":
+ description: I2SE GmbH
+ "^ibm,.*":
+ description: International Business Machines (IBM)
+ "^icplus,.*":
+ description: IC Plus Corp.
+ "^idt,.*":
+ description: Integrated Device Technologies, Inc.
+ "^ifi,.*":
+ description: Ingenieurburo Fur Ic-Technologie (I/F/I)
+ "^ilitek,.*":
+ description: ILI Technology Corporation (ILITEK)
+ "^img,.*":
+ description: Imagination Technologies Ltd.
+ "^infineon,.*":
+ description: Infineon Technologies
+ "^inforce,.*":
+ description: Inforce Computing
+ "^ingenic,.*":
+ description: Ingenic Semiconductor
+ "^innolux,.*":
+ description: Innolux Corporation
+ "^inside-secure,.*":
+ description: INSIDE Secure
+ "^intel,.*":
+ description: Intel Corporation
+ "^intercontrol,.*":
+ description: Inter Control Group
+ "^invensense,.*":
+ description: InvenSense Inc.
+ "^inversepath,.*":
+ description: Inverse Path
+ "^iom,.*":
+ description: Iomega Corporation
+ "^isee,.*":
+ description: ISEE 2007 S.L.
+ "^isil,.*":
+ description: Intersil
+ "^issi,.*":
+ description: Integrated Silicon Solutions Inc.
+ "^itead,.*":
+ description: ITEAD Intelligent Systems Co.Ltd
+ "^iwave,.*":
+ description: iWave Systems Technologies Pvt. Ltd.
+ "^jdi,.*":
+ description: Japan Display Inc.
+ "^jedec,.*":
+ description: JEDEC Solid State Technology Association
+ "^jianda,.*":
+ description: Jiandangjing Technology Co., Ltd.
+ "^karo,.*":
+ description: Ka-Ro electronics GmbH
+ "^keithkoep,.*":
+ description: Keith & Koep GmbH
+ "^keymile,.*":
+ description: Keymile GmbH
+ "^khadas,.*":
+ description: Khadas
+ "^kiebackpeter,.*":
+ description: Kieback & Peter GmbH
+ "^kinetic,.*":
+ description: Kinetic Technologies
+ "^kingdisplay,.*":
+ description: King & Display Technology Co., Ltd.
+ "^kingnovel,.*":
+ description: Kingnovel Technology Co., Ltd.
+ "^kionix,.*":
+ description: Kionix, Inc.
+ "^kobo,.*":
+ description: Rakuten Kobo Inc.
+ "^koe,.*":
+ description: Kaohsiung Opto-Electronics Inc.
+ "^kosagi,.*":
+ description: Sutajio Ko-Usagi PTE Ltd.
+ "^kyo,.*":
+ description: Kyocera Corporation
+ "^lacie,.*":
+ description: LaCie
+ "^laird,.*":
+ description: Laird PLC
+ "^lantiq,.*":
+ description: Lantiq Semiconductor
+ "^lattice,.*":
+ description: Lattice Semiconductor
+ "^lego,.*":
+ description: LEGO Systems A/S
+ "^lemaker,.*":
+ description: Shenzhen LeMaker Technology Co., Ltd.
+ "^lenovo,.*":
+ description: Lenovo Group Ltd.
+ "^lg,.*":
+ description: LG Corporation
+ "^libretech,.*":
+ description: Shenzhen Libre Technology Co., Ltd
+ "^licheepi,.*":
+ description: Lichee Pi
+ "^linaro,.*":
+ description: Linaro Limited
+ "^linksys,.*":
+ description: Belkin International, Inc. (Linksys)
+ "^linux,.*":
+ description: Linux-specific binding
+ "^linx,.*":
+ description: Linx Technologies
+ "^lltc,.*":
+ description: Linear Technology Corporation
+ "^logicpd,.*":
+ description: Logic PD, Inc.
+ "^lsi,.*":
+ description: LSI Corp. (LSI Logic)
+ "^lwn,.*":
+ description: Liebherr-Werk Nenzing GmbH
+ "^macnica,.*":
+ description: Macnica Americas
+ "^marvell,.*":
+ description: Marvell Technology Group Ltd.
+ "^maxbotix,.*":
+ description: MaxBotix Inc.
+ "^maxim,.*":
+ description: Maxim Integrated Products
+ "^mbvl,.*":
+ description: Mobiveil Inc.
+ "^mcube,.*":
+ description: mCube
+ "^meas,.*":
+ description: Measurement Specialties
+ "^mediatek,.*":
+ description: MediaTek Inc.
+ "^megachips,.*":
+ description: MegaChips
+ "^mele,.*":
+ description: Shenzhen MeLE Digital Technology Ltd.
+ "^melexis,.*":
+ description: Melexis N.V.
+ "^melfas,.*":
+ description: MELFAS Inc.
+ "^mellanox,.*":
+ description: Mellanox Technologies
+ "^memsic,.*":
+ description: MEMSIC Inc.
+ "^menlo,.*":
+ description: Menlo Systems GmbH
+ "^merrii,.*":
+ description: Merrii Technology Co., Ltd.
+ "^micrel,.*":
+ description: Micrel Inc.
+ "^microchip,.*":
+ description: Microchip Technology Inc.
+ "^microcrystal,.*":
+ description: Micro Crystal AG
+ "^micron,.*":
+ description: Micron Technology Inc.
+ "^mikroe,.*":
+ description: MikroElektronika d.o.o.
+ "^minix,.*":
+ description: MINIX Technology Ltd.
+ "^miramems,.*":
+ description: MiraMEMS Sensing Technology Co., Ltd.
+ "^mitsubishi,.*":
+ description: Mitsubishi Electric Corporation
+ "^mosaixtech,.*":
+ description: Mosaix Technologies, Inc.
+ "^motorola,.*":
+ description: Motorola, Inc.
+ "^moxa,.*":
+ description: Moxa Inc.
+ "^mpl,.*":
+ description: MPL AG
+ "^mqmaker,.*":
+ description: mqmaker Inc.
+ "^mscc,.*":
+ description: Microsemi Corporation
+ "^msi,.*":
+ description: Micro-Star International Co. Ltd.
+ "^mti,.*":
+ description: Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
+ "^multi-inno,.*":
+ description: Multi-Inno Technology Co.,Ltd
+ "^mundoreader,.*":
+ description: Mundo Reader S.L.
+ "^murata,.*":
+ description: Murata Manufacturing Co., Ltd.
+ "^mxicy,.*":
+ description: Macronix International Co., Ltd.
+ "^myir,.*":
+ description: MYIR Tech Limited
+ "^national,.*":
+ description: National Semiconductor
+ "^nec,.*":
+ description: NEC LCD Technologies, Ltd.
+ "^neonode,.*":
+ description: Neonode Inc.
+ "^netgear,.*":
+ description: NETGEAR
+ "^netlogic,.*":
+ description: Broadcom Corporation (formerly NetLogic Microsystems)
+ "^netron-dy,.*":
+ description: Netron DY
+ "^netxeon,.*":
+ description: Shenzhen Netxeon Technology CO., LTD
+ "^nexbox,.*":
+ description: Nexbox
+ "^nextthing,.*":
+ description: Next Thing Co.
+ "^newhaven,.*":
+ description: Newhaven Display International
+ "^ni,.*":
+ description: National Instruments
+ "^nintendo,.*":
+ description: Nintendo
+ "^nlt,.*":
+ description: NLT Technologies, Ltd.
+ "^nokia,.*":
+ description: Nokia
+ "^nordic,.*":
+ description: Nordic Semiconductor
+ "^novtech,.*":
+ description: NovTech, Inc.
+ "^nutsboard,.*":
+ description: NutsBoard
+ "^nuvoton,.*":
+ description: Nuvoton Technology Corporation
+ "^nvd,.*":
+ description: New Vision Display
+ "^nvidia,.*":
+ description: NVIDIA
+ "^nxp,.*":
+ description: NXP Semiconductors
+ "^oceanic,.*":
+ description: Oceanic Systems (UK) Ltd.
+ "^okaya,.*":
+ description: Okaya Electric America, Inc.
+ "^oki,.*":
+ description: Oki Electric Industry Co., Ltd.
+ "^olimex,.*":
+ description: OLIMEX Ltd.
+ "^olpc,.*":
+ description: One Laptop Per Child
+ "^onion,.*":
+ description: Onion Corporation
+ "^onnn,.*":
+ description: ON Semiconductor Corp.
+ "^ontat,.*":
+ description: On Tat Industrial Company
+ "^opalkelly,.*":
+ description: Opal Kelly Incorporated
+ "^opencores,.*":
+ description: OpenCores.org
+ "^openrisc,.*":
+ description: OpenRISC.io
+ "^option,.*":
+ description: Option NV
+ "^oranth,.*":
+ description: Shenzhen Oranth Technology Co., Ltd.
+ "^ORCL,.*":
+ description: Oracle Corporation
+ "^orisetech,.*":
+ description: Orise Technology
+ "^ortustech,.*":
+ description: Ortus Technology Co., Ltd.
+ "^osddisplays,.*":
+ description: OSD Displays
+ "^ovti,.*":
+ description: OmniVision Technologies
+ "^oxsemi,.*":
+ description: Oxford Semiconductor, Ltd.
+ "^panasonic,.*":
+ description: Panasonic Corporation
+ "^parade,.*":
+ description: Parade Technologies Inc.
+ "^pda,.*":
+ description: Precision Design Associates, Inc.
+ "^pericom,.*":
+ description: Pericom Technology Inc.
+ "^pervasive,.*":
+ description: Pervasive Displays, Inc.
+ "^phicomm,.*":
+ description: PHICOMM Co., Ltd.
+ "^phytec,.*":
+ description: PHYTEC Messtechnik GmbH
+ "^picochip,.*":
+ description: Picochip Ltd
+ "^pine64,.*":
+ description: Pine64
+ "^pixcir,.*":
+ description: PIXCIR MICROELECTRONICS Co., Ltd
+ "^plantower,.*":
+ description: Plantower Co., Ltd
+ "^plathome,.*":
+ description: Plat'Home Co., Ltd.
+ "^plda,.*":
+ description: PLDA
+ "^plx,.*":
+ description: Broadcom Corporation (formerly PLX Technology)
+ "^pni,.*":
+ description: PNI Sensor Corporation
+ "^portwell,.*":
+ description: Portwell Inc.
+ "^poslab,.*":
+ description: Poslab Technology Co., Ltd.
+ "^powervr,.*":
+ description: PowerVR (deprecated, use img)
+ "^probox2,.*":
+ description: PROBOX2 (by W2COMP Co., Ltd.)
+ "^pulsedlight,.*":
+ description: PulsedLight, Inc
+ "^qca,.*":
+ description: Qualcomm Atheros, Inc.
+ "^qcom,.*":
+ description: Qualcomm Technologies, Inc
+ "^qemu,.*":
+ description: QEMU, a generic and open source machine emulator and virtualizer
+ "^qi,.*":
+ description: Qi Hardware
+ "^qiaodian,.*":
+ description: QiaoDian XianShi Corporation
+ "^qnap,.*":
+ description: QNAP Systems, Inc.
+ "^radxa,.*":
+ description: Radxa
+ "^raidsonic,.*":
+ description: RaidSonic Technology GmbH
+ "^ralink,.*":
+ description: Mediatek/Ralink Technology Corp.
+ "^ramtron,.*":
+ description: Ramtron International
+ "^raspberrypi,.*":
+ description: Raspberry Pi Foundation
+ "^raydium,.*":
+ description: Raydium Semiconductor Corp.
+ "^rda,.*":
+ description: Unisoc Communications, Inc.
+ "^realtek,.*":
+ description: Realtek Semiconductor Corp.
+ "^renesas,.*":
+ description: Renesas Electronics Corporation
+ "^richtek,.*":
+ description: Richtek Technology Corporation
+ "^ricoh,.*":
+ description: Ricoh Co. Ltd.
+ "^rikomagic,.*":
+ description: Rikomagic Tech Corp. Ltd
+ "^riscv,.*":
+ description: RISC-V Foundation
+ "^rockchip,.*":
+ description: Fuzhou Rockchip Electronics Co., Ltd
+ "^rocktech,.*":
+ description: ROCKTECH DISPLAYS LIMITED
+ "^rohm,.*":
+ description: ROHM Semiconductor Co., Ltd
+ "^ronbo,.*":
+ description: Ronbo Electronics
+ "^roofull,.*":
+ description: Shenzhen Roofull Technology Co, Ltd
+ "^samsung,.*":
+ description: Samsung Semiconductor
+ "^samtec,.*":
+ description: Samtec/Softing company
+ "^sancloud,.*":
+ description: Sancloud Ltd
+ "^sandisk,.*":
+ description: Sandisk Corporation
+ "^sbs,.*":
+ description: Smart Battery System
+ "^schindler,.*":
+ description: Schindler
+ "^seagate,.*":
+ description: Seagate Technology PLC
+ "^seirobotics,.*":
+ description: Shenzhen SEI Robotics Co., Ltd
+ "^semtech,.*":
+ description: Semtech Corporation
+ "^sensirion,.*":
+ description: Sensirion AG
+ "^sff,.*":
+ description: Small Form Factor Committee
+ "^sgd,.*":
+ description: Solomon Goldentek Display Corporation
+ "^sgx,.*":
+ description: SGX Sensortech
+ "^sharp,.*":
+ description: Sharp Corporation
+ "^shimafuji,.*":
+ description: Shimafuji Electric, Inc.
+ "^si-en,.*":
+ description: Si-En Technology Ltd.
+ "^si-linux,.*":
+ description: Silicon Linux Corporation
+ "^sifive,.*":
+ description: SiFive, Inc.
+ "^sigma,.*":
+ description: Sigma Designs, Inc.
+ "^sii,.*":
+ description: Seiko Instruments, Inc.
+ "^sil,.*":
+ description: Silicon Image
+ "^silabs,.*":
+ description: Silicon Laboratories
+ "^silead,.*":
+ description: Silead Inc.
+ "^silergy,.*":
+ description: Silergy Corp.
+ "^siliconmitus,.*":
+ description: Silicon Mitus, Inc.
+ "^simte,.*":
+ description: k
+ "^sirf,.*":
+ description: SiRF Technology, Inc.
+ "^sis,.*":
+ description: Silicon Integrated Systems Corp.
+ "^sitronix,.*":
+ description: Sitronix Technology Corporation
+ "^skyworks,.*":
+ description: Skyworks Solutions, Inc.
+ "^smsc,.*":
+ description: Standard Microsystems Corporation
+ "^snps,.*":
+ description: Synopsys, Inc.
+ "^socionext,.*":
+ description: Socionext Inc.
+ "^solidrun,.*":
+ description: SolidRun
+ "^solomon,.*":
+ description: Solomon Systech Limited
+ "^sony,.*":
+ description: Sony Corporation
+ "^spansion,.*":
+ description: Spansion Inc.
+ "^sprd,.*":
+ description: Spreadtrum Communications Inc.
+ "^sst,.*":
+ description: Silicon Storage Technology, Inc.
+ "^st,.*":
+ description: STMicroelectronics
+ "^starry,.*":
+ description: Starry Electronic Technology (ShenZhen) Co., LTD
+ "^startek,.*":
+ description: Startek
+ "^ste,.*":
+ description: ST-Ericsson
+ "^stericsson,.*":
+ description: ST-Ericsson
+ "^summit,.*":
+ description: Summit microelectronics
+ "^sunchip,.*":
+ description: Shenzhen Sunchip Technology Co., Ltd
+ "^SUNW,.*":
+ description: Sun Microsystems, Inc
+ "^swir,.*":
+ description: Sierra Wireless
+ "^syna,.*":
+ description: Synaptics Inc.
+ "^synology,.*":
+ description: Synology, Inc.
+ "^tbs,.*":
+ description: TBS Technologies
+ "^tbs-biometrics,.*":
+ description: Touchless Biometric Systems AG
+ "^tcg,.*":
+ description: Trusted Computing Group
+ "^tcl,.*":
+ description: Toby Churchill Ltd.
+ "^technexion,.*":
+ description: TechNexion
+ "^technologic,.*":
+ description: Technologic Systems
+ "^tempo,.*":
+ description: Tempo Semiconductor
+ "^techstar,.*":
+ description: Shenzhen Techstar Electronics Co., Ltd.
+ "^terasic,.*":
+ description: Terasic Inc.
+ "^thine,.*":
+ description: THine Electronics, Inc.
+ "^ti,.*":
+ description: Texas Instruments
+ "^tianma,.*":
+ description: Tianma Micro-electronics Co., Ltd.
+ "^tlm,.*":
+ description: Trusted Logic Mobility
+ "^tmt,.*":
+ description: Tecon Microprocessor Technologies, LLC.
+ "^topeet,.*":
+ description: Topeet
+ "^toradex,.*":
+ description: Toradex AG
+ "^toshiba,.*":
+ description: Toshiba Corporation
+ "^toumaz,.*":
+ description: Toumaz
+ "^tpk,.*":
+ description: TPK U.S.A. LLC
+ "^tplink,.*":
+ description: TP-LINK Technologies Co., Ltd.
+ "^tpo,.*":
+ description: TPO
+ "^tq,.*":
+ description: TQ Systems GmbH
+ "^tronfy,.*":
+ description: Tronfy
+ "^tronsmart,.*":
+ description: Tronsmart
+ "^truly,.*":
+ description: Truly Semiconductors Limited
+ "^tsd,.*":
+ description: Theobroma Systems Design und Consulting GmbH
+ "^tyan,.*":
+ description: Tyan Computer Corporation
+ "^u-blox,.*":
+ description: u-blox
+ "^ucrobotics,.*":
+ description: uCRobotics
+ "^ubnt,.*":
+ description: Ubiquiti Networks
+ "^udoo,.*":
+ description: Udoo
+ "^uniwest,.*":
+ description: United Western Technologies Corp (UniWest)
+ "^upisemi,.*":
+ description: uPI Semiconductor Corp.
+ "^urt,.*":
+ description: United Radiant Technology Corporation
+ "^usi,.*":
+ description: Universal Scientific Industrial Co., Ltd.
+ "^v3,.*":
+ description: V3 Semiconductor
+ "^vamrs,.*":
+ description: Vamrs Ltd.
+ "^variscite,.*":
+ description: Variscite Ltd.
+ "^via,.*":
+ description: VIA Technologies, Inc.
+ "^virtio,.*":
+ description: Virtual I/O Device Specification, developed by the OASIS consortium
+ "^vishay,.*":
+ description: Vishay Intertechnology, Inc
+ "^vitesse,.*":
+ description: Vitesse Semiconductor Corporation
+ "^vivante,.*":
+ description: Vivante Corporation
+ "^vocore,.*":
+ description: VoCore Studio
+ "^voipac,.*":
+ description: Voipac Technologies s.r.o.
+ "^vot,.*":
+ description: Vision Optical Technology Co., Ltd.
+ "^wd,.*":
+ description: Western Digital Corp.
+ "^wetek,.*":
+ description: WeTek Electronics, limited.
+ "^wexler,.*":
+ description: Wexler
+ "^whwave,.*":
+ description: Shenzhen whwave Electronics, Inc.
+ "^wi2wi,.*":
+ description: Wi2Wi, Inc.
+ "^winbond,.*":
+ description: Winbond Electronics corp.
+ "^winstar,.*":
+ description: Winstar Display Corp.
+ "^wlf,.*":
+ description: Wolfson Microelectronics
+ "^wm,.*":
+ description: Wondermedia Technologies, Inc.
+ "^x-powers,.*":
+ description: X-Powers
+ "^xes,.*":
+ description: Extreme Engineering Solutions (X-ES)
+ "^xillybus,.*":
+ description: Xillybus Ltd.
+ "^xlnx,.*":
+ description: Xilinx
+ "^xunlong,.*":
+ description: Shenzhen Xunlong Software CO.,Limited
+ "^ysoft,.*":
+ description: Y Soft Corporation a.s.
+ "^zarlink,.*":
+ description: Zarlink Semiconductor
+ "^zeitec,.*":
+ description: ZEITEC Semiconductor Co., LTD.
+ "^zidoo,.*":
+ description: Shenzhen Zidoo Technology Co., Ltd.
+ "^zii,.*":
+ description: Zodiac Inflight Innovations
+ "^zte,.*":
+ description: ZTE Corp.
+ "^zyxel,.*":
+ description: ZyXEL Communications Corp.
+
+ # Normal property name match without a comma
+ # These should catch all node/property names without a prefix
+ "^[a-zA-Z0-9#][a-zA-Z0-9+\\-._@]{0,63}$": true
+ "^[a-zA-Z0-9+\\-._]*@[0-9a-zA-Z,]*$": true
+ "^#.*": true
+
+additionalProperties: false
+
+...
diff --git a/Documentation/media/uapi/v4l/field-order.rst b/Documentation/media/uapi/v4l/field-order.rst
index 3fb473e3b8e2..d640e922a974 100644
--- a/Documentation/media/uapi/v4l/field-order.rst
+++ b/Documentation/media/uapi/v4l/field-order.rst
@@ -75,12 +75,11 @@ enum v4l2_field
* - ``V4L2_FIELD_ANY``
- 0
- - Applications request this field order when any one of the
- ``V4L2_FIELD_NONE``, ``V4L2_FIELD_TOP``, ``V4L2_FIELD_BOTTOM``, or
- ``V4L2_FIELD_INTERLACED`` formats is acceptable. Drivers choose
- depending on hardware capabilities or e. g. the requested image
- size, and return the actual field order. Drivers must never return
- ``V4L2_FIELD_ANY``. If multiple field orders are possible the
+ - Applications request this field order when any field format
+ is acceptable. Drivers choose depending on hardware capabilities or
+ e.g. the requested image size, and return the actual field order.
+ Drivers must never return ``V4L2_FIELD_ANY``.
+ If multiple field orders are possible the
driver must choose one of the possible field orders during
:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` or
:ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`. struct
@@ -88,9 +87,8 @@ enum v4l2_field
``V4L2_FIELD_ANY``.
* - ``V4L2_FIELD_NONE``
- 1
- - Images are in progressive format, not interlaced. The driver may
- also indicate this order when it cannot distinguish between
- ``V4L2_FIELD_TOP`` and ``V4L2_FIELD_BOTTOM``.
+ - Images are in progressive (frame-based) format, not interlaced
+ (field-based).
* - ``V4L2_FIELD_TOP``
- 2
- Images consist of the top (aka odd) field only.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index cd7303d7fa25..180e07d956a7 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -796,7 +796,9 @@ The kernel interface functions are as follows:
s64 tx_total_len,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
- bool upgrade);
+ bool upgrade,
+ bool intr,
+ unsigned int debug_id);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
@@ -824,6 +826,13 @@ The kernel interface functions are as follows:
the server upgrade the service to a better one. The resultant service ID
is returned by rxrpc_kernel_recv_data().
+ intr should be set to true if the call should be interruptible. If this
+ is not set, this function may not return until a channel has been
+ allocated; if it is set, the function may return -ERESTARTSYS.
+
+ debug_id is the call debugging ID to be used for tracing. This can be
+ obtained by atomically incrementing rxrpc_debug_id.
+
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
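+
+ As an illustrative sketch only (sock, srx, key, user_call_ID,
+ tx_total_len and my_notify_rx stand in for caller-provided values and
+ are not part of this API), a kernel-side user might now issue:
+
+	call = rxrpc_kernel_begin_call(sock, &srx, key, user_call_ID,
+				       tx_total_len, GFP_KERNEL,
+				       my_notify_rx,
+				       true,	/* upgrade the service */
+				       true,	/* intr: may get -ERESTARTSYS */
+				       atomic_inc_return(&rxrpc_debug_id));
+	if (IS_ERR(call))
+		return PTR_ERR(call);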
@@ -1056,6 +1065,16 @@ The kernel interface functions are as follows:
This value can be used to determine if the remote client has been
restarted as it shouldn't change otherwise.
+ (*) Set the maximum lifespan on a call.
+
+ void rxrpc_kernel_set_max_life(struct socket *sock,
+ struct rxrpc_call *call,
+ unsigned long hard_timeout)
+
+ This sets the maximum lifespan on a call to hard_timeout (which is in
+ jiffies). In the event of the timeout occurring, the call will be
+ aborted and -ETIME or -ETIMEDOUT will be returned.
+
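+ For example, a caller might cap a call at 30 seconds (a sketch,
+ reusing the sock and call variables from the example above):
+
+	rxrpc_kernel_set_max_life(sock, call, 30 * HZ);
+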
=======================
CONFIGURABLE PARAMETERS
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 64b38dfcc243..ba6c42c576dd 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -69,23 +69,6 @@ by and on behalf of the VM's process may not be freed/unaccounted when
the VM is shut down.
-It is important to note that althought VM ioctls may only be issued from
-the process that created the VM, a VM's lifecycle is associated with its
-file descriptor, not its creator (process). In other words, the VM and
-its resources, *including the associated address space*, are not freed
-until the last reference to the VM's file descriptor has been released.
-For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
-not be freed until both the parent (original) process and its child have
-put their references to the VM's file descriptor.
-
-Because a VM's resources are not freed until the last reference to its
-file descriptor is released, creating additional references to a VM via
-via fork(), dup(), etc... without careful consideration is strongly
-discouraged and may have unwanted side effects, e.g. memory allocated
-by and on behalf of the VM's process may not be freed/unaccounted when
-the VM is shut down.
-
-
3. Extensions
-------------
@@ -347,7 +330,7 @@ They must be less than the value that KVM_CHECK_EXTENSION returns for
the KVM_CAP_MULTI_ADDRESS_SPACE capability.
The bits in the dirty bitmap are cleared before the ioctl returns, unless
-KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is enabled. For more information,
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
see the description of the capability.
4.9 KVM_SET_MEMORY_ALIAS
@@ -1117,9 +1100,8 @@ struct kvm_userspace_memory_region {
This ioctl allows the user to create, modify or delete a guest physical
memory slot. Bits 0-15 of "slot" specify the slot id and this value
should be less than the maximum number of user memory slots supported per
-VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
-if this capability is supported by the architecture. Slots may not
-overlap in guest physical address space.
+VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS.
+Slots may not overlap in guest physical address space.
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be
@@ -1901,6 +1883,12 @@ Architectures: all
Type: vcpu ioctl
Parameters: struct kvm_one_reg (in)
Returns: 0 on success, negative value on failure
+Errors:
+  ENOENT:   no such register
+  EINVAL:   invalid register ID, or no such register
+  EPERM:    (arm64) register access not allowed before vcpu finalization
+(These error codes are indicative only: do not rely on a specific error
+code being returned in a specific situation.)
struct kvm_one_reg {
__u64 id;
@@ -1985,6 +1973,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TLB3PS | 32
PPC | KVM_REG_PPC_EPTCFG | 32
PPC | KVM_REG_PPC_ICP_STATE | 64
+ PPC | KVM_REG_PPC_VP_STATE | 128
PPC | KVM_REG_PPC_TB_OFFSET | 64
PPC | KVM_REG_PPC_SPMC1 | 32
PPC | KVM_REG_PPC_SPMC2 | 32
@@ -2137,6 +2126,37 @@ contains elements ranging from 32 to 128 bits. The index is a 32bit
value in the kvm_regs structure seen as a 32bit array.
0x60x0 0000 0010 <index into the kvm_regs struct:16>
+Specifically:
+ Encoding Register Bits kvm_regs member
+----------------------------------------------------------------
+ 0x6030 0000 0010 0000 X0 64 regs.regs[0]
+ 0x6030 0000 0010 0002 X1 64 regs.regs[1]
+ ...
+ 0x6030 0000 0010 003c X30 64 regs.regs[30]
+ 0x6030 0000 0010 003e SP 64 regs.sp
+ 0x6030 0000 0010 0040 PC 64 regs.pc
+ 0x6030 0000 0010 0042 PSTATE 64 regs.pstate
+ 0x6030 0000 0010 0044 SP_EL1 64 sp_el1
+ 0x6030 0000 0010 0046 ELR_EL1 64 elr_el1
+ 0x6030 0000 0010 0048 SPSR_EL1 64 spsr[KVM_SPSR_EL1] (alias SPSR_SVC)
+ 0x6030 0000 0010 004a SPSR_ABT 64 spsr[KVM_SPSR_ABT]
+ 0x6030 0000 0010 004c SPSR_UND 64 spsr[KVM_SPSR_UND]
+ 0x6030 0000 0010 004e SPSR_IRQ 64 spsr[KVM_SPSR_IRQ]
+ 0x6030 0000 0010 0050 SPSR_FIQ 64 spsr[KVM_SPSR_FIQ]
+ 0x6040 0000 0010 0054 V0 128 fp_regs.vregs[0] (*)
+ 0x6040 0000 0010 0058 V1 128 fp_regs.vregs[1] (*)
+ ...
+ 0x6040 0000 0010 00d0 V31 128 fp_regs.vregs[31] (*)
+ 0x6020 0000 0010 00d4 FPSR 32 fp_regs.fpsr
+ 0x6020 0000 0010 00d5 FPCR 32 fp_regs.fpcr
+
+(*) These encodings are not accepted for SVE-enabled vcpus. See
+ KVM_ARM_VCPU_INIT.
+
+ The equivalent register content can be accessed via bits [127:0] of
+ the corresponding SVE Zn registers instead for vcpus that have SVE
+ enabled (see below).
+
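+As a hedged userspace sketch (vcpu_fd is assumed to be an open vcpu
+file descriptor; <stddef.h>, <sys/ioctl.h> and <linux/kvm.h> are
+assumed included; error handling omitted), the PC entry above can be
+read as follows:
+
+  __u64 pc;
+  struct kvm_one_reg reg = {
+          /* 0x6030 0000 0010 0040: 64-bit core register, regs.pc */
+          .id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
+                  (offsetof(struct kvm_regs, regs.pc) / sizeof(__u32)),
+          .addr = (__u64)&pc,
+  };
+
+  ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
+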
arm64 CCSIDR registers are demultiplexed by CSSELR value:
0x6020 0000 0011 00 <csselr:8>
@@ -2146,6 +2166,64 @@ arm64 system registers have the following id bit patterns:
arm64 firmware pseudo-registers have the following bit pattern:
0x6030 0000 0014 <regno:16>
+arm64 SVE registers have the following bit patterns:
+ 0x6080 0000 0015 00 <n:5> <slice:5> Zn bits[2048*slice + 2047 : 2048*slice]
+ 0x6050 0000 0015 04 <n:4> <slice:5> Pn bits[256*slice + 255 : 256*slice]
+ 0x6050 0000 0015 060 <slice:5> FFR bits[256*slice + 255 : 256*slice]
+ 0x6060 0000 0015 ffff KVM_REG_ARM64_SVE_VLS pseudo-register
+
+Access to register IDs where 2048 * slice >= 128 * max_vq will fail with
+ENOENT. max_vq is the vcpu's maximum supported vector length in 128-bit
+quadwords: see (**) below.
+
+These registers are only accessible on vcpus for which SVE is enabled.
+See KVM_ARM_VCPU_INIT for details.
+
+In addition, except for KVM_REG_ARM64_SVE_VLS, these registers are not
+accessible until the vcpu's SVE configuration has been finalized
+using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE). See KVM_ARM_VCPU_INIT
+and KVM_ARM_VCPU_FINALIZE for more information about this procedure.
+
+KVM_REG_ARM64_SVE_VLS is a pseudo-register that allows the set of vector
+lengths supported by the vcpu to be discovered and configured by
+userspace. When transferred to or from user memory via KVM_GET_ONE_REG
+or KVM_SET_ONE_REG, the value of this register is of type
+__u64[KVM_ARM64_SVE_VLS_WORDS], and encodes the set of vector lengths as
+follows:
+
+__u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS];
+
+if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX &&
+ ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
+ ((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1))
+ /* Vector length vq * 16 bytes supported */
+else
+ /* Vector length vq * 16 bytes not supported */
+
+(**) The maximum value vq for which the above condition is true is
+max_vq. This is the maximum vector length available to the guest on
+this vcpu, and determines which register slices are visible through
+this ioctl interface.
+
+(See Documentation/arm64/sve.txt for an explanation of the "vq"
+nomenclature.)
+
+KVM_REG_ARM64_SVE_VLS is only accessible after KVM_ARM_VCPU_INIT.
+KVM_ARM_VCPU_INIT initialises it to the best set of vector lengths that
+the host supports.
+
+Userspace may subsequently modify it if desired until the vcpu's SVE
+configuration is finalized using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE).
+
+Apart from simply removing all vector lengths from the host set that
+exceed some value, support for arbitrarily chosen sets of vector lengths
+is hardware-dependent and may not be available. Attempting to configure
+an invalid set of vector lengths via KVM_SET_ONE_REG will fail with
+EINVAL.
+
+After the vcpu's SVE configuration is finalized, further attempts to
+write this register will fail with EPERM.
+
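+As a sketch (using the KVM_ARM64_SVE_VQ_MIN, KVM_ARM64_SVE_VQ_MAX and
+KVM_ARM64_SVE_VLS_WORDS constants from the kvm UAPI headers),
+userspace can derive max_vq from the register contents like this:
+
+  __u64 vls[KVM_ARM64_SVE_VLS_WORDS];  /* filled by KVM_GET_ONE_REG */
+  unsigned int vq, max_vq = 0;
+
+  for (vq = KVM_ARM64_SVE_VQ_MIN; vq <= KVM_ARM64_SVE_VQ_MAX; vq++)
+          if ((vls[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
+               ((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1)
+                  max_vq = vq;  /* vq * 16 bytes is supported */
+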
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
@@ -2198,6 +2276,12 @@ Architectures: all
Type: vcpu ioctl
Parameters: struct kvm_one_reg (in and out)
Returns: 0 on success, negative value on failure
+Errors include:
+  ENOENT:   no such register
+  EINVAL:   invalid register ID, or no such register
+  EPERM:    (arm64) register access not allowed before vcpu finalization
+(These error codes are indicative only: do not rely on a specific error
+code being returned in a specific situation.)
This ioctl allows to receive the value of a single register implemented
in a vcpu. The register to read is indicated by the "id" field of the
@@ -2690,6 +2774,49 @@ Possible features:
- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
Depends on KVM_CAP_ARM_PMU_V3.
+ - KVM_ARM_VCPU_PTRAUTH_ADDRESS: Enables Address Pointer authentication
+ for arm64 only.
+ Depends on KVM_CAP_ARM_PTRAUTH_ADDRESS.
+ If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
+ both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
+ KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
+ requested.
+
+ - KVM_ARM_VCPU_PTRAUTH_GENERIC: Enables Generic Pointer authentication
+ for arm64 only.
+ Depends on KVM_CAP_ARM_PTRAUTH_GENERIC.
+ If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
+ both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
+ KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
+ requested.
+
+ - KVM_ARM_VCPU_SVE: Enables SVE for the CPU (arm64 only).
+ Depends on KVM_CAP_ARM_SVE.
+ Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+ * After KVM_ARM_VCPU_INIT:
+
+ - KVM_REG_ARM64_SVE_VLS may be read using KVM_GET_ONE_REG: the
+ initial value of this pseudo-register indicates the best set of
+ vector lengths possible for a vcpu on this host.
+
+ * Before KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+ - KVM_RUN and KVM_GET_REG_LIST are not available;
+
+ - KVM_GET_ONE_REG and KVM_SET_ONE_REG cannot be used to access
+ the scalable architectural SVE registers
+ KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() or
+ KVM_REG_ARM64_SVE_FFR;
+
+ - KVM_REG_ARM64_SVE_VLS may optionally be written using
+ KVM_SET_ONE_REG, to modify the set of vector lengths available
+ for the vcpu.
+
+ * After KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+ - the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can
+ no longer be written using KVM_SET_ONE_REG.
4.83 KVM_ARM_PREFERRED_TARGET
@@ -3809,7 +3936,7 @@ to I/O ports.
4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
-Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
Architectures: x86, arm, arm64, mips
Type: vm ioctl
Parameters: struct kvm_dirty_log (in)
@@ -3842,10 +3969,10 @@ the address space for which you want to return the dirty bitmap.
They must be less than the value that KVM_CHECK_EXTENSION returns for
the KVM_CAP_MULTI_ADDRESS_SPACE capability.
-This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
is enabled; for more information, see the description of the capability.
However, it can always be used as long as KVM_CHECK_EXTENSION confirms
-that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is present.
+that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is present.
4.118 KVM_GET_SUPPORTED_HV_CPUID
@@ -3904,6 +4031,40 @@ number of valid entries in the 'entries' array, which is then filled.
'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently reserved,
userspace should not expect to get any particular value there.
+4.119 KVM_ARM_VCPU_FINALIZE
+
+Architectures: arm, arm64
+Type: vcpu ioctl
+Parameters: int feature (in)
+Returns: 0 on success, -1 on error
+Errors:
+ EPERM: feature not enabled, needs configuration, or already finalized
+ EINVAL: feature unknown or not present
+
+Recognised values for feature:
+ arm64 KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE)
+
+Finalizes the configuration of the specified vcpu feature.
+
+The vcpu must already have been initialised, enabling the affected feature, by
+means of a successful KVM_ARM_VCPU_INIT call with the appropriate flag set in
+features[].
+
+For affected vcpu features, this is a mandatory step that must be performed
+before the vcpu is fully usable.
+
+Between KVM_ARM_VCPU_INIT and KVM_ARM_VCPU_FINALIZE, the feature may be
+configured by use of ioctls such as KVM_SET_ONE_REG. The exact configuration
+that should be performed and how to do it are feature-dependent.
+
+Other calls that depend on a particular feature being finalized, such as
+KVM_RUN, KVM_GET_REG_LIST, KVM_GET_ONE_REG and KVM_SET_ONE_REG, will fail with
+-EPERM unless the feature has already been finalized by means of a
+KVM_ARM_VCPU_FINALIZE call.
+
+See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization
+using this ioctl.
+
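+A hedged sketch of the overall sequence for SVE (error checking
+omitted; vm_fd and vcpu_fd are assumed to be open VM and vcpu file
+descriptors):
+
+  struct kvm_vcpu_init init;
+  int feature = KVM_ARM_VCPU_SVE;
+
+  ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
+  init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
+  ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
+
+  /* KVM_REG_ARM64_SVE_VLS may be adjusted here via KVM_SET_ONE_REG */
+
+  ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
+  /* KVM_RUN, KVM_GET_REG_LIST etc. are available from this point */
+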
5. The kvm_run structure
------------------------
@@ -4505,6 +4666,15 @@ struct kvm_sync_regs {
struct kvm_vcpu_events events;
};
+6.75 KVM_CAP_PPC_IRQ_XIVE
+
+Architectures: ppc
+Target: vcpu
+Parameters: args[0] is the XIVE device fd
+ args[1] is the XIVE CPU number (server ID) for this vcpu
+
+This capability connects the vcpu to an in-kernel XIVE device.
+
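+For instance (a sketch only; xive_fd is assumed to come from
+KVM_CREATE_DEVICE and server_id from the vcpu numbering chosen by
+userspace):
+
+  struct kvm_enable_cap cap = {
+          .cap  = KVM_CAP_PPC_IRQ_XIVE,
+          .args = { xive_fd, server_id },
+  };
+
+  ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
+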
7. Capabilities that can be enabled on VMs
------------------------------------------
@@ -4798,7 +4968,7 @@ and injected exceptions.
* For the new DR6 bits, note that bit 16 is set iff the #DB exception
will clear DR6.RTM.
-7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
Architectures: x86, arm, arm64, mips
Parameters: args[0] whether feature should be enabled or not
@@ -4821,6 +4991,11 @@ while userspace can see false reports of dirty pages. Manual reprotection
helps reducing this time, improving guest performance and reducing the
number of dirty log false positives.
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
+it hard or impossible to use it correctly. The availability of
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
+Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
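+
+A minimal sketch of opting in (vm_fd is assumed to be an open VM file
+descriptor):
+
+  struct kvm_enable_cap cap = {
+          .cap  = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
+          .args = { 1 },  /* args[0]: enable */
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);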
8. Other capabilities.
----------------------
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 95ca68d663a4..4ffb82b02468 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -141,7 +141,8 @@ struct kvm_s390_vm_cpu_subfunc {
u8 pcc[16]; # valid with Message-Security-Assist-Extension 4
u8 ppno[16]; # valid with Message-Security-Assist-Extension 5
u8 kma[16]; # valid with Message-Security-Assist-Extension 8
- u8 reserved[1808]; # reserved for future instructions
+ u8 kdsa[16]; # valid with Message-Security-Assist-Extension 9
+ u8 reserved[1792]; # reserved for future instructions
};
Parameters: address of a buffer to load the subfunction blocks from.
diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt
new file mode 100644
index 000000000000..9a24a4525253
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/xive.txt
@@ -0,0 +1,197 @@
+POWER9 eXternal Interrupt Virtualization Engine (XIVE Gen1)
+===========================================================
+
+Device types supported:
+ KVM_DEV_TYPE_XIVE POWER9 XIVE Interrupt Controller generation 1
+
+This device acts as a VM interrupt controller. It provides the KVM
+interface to configure the interrupt sources of a VM in the underlying
+POWER9 XIVE interrupt controller.
+
+Only one XIVE instance may be instantiated. A guest XIVE device
+requires a POWER9 host and the guest OS should have support for the
+XIVE native exploitation interrupt mode. If not, it should run using
+the legacy interrupt mode, referred to as XICS (POWER7/8).
+
+* Device Mappings
+
+ The KVM device exposes different MMIO ranges of the XIVE HW which
+ are required for interrupt management. These are exposed to the
+ guest in VMAs populated with a custom VM fault handler.
+
+ 1. Thread Interrupt Management Area (TIMA)
+
+ Each thread has an associated Thread Interrupt Management context
+ composed of a set of registers. These registers let the thread
+ handle priority management and interrupt acknowledgment. The most
+ important are:
+
+ - Interrupt Pending Buffer (IPB)
+ - Current Processor Priority (CPPR)
+ - Notification Source Register (NSR)
+
+ They are exposed to software in four different pages, each offering
+ a view at a different privilege level. The first page is for the
+ physical thread context and the second for the hypervisor. Only the
+ third (operating system) and the fourth (user level) are exposed to
+ the guest.
+
+ 2. Event State Buffer (ESB)
+
+ Each source is associated with an Event State Buffer (ESB)
+ composed of an even/odd pair of pages which provides commands to
+ manage the source: for instance, to trigger it, to EOI it, or to
+ turn it off.
+
+ 3. Device pass-through
+
+ When a device is passed-through into the guest, the source
+ interrupts are from a different HW controller (PHB4) and the ESB
+ pages exposed to the guest should accommodate this change.
+
+ The passthru_irq helpers, kvmppc_xive_set_mapped() and
+ kvmppc_xive_clr_mapped() are called when the device HW irqs are
+ mapped into or unmapped from the guest IRQ number space. The KVM
+ device extends these helpers to clear the ESB pages of the guest IRQ
+ number being mapped and then lets the VM fault handler repopulate.
+ The handler will insert the ESB page corresponding to the HW
+ interrupt of the device being passed-through or the initial IPI ESB
+ page if the device has been removed.
+
+ The ESB remapping is fully transparent to the guest and the OS
+ device driver. All handling is done within VFIO and the above
+ helpers in KVM-PPC.
+
+* Groups:
+
+ 1. KVM_DEV_XIVE_GRP_CTRL
+ Provides global controls on the device
+ Attributes:
+ 1.1 KVM_DEV_XIVE_RESET (write only)
+ Resets the interrupt controller configuration for sources and event
+ queues. To be used by kexec and kdump.
+ Errors: none
+
+ 1.2 KVM_DEV_XIVE_EQ_SYNC (write only)
+ Syncs all the sources and queues and marks the EQ pages dirty. This
+ is to make sure that a consistent memory state is captured when
+ migrating the VM.
+ Errors: none
+
+ 2. KVM_DEV_XIVE_GRP_SOURCE (write only)
+ Initializes a new source in the XIVE device and masks it (see the
+ usage sketch after this list).
+ Attributes:
+ Interrupt source number (64-bit)
+ The kvm_device_attr.addr points to a __u64 value:
+ bits: | 63 .... 2 | 1 | 0
+ values: | unused | level | type
+ - type: 0:MSI 1:LSI
+ - level: assertion level in case of an LSI.
+ Errors:
+ -E2BIG: Interrupt source number is out of range
+ -ENOMEM: Could not create a new source block
+ -EFAULT: Invalid user pointer for attr->addr.
+ -ENXIO: Could not allocate underlying HW interrupt
+
+ 3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only)
+ Configures source targeting
+ Attributes:
+ Interrupt source number (64-bit)
+ The kvm_device_attr.addr points to a __u64 value:
+ bits: | 63 .... 33 | 32 | 31 .. 3 | 2 .. 0
+ values: | eisn | mask | server | priority
+ - priority: 0-7 interrupt priority level
+ - server: CPU number chosen to handle the interrupt
+ - mask: mask flag (unused)
+ - eisn: Effective Interrupt Source Number
+ Errors:
+ -ENOENT: Unknown source number
+ -EINVAL: Source number not initialized
+ -EINVAL: Invalid priority
+ -EINVAL: Invalid CPU number.
+ -EFAULT: Invalid user pointer for attr->addr.
+ -ENXIO: CPU event queues not configured or configuration of the
+ underlying HW interrupt failed
+ -EBUSY: No CPU available to serve interrupt
+
+ 4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write)
+ Configures an event queue of a CPU
+ Attributes:
+ EQ descriptor identifier (64-bit)
+ The EQ descriptor identifier is a tuple (server, priority):
+ bits: | 63 .... 32 | 31 .. 3 | 2 .. 0
+ values: | unused | server | priority
+ The kvm_device_attr.addr points to:
+ struct kvm_ppc_xive_eq {
+ __u32 flags;
+ __u32 qshift;
+ __u64 qaddr;
+ __u32 qtoggle;
+ __u32 qindex;
+ __u8 pad[40];
+ };
+ - flags: queue flags
+ KVM_XIVE_EQ_ALWAYS_NOTIFY (required)
+ forces notification without using the coalescing mechanism
+ provided by the XIVE END ESBs.
+ - qshift: queue size (power of 2)
+ - qaddr: real address of queue
+ - qtoggle: current queue toggle bit
+ - qindex: current queue index
+ - pad: reserved for future use
+ Errors:
+ -ENOENT: Invalid CPU number
+ -EINVAL: Invalid priority
+ -EINVAL: Invalid flags
+ -EINVAL: Invalid queue size
+ -EINVAL: Invalid queue address
+ -EFAULT: Invalid user pointer for attr->addr.
+ -EIO: Configuration of the underlying HW failed
+
+ 5. KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only)
+ Synchronize the source to flush event notifications
+ Attributes:
+ Interrupt source number (64-bit)
+ Errors:
+ -ENOENT: Unknown source number
+ -EINVAL: Source number not initialized
+
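+  As a usage sketch (vm_fd and irq_number are assumed caller-provided;
+  error checks omitted), creating the device and initializing one MSI
+  source:
+
+	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XIVE };
+	__u64 src = 0;	/* type = MSI */
+	struct kvm_device_attr attr = {
+		.group = KVM_DEV_XIVE_GRP_SOURCE,
+		.attr  = irq_number,	/* interrupt source number */
+		.addr  = (__u64)&src,
+	};
+
+	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	/* cd.fd is the device fd */
+	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
+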
+* VCPU state
+
+ The XIVE IC maintains VP interrupt state in an internal structure
+ called the NVT. When a VP is not dispatched on a HW processor
+ thread, this structure can be updated by HW if the VP is the target
+ of an event notification.
+
+ It is important for migration to capture the cached IPB from the NVT
+ as it synthesizes the priorities of the pending interrupts. We
+ capture a bit more to report debug information.
+
+ KVM_REG_PPC_VP_STATE (2 * 64bits)
+ bits: | 63 .... 32 | 31 .... 0 |
+ values: | TIMA word0 | TIMA word1 |
+ bits: | 127 .......... 64 |
+ values: | unused |
+
+* Migration:
+
+ Saving the state of a VM using the XIVE native exploitation mode
+ should follow a specific sequence. When the VM is stopped:
+
+ 1. Mask all sources (PQ=01) to stop the flow of events.
+
+ 2. Sync the XIVE device with the KVM control KVM_DEV_XIVE_EQ_SYNC to
+ flush any in-flight event notification and to stabilize the EQs. At
+ this stage, the EQ pages are marked dirty to make sure they are
+ transferred in the migration sequence.
+
+ 3. Capture the state of the source targeting, the EQs configuration
+ and the state of thread interrupt context registers.
+
+ Restore is similar:
+
+ 1. Restore the EQ configuration, as targeting depends on it.
+ 2. Restore targeting
+ 3. Restore the thread interrupt contexts
+ 4. Restore the source states
+ 5. Let the vCPU run
diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
index 534e9baa4e1d..5d4330be200f 100644
--- a/Documentation/x86/mds.rst
+++ b/Documentation/x86/mds.rst
@@ -142,45 +142,13 @@ Mitigation points
mds_user_clear.
The mitigation is invoked in prepare_exit_to_usermode() which covers
- most of the kernel to user space transitions. There are a few exceptions
- which are not invoking prepare_exit_to_usermode() on return to user
- space. These exceptions use the paranoid exit code.
+ all but one of the kernel to user space transitions. The exception
+ is when we return from a Non Maskable Interrupt (NMI), which is
+ handled directly in do_nmi().
- - Non Maskable Interrupt (NMI):
-
- Access to sensible data like keys, credentials in the NMI context is
- mostly theoretical: The CPU can do prefetching or execute a
- misspeculated code path and thereby fetching data which might end up
- leaking through a buffer.
-
- But for mounting other attacks the kernel stack address of the task is
- already valuable information. So in full mitigation mode, the NMI is
- mitigated on the return from do_nmi() to provide almost complete
- coverage.
-
- - Double fault (#DF):
-
- A double fault is usually fatal, but the ESPFIX workaround, which can
- be triggered from user space through modify_ldt(2) is a recoverable
- double fault. #DF uses the paranoid exit path, so explicit mitigation
- in the double fault handler is required.
-
- - Machine Check Exception (#MC):
-
- Another corner case is a #MC which hits between the CPU buffer clear
- invocation and the actual return to user. As this still is in kernel
- space it takes the paranoid exit path which does not clear the CPU
- buffers. So the #MC handler repopulates the buffers to some
- extent. Machine checks are not reliably controllable and the window is
- extremly small so mitigation would just tick a checkbox that this
- theoretical corner case is covered. To keep the amount of special
- cases small, ignore #MC.
-
- - Debug Exception (#DB):
-
- This takes the paranoid exit path only when the INT1 breakpoint is in
- kernel space. #DB on a user space address takes the regular exit path,
- so no extra mitigation required.
+ (The reason that NMI is special is that prepare_exit_to_usermode() can
+ enable IRQs. In NMI context, NMIs are blocked, and we don't want to
+ enable IRQs with NMIs blocked.)
2. C-State transition
diff --git a/MAINTAINERS b/MAINTAINERS
index 005902ea1450..9cc6767e1b12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1000,7 +1000,7 @@ F: include/linux/clk/analogbits*
ANDES ARCHITECTURE
M: Greentime Hu <green.hu@gmail.com>
M: Vincent Chen <deanbo422@gmail.com>
-T: git https://github.com/andestech/linux.git
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux.git
S: Supported
F: arch/nds32/
F: Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt
diff --git a/arch/Kconfig b/arch/Kconfig
index f11f0698b148..c47b328eada0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -781,7 +781,7 @@ config COMPAT_OLD_SIGACTION
bool
config 64BIT_TIME
- def_bool ARCH_HAS_64BIT_TIME
+ def_bool y
help
This should be selected by all architectures that need to support
new system calls with a 64-bit time_t. This is relevant on all 32-bit
diff --git a/arch/alpha/include/asm/segment.h b/arch/alpha/include/asm/segment.h
deleted file mode 100644
index 0453d97daae7..000000000000
--- a/arch/alpha/include/asm/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ALPHA_SEGMENT_H
-#define __ALPHA_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
index 4dbd4e415041..bbbd34586de0 100644
--- a/arch/alpha/kernel/smc37c669.c
+++ b/arch/alpha/kernel/smc37c669.c
@@ -10,7 +10,6 @@
#include <asm/hwrpb.h>
#include <asm/io.h>
-#include <asm/segment.h>
#if 0
# define DBG_DEVS(args) printk args
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 733f08966fd2..71cd7aca38ce 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -11,7 +11,6 @@
#include <asm/hwrpb.h>
#include <asm/io.h>
-#include <asm/segment.h>
#define SMC_DEBUG 0
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 165f268beafc..9e7704e44f6d 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -467,3 +467,9 @@
535 common io_uring_setup sys_io_uring_setup
536 common io_uring_enter sys_io_uring_enter
537 common io_uring_register sys_io_uring_register
+538 common open_tree sys_open_tree
+539 common move_mount sys_move_mount
+540 common fsopen sys_fsopen
+541 common fsconfig sys_fsconfig
+542 common fsmount sys_fsmount
+543 common fspick sys_fspick
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index eabc3efa6c6d..526418543379 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -742,6 +742,7 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n);
#endif
+#include <asm/segment.h>
#include <asm-generic/uaccess.h>
#endif
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 05ecc004de86..f863c6935d0e 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -116,8 +116,7 @@ endif
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
-AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
-CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+CFLAGS_ISA :=-mthumb -Wa,-mimplicit-it=always $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
index 8b0f7c4c3f09..7d26ca0b1302 100644
--- a/arch/arm/configs/mini2440_defconfig
+++ b/arch/arm/configs/mini2440_defconfig
@@ -152,7 +152,7 @@ CONFIG_SPI_S3C24XX=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_LM75=y
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_FB=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index f6d24d762a7f..07ebbdce3645 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -387,7 +387,7 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM95245=m
CONFIG_SENSORS_NTC_THERMISTOR=m
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=m
CONFIG_SA1100_WATCHDOG=m
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 0b2ecc98e086..60de9d13181a 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -14,7 +14,6 @@ generic-y += msi.h
generic-y += parport.h
generic-y += preempt.h
generic-y += seccomp.h
-generic-y += segment.h
generic-y += serial.h
generic-y += simd.h
generic-y += trace_clock.h
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 99d9f630d6b6..1888c2d15da5 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -133,9 +133,11 @@ static inline void modify_domain(unsigned dom, unsigned type) { }
* instructions (inline assembly)
*/
#ifdef CONFIG_CPU_USE_DOMAINS
-#define TUSER(instr) #instr "t"
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr "t" #cond
#else
-#define TUSER(instr) #instr
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr #cond
#endif
#else /* __ASSEMBLY__ */
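
  For reference, expanding the macros above: with CONFIG_CPU_USE_DOMAINS=y,

      TUSER(ldr)         -> "ldrt"
      TUSERCOND(str, eq) -> "strteq"

  while without domain support they expand to "ldr" and "streq". The
  futex.h hunk below uses TUSERCOND so that the condition suffix follows
  the "t", as the unified assembler syntax requires.
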
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 0a46676b4245..83c391b597d4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -110,10 +110,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
preempt_disable();
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ " .syntax unified\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
" it eq @ explicit IT needed for the 2b label\n"
- "2: " TUSER(streq) " %3, [%4]\n"
+ "2: " TUSERCOND(str, eq) " %3, [%4]\n"
__futex_atomic_ex_table("%5")
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 8927cae7c966..efb0e2c0d84c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -343,4 +343,6 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
}
}
+static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 770d73257ad9..075e1921fdd9 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -19,6 +19,7 @@
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
+#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
@@ -53,6 +54,8 @@
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+static inline int kvm_arm_init_sve(void) { return 0; }
+
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -150,9 +153,13 @@ struct kvm_cpu_context {
u32 cp15[NR_CP15_REGS];
};
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+ struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
int cpu)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -182,7 +189,7 @@ struct kvm_vcpu_arch {
struct kvm_vcpu_fault_info fault;
/* Host FP context */
- kvm_cpu_context_t *host_cpu_context;
+ struct kvm_cpu_context *host_cpu_context;
/* VGIC state */
struct vgic_cpu vgic_cpu;
@@ -361,6 +368,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}
@@ -409,4 +419,14 @@ static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
return 0;
}
+static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+ return -EINVAL;
+}
+
+static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/limits.h b/arch/arm/include/asm/limits.h
deleted file mode 100644
index ab159371d786..000000000000
--- a/arch/arm/include/asm/limits.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_PIPE_H
-#define __ASM_PIPE_H
-
-#ifndef PAGE_SIZE
-#include <asm/page.h>
-#endif
-
-#define PIPE_BUF PAGE_SIZE
-
-#endif
-
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 57fe73ea0f72..5d06f75ffad4 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -135,8 +135,8 @@ static inline void prefetchw(const void *ptr)
__asm__ __volatile__(
".arch_extension mp\n"
__ALT_SMP_ASM(
- WASM(pldw) "\t%a0",
- WASM(pld) "\t%a0"
+ "pldw\t%a0",
+ "pld\t%a0"
)
:: "p" (ptr));
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index dff49845eb87..d49ce8f48be3 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -112,10 +112,11 @@ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
unsigned long tmp;
asm volatile(
+ " .syntax unified\n"
" sub %1, %3, #1\n"
" subs %1, %1, %0\n"
" addhs %1, %1, #1\n"
- " subhss %1, %1, %2\n"
+ " subshs %1, %1, %2\n"
" movlo %0, #0\n"
: "+r" (safe_ptr), "=&r" (tmp)
: "r" (size), "r" (current_thread_info()->addr_limit)
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 63511f638ce4..e6b8ffd934a1 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -12,6 +12,7 @@
#include <linux/clk/davinci.h>
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 67ab71ba3ad3..77bc64d6e39b 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -18,6 +18,7 @@
#include <linux/cpufreq.h>
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/platform_data/clk-da8xx-cfgchip.h>
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index b8dc674e06bc..036139fe0d0f 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -17,6 +17,7 @@
#include <linux/dma-contiguous.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/serial_8250.h>
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 4a482445b9a2..c6073326be2e 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 8e0a77315add..2f9ae6431bf5 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index cecc7ceb8d34..1b9e9a6192ef 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -14,6 +14,7 @@
#include <linux/clkdev.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index f33392f77a03..62ca952fe161 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 0d420a2bfe3e..d7b826d2695c 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_data/dma-mv_xor.h>
diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c
index b6a81ba1ce32..5a9c016b3c6c 100644
--- a/arch/arm/mach-mediatek/mediatek.c
+++ b/arch/arm/mach-mediatek/mediatek.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index f72e1e9f5fc5..dd762d1b083f 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index c67f92bfa30e..7bcb41137bbf 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index e41cabc4dc2b..06ab03b93109 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/clk-provider.h>
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 6aba9ebf8041..7f634eaeaf10 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -15,6 +15,7 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/platform_device.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 68dcd5f8d7c6..be0b42937888 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -182,21 +182,6 @@ int pfn_valid(unsigned long pfn)
EXPORT_SYMBOL(pfn_valid);
#endif
-#ifndef CONFIG_SPARSEMEM
-static void __init arm_memory_present(void)
-{
-}
-#else
-static void __init arm_memory_present(void)
-{
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg)
- memory_present(0, memblock_region_memory_base_pfn(reg),
- memblock_region_memory_end_pfn(reg));
-}
-#endif
-
static bool arm_memblock_steal_permitted = true;
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
@@ -293,7 +278,7 @@ void __init bootmem_init(void)
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
*/
- arm_memory_present();
+ memblocks_present();
/*
* sparse_init() needs the bootmem allocator up and running.
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 0393917eaa57..aaf479a9e92d 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -441,3 +441,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index f4efff9d3afb..fadf554d9391 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -10,12 +10,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
-VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
-VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared
-VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
+ldflags-y = -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ -z max-page-size=4096 -z common-page-size=4096 \
+ -nostdlib -shared \
+ $(call ld-option, --hash-style=sysv) \
+ $(call ld-option, --build-id) \
+ -T
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
@@ -37,8 +37,8 @@ KCOV_INSTRUMENT := n
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file
-$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
+$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,ld)
$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
$(call if_changed,vdsomunge)
@@ -48,11 +48,6 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
-# Actual build commands
-quiet_cmd_vdsold = VDSO $@
- cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
- -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 69a59a5d1143..4780eb7af842 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1341,6 +1341,7 @@ menu "ARMv8.3 architectural features"
config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
+ depends on !KVM || ARM64_VHE
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
@@ -1354,8 +1355,9 @@ config ARM64_PTR_AUTH
context-switched along with the process.
The feature is detected at runtime. If the feature is not present in
- hardware it will not be advertised to userspace nor will it be
- enabled.
+ hardware it will not be advertised to userspace or to KVM guests, nor
+ will it be enabled. However, KVM guests additionally require VHE mode,
+ and hence CONFIG_ARM64_VHE=y, to use this feature.
endmenu
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 9e977dedf193..1de6e05ce48b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += mmiowb.h
generic-y += msi.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += segment.h
generic-y += serial.h
generic-y += set_memory.h
generic-y += switch_to.h
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index dd1ad3950ef5..df62bbd33a9a 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -24,10 +24,13 @@
#ifndef __ASSEMBLY__
+#include <linux/bitmap.h>
#include <linux/build_bug.h>
+#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
+#include <linux/types.h>
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
@@ -56,7 +59,8 @@ extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_task_to_cpu(void);
-extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state);
+extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
+ void *sve_state, unsigned int sve_vl);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_flush_cpu_state(void);
@@ -87,6 +91,29 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
extern int __ro_after_init sve_max_vl;
+extern int __ro_after_init sve_max_virtualisable_vl;
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa). This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+static inline unsigned int __vq_to_bit(unsigned int vq)
+{
+ return SVE_VQ_MAX - vq;
+}
+
+static inline unsigned int __bit_to_vq(unsigned int bit)
+{
+ return SVE_VQ_MAX - bit;
+}
+
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+static inline bool sve_vq_available(unsigned int vq)
+{
+ return test_bit(__vq_to_bit(vq), sve_vq_map);
+}
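+
+/*
+ * For example, the largest supported VQ not exceeding max_vq can be
+ * found with the idiom used by find_supported_vector_length() in
+ * fpsimd.c (sketch):
+ *
+ *	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, __vq_to_bit(max_vq));
+ *	vq  = __bit_to_vq(bit);
+ */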
#ifdef CONFIG_ARM64_SVE
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index f5b79e995f40..ff73f5462aca 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void);
.endm
.macro get_host_ctxt reg, tmp
- hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+ hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+ add \reg, \reg, #HOST_DATA_CONTEXT
.endm
.macro get_vcpu_ptr vcpu, ctxt
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d3842791e1c4..613427fafff9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -98,6 +98,22 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TWE;
}
+static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
+static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+}
+
+static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_disable(vcpu);
+}
+
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a01fe087e022..2a8d3f8ca22c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -22,9 +22,13 @@
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
+#include <linux/bitmap.h>
#include <linux/types.h>
+#include <linux/jump_label.h>
#include <linux/kvm_types.h>
+#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
@@ -45,7 +49,7 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 4
+#define KVM_VCPU_MAX_FEATURES 7
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
@@ -54,8 +58,12 @@
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+extern unsigned int kvm_sve_max_vl;
+int kvm_arm_init_sve(void);
+
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
@@ -117,6 +125,7 @@ enum vcpu_sysreg {
SCTLR_EL1, /* System Control Register */
ACTLR_EL1, /* Auxiliary Control Register */
CPACR_EL1, /* Coprocessor Access Control */
+ ZCR_EL1, /* SVE Control */
TTBR0_EL1, /* Translation Table Base Register 0 */
TTBR1_EL1, /* Translation Table Base Register 1 */
TCR_EL1, /* Translation Control Register */
@@ -152,6 +161,18 @@ enum vcpu_sysreg {
PMSWINC_EL0, /* Software Increment Register */
PMUSERENR_EL0, /* User Enable Register */
+ /* Pointer Authentication Registers in a strict increasing order. */
+ APIAKEYLO_EL1,
+ APIAKEYHI_EL1,
+ APIBKEYLO_EL1,
+ APIBKEYHI_EL1,
+ APDAKEYLO_EL1,
+ APDAKEYHI_EL1,
+ APDBKEYLO_EL1,
+ APDBKEYHI_EL1,
+ APGAKEYLO_EL1,
+ APGAKEYHI_EL1,
+
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -212,7 +233,17 @@ struct kvm_cpu_context {
struct kvm_vcpu *__hyp_running_vcpu;
};
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_pmu_events {
+ u32 events_host;
+ u32 events_guest;
+};
+
+struct kvm_host_data {
+ struct kvm_cpu_context host_ctxt;
+ struct kvm_pmu_events pmu_events;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
struct vcpu_reset_state {
unsigned long pc;
@@ -223,6 +254,8 @@ struct vcpu_reset_state {
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
+ void *sve_state;
+ unsigned int sve_max_vl;
/* HYP configuration */
u64 hcr_el2;
@@ -255,7 +288,7 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch external_debug_state;
/* Pointer to host CPU context */
- kvm_cpu_context_t *host_cpu_context;
+ struct kvm_cpu_context *host_cpu_context;
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
@@ -318,12 +351,40 @@ struct kvm_vcpu_arch {
bool sysregs_loaded_on_cpu;
};
+/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
+#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
+ sve_ffr_offset((vcpu)->arch.sve_max_vl)))
+
+#define vcpu_sve_state_size(vcpu) ({ \
+ size_t __size_ret; \
+ unsigned int __vcpu_vq; \
+ \
+ if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ __size_ret = 0; \
+ } else { \
+ __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \
+ __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
+ } \
+ \
+ __size_ret; \
+})
+
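+/*
+ * A typical use (sketch): size the backing storage for a vcpu's SVE
+ * registers when the vcpu is finalized, e.g.:
+ *
+ *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL);
+ */
+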
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY (1 << 0)
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
+#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
+#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
+#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
+
+#define vcpu_has_sve(vcpu) (system_supports_sve() && \
+ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+
+#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
+ system_supports_generic_auth()) && \
+ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
@@ -432,9 +493,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
int cpu)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -452,8 +513,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
*/
- u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
- (u64)kvm_ksym_ref(kvm_host_cpu_state));
+ u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
+ (u64)kvm_ksym_ref(kvm_host_data));
/*
* Call initialization code, and switch to the full blown HYP code.
@@ -491,9 +552,10 @@ static inline bool kvm_arch_requires_vhe(void)
return false;
}
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
+
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
@@ -516,11 +578,28 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return (!has_vhe() && attr->exclude_host);
+}
+
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_run_map_fp(vcpu);
}
+
+void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
+void kvm_clr_pmu_events(u32 clr);
+
+void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
+bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);
+
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
static inline void kvm_arm_vhe_guest_enter(void)
@@ -594,4 +673,10 @@ void kvm_arch_free_vm(struct kvm *kvm);
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+
+#define kvm_arm_vcpu_sve_finalized(vcpu) \
+ ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c3060833b7a5..09fe8bd15f6e 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -149,7 +149,6 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-bool __fpsimd_enabled(void);
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h
new file mode 100644
index 000000000000..6301813dcace
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_ptrauth.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore
+ * Copyright 2019 Arm Limited
+ * Authors: Mark Rutland <mark.rutland@arm.com>
+ * Amit Daniel Kachhap <amit.kachhap@arm.com>
+ */
+
+#ifndef __ASM_KVM_PTRAUTH_H
+#define __ASM_KVM_PTRAUTH_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+
+#define PTRAUTH_REG_OFFSET(x) (x - CPU_APIAKEYLO_EL1)
+
+/*
+ * The CPU_AP*_EL1 values exceed the immediate offset range (512) of the
+ * stp instruction, so the macros below take CPU_APIAKEYLO_EL1 as a base
+ * and calculate the offsets of the keys from it, avoiding an extra add
+ * instruction. These macros assume that the key offsets follow the order of
+ * the sysreg enum in kvm_host.h.
+ */
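+/*
+ * For instance, since each key register occupies one u64 slot in
+ * sys_regs[], PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1) evaluates to 16
+ * (two slots past the APIA base).
+ */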
+.macro ptrauth_save_state base, reg1, reg2
+ mrs_s \reg1, SYS_APIAKEYLO_EL1
+ mrs_s \reg2, SYS_APIAKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
+ mrs_s \reg1, SYS_APIBKEYLO_EL1
+ mrs_s \reg2, SYS_APIBKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
+ mrs_s \reg1, SYS_APDAKEYLO_EL1
+ mrs_s \reg2, SYS_APDAKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
+ mrs_s \reg1, SYS_APDBKEYLO_EL1
+ mrs_s \reg2, SYS_APDBKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
+ mrs_s \reg1, SYS_APGAKEYLO_EL1
+ mrs_s \reg2, SYS_APGAKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
+.endm
+
+.macro ptrauth_restore_state base, reg1, reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
+ msr_s SYS_APIAKEYLO_EL1, \reg1
+ msr_s SYS_APIAKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
+ msr_s SYS_APIBKEYLO_EL1, \reg1
+ msr_s SYS_APIBKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
+ msr_s SYS_APDAKEYLO_EL1, \reg1
+ msr_s SYS_APDAKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
+ msr_s SYS_APDBKEYLO_EL1, \reg1
+ msr_s SYS_APDBKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
+ msr_s SYS_APGAKEYLO_EL1, \reg1
+ msr_s SYS_APGAKEYHI_EL1, \reg2
+.endm
+
+/*
+ * Both the ptrauth_switch_to_guest and ptrauth_switch_to_host macros
+ * check for the presence of one of the cpufeature flags
+ * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and
+ * only then proceed with the save/restore of the Pointer Authentication
+ * key registers.
+ */
+.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
+alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
+ b 1000f
+alternative_else_nop_endif
+alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
+ b 1001f
+alternative_else_nop_endif
+1000:
+ ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+ and \reg1, \reg1, #(HCR_API | HCR_APK)
+ cbz \reg1, 1001f
+ add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state \reg1, \reg2, \reg3
+1001:
+.endm
+
+.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
+alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
+ b 2000f
+alternative_else_nop_endif
+alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
+ b 2001f
+alternative_else_nop_endif
+2000:
+ ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+ and \reg1, \reg1, #(HCR_API | HCR_APK)
+ cbz \reg1, 2001f
+ add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_save_state \reg1, \reg2, \reg3
+ add \reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state \reg1, \reg2, \reg3
+ isb
+2001:
+.endm
+
+#else /* !CONFIG_ARM64_PTR_AUTH */
+.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
+.endm
+.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
+.endm
+#endif /* CONFIG_ARM64_PTR_AUTH */
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_KVM_PTRAUTH_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 3f7b917e8f3a..902d75b60914 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -454,6 +454,9 @@
#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
+/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_DSSBS (_BITUL(44))
#define SCTLR_ELx_ENIA (_BITUL(31))
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index f2a83ff6b73c..70e6882853c0 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 428
+#define __NR_compat_syscalls 434
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 23f1a44acada..c39e90600bb3 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -874,6 +874,18 @@ __SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
#define __NR_io_uring_register 427
__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+#define __NR_open_tree 428
+__SYSCALL(__NR_open_tree, sys_open_tree)
+#define __NR_move_mount 429
+__SYSCALL(__NR_move_mount, sys_move_mount)
+#define __NR_fsopen 430
+__SYSCALL(__NR_fsopen, sys_fsopen)
+#define __NR_fsconfig 431
+__SYSCALL(__NR_fsconfig, sys_fsconfig)
+#define __NR_fsmount 432
+__SYSCALL(__NR_fsmount, sys_fsmount)
+#define __NR_fspick 433
+__SYSCALL(__NR_fspick, sys_fspick)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 97c3478ee6e7..7b7ac0f6cec9 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -35,6 +35,7 @@
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/sve_context.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
@@ -102,6 +103,9 @@ struct kvm_regs {
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */
+#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
+#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
struct kvm_vcpu_init {
__u32 target;
@@ -226,6 +230,45 @@ struct kvm_vcpu_events {
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+/* SVE registers */
+#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
+
+/* Z- and P-regs occupy blocks at the following offsets within this range: */
+#define KVM_REG_ARM64_SVE_ZREG_BASE 0
+#define KVM_REG_ARM64_SVE_PREG_BASE 0x400
+#define KVM_REG_ARM64_SVE_FFR_BASE 0x600
+
+#define KVM_ARM64_SVE_NUM_ZREGS __SVE_NUM_ZREGS
+#define KVM_ARM64_SVE_NUM_PREGS __SVE_NUM_PREGS
+
+#define KVM_ARM64_SVE_MAX_SLICES 32
+
+#define KVM_REG_ARM64_SVE_ZREG(n, i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \
+ KVM_REG_SIZE_U2048 | \
+ (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_PREG(n, i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \
+ KVM_REG_SIZE_U256 | \
+ (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_FFR(i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \
+ KVM_REG_SIZE_U256 | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
+#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
+
+/* Vector lengths pseudo-register: */
+#define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
+ KVM_REG_SIZE_U512 | 0xffff)
+#define KVM_ARM64_SVE_VLS_WORDS \
+ ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
+
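+/*
+ * For example, slice 0 of the Z0 register is addressed as
+ * KVM_REG_ARM64_SVE_ZREG(0, 0); userspace passes the resulting value in
+ * the id field of struct kvm_one_reg for KVM_GET_ONE_REG/KVM_SET_ONE_REG.
+ */
+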
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index e10e2a5d9ddc..947e39896e28 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -125,9 +125,16 @@ int main(void)
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
+ DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
+ DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
+ DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
+ DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2b807f129e60..ca27e08e3d8a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1913,7 +1913,7 @@ static void verify_sve_features(void)
unsigned int len = zcr & ZCR_ELx_LEN_MASK;
if (len < safe_len || sve_verify_vq_map()) {
- pr_crit("CPU%d: SVE: required vector length(s) missing\n",
+ pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 735cf1f8b109..a38bf74bcca8 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -18,6 +18,7 @@
*/
#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
@@ -48,6 +49,7 @@
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
+#include <asm/virt.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -119,6 +121,8 @@
*/
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
+ void *sve_state;
+ unsigned int sve_vl;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -130,14 +134,23 @@ static int sve_default_vl = -1;
/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq is encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
static void __percpu *efi_sve_state;
#else /* ! CONFIG_ARM64_SVE */
/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
@@ -235,14 +248,15 @@ static void task_fpsimd_load(void)
*/
void fpsimd_save(void)
{
- struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+ struct fpsimd_last_state_struct const *last =
+ this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
WARN_ON(!in_softirq() && !irqs_disabled());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
- if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+ if (WARN_ON(sve_get_vl() != last->sve_vl)) {
/*
* Can't save the user regs, so current would
* re-enter user with corrupt state.
@@ -252,32 +266,15 @@ void fpsimd_save(void)
return;
}
- sve_save_state(sve_pffr(&current->thread), &st->fpsr);
+ sve_save_state((char *)last->sve_state +
+ sve_ffr_offset(last->sve_vl),
+ &last->st->fpsr);
} else
- fpsimd_save_state(st);
+ fpsimd_save_state(last->st);
}
}
/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa). This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
- return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
- if (WARN_ON(bit >= SVE_VQ_MAX))
- bit = SVE_VQ_MAX - 1;
-
- return SVE_VQ_MAX - bit;
-}
-
-/*
* All vector length selection from userspace comes through here.
* We're on a slow path, so some sanity-checks are included.
* If things go wrong there's a bug somewhere, but try to fall back to a
@@ -298,8 +295,8 @@ static unsigned int find_supported_vector_length(unsigned int vl)
vl = max_vl;
bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
- vq_to_bit(sve_vq_from_vl(vl)));
- return sve_vl_from_vq(bit_to_vq(bit));
+ __vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(__bit_to_vq(bit));
}
#ifdef CONFIG_SYSCTL
@@ -550,7 +547,6 @@ int sve_set_vector_length(struct task_struct *task,
local_bh_disable();
fpsimd_save();
- set_thread_flag(TIF_FOREIGN_FPSTATE);
}
fpsimd_flush_task_state(task);
@@ -624,12 +620,6 @@ int sve_get_current_vl(void)
return sve_prctl_status(0);
}
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
-
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
unsigned int vq, vl;
@@ -644,40 +634,82 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
vl = sve_get_vl();
vq = sve_vq_from_vl(vl); /* skip intervening lengths */
- set_bit(vq_to_bit(vq), map);
+ set_bit(__vq_to_bit(vq), map);
}
}
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
void __init sve_init_vq_map(void)
{
sve_probe_vqs(sve_vq_map);
+ bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}
/*
* If we haven't committed to the set of supported VQs yet, filter out
* those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
*/
void sve_update_vq_map(void)
{
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+ sve_probe_vqs(tmp_map);
+ bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+ bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}
-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
int sve_verify_vq_map(void)
{
- int ret = 0;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
- SVE_VQ_MAX);
- if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+ sve_probe_vqs(tmp_map);
+
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
smp_processor_id());
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+ return 0;
+
+ /*
+ * For KVM, it is necessary to ensure that this CPU doesn't
+ * support any vector length that guests may have probed as
+ * unsupported.
+ */
+
+ /* Recover the set of supported VQs: */
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ /* Find VQs supported that are not globally supported: */
+ bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+ /* Find the lowest such VQ, if any: */
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ return 0; /* no mismatches */
+
+ /*
+ * Mismatches above sve_max_virtualisable_vl are fine, since
+ * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+ */
+ if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+ pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
+
+ return 0;
}
static void __init sve_efi_setup(void)
@@ -744,6 +776,8 @@ u64 read_zcr_features(void)
void __init sve_setup(void)
{
u64 zcr;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
if (!system_supports_sve())
return;
@@ -753,8 +787,8 @@ void __init sve_setup(void)
* so sve_vq_map must have at least SVE_VQ_MIN set.
* If something went wrong, at least try to patch it up:
*/
- if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
- set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+ if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -772,11 +806,31 @@ void __init sve_setup(void)
*/
sve_default_vl = find_supported_vector_length(64);
+ bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+ SVE_VQ_MAX);
+
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ /* No non-virtualisable VLs found */
+ sve_max_virtualisable_vl = SVE_VQ_MAX;
+ else if (WARN_ON(b == SVE_VQ_MAX - 1))
+ /* No virtualisable VLs? This is architecturally forbidden. */
+ sve_max_virtualisable_vl = SVE_VQ_MIN;
+ else /* b + 1 < SVE_VQ_MAX */
+ sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+ if (sve_max_virtualisable_vl > sve_max_vl)
+ sve_max_virtualisable_vl = sve_max_vl;
+
pr_info("SVE: maximum available vector length %u bytes per vector\n",
sve_max_vl);
pr_info("SVE: default vector length %u bytes per vector\n",
sve_default_vl);
+ /* KVM decides whether to support mismatched systems. Just warn here: */
+ if (sve_max_virtualisable_vl < sve_max_vl)
+ pr_warn("SVE: unvirtualisable vector lengths present\n");
+
sve_efi_setup();
}
@@ -816,12 +870,11 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
local_bh_disable();
fpsimd_save();
- fpsimd_to_sve(current);
/* Force ret_to_user to reload the registers: */
fpsimd_flush_task_state(current);
- set_thread_flag(TIF_FOREIGN_FPSTATE);
+ fpsimd_to_sve(current);
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
@@ -894,9 +947,9 @@ void fpsimd_flush_thread(void)
local_bh_disable();
+ fpsimd_flush_task_state(current);
memset(&current->thread.uw.fpsimd_state, 0,
sizeof(current->thread.uw.fpsimd_state));
- fpsimd_flush_task_state(current);
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
@@ -933,8 +986,6 @@ void fpsimd_flush_thread(void)
current->thread.sve_vl_onexec = 0;
}
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-
local_bh_enable();
}
@@ -974,6 +1025,8 @@ void fpsimd_bind_task_to_cpu(void)
this_cpu_ptr(&fpsimd_last_state);
last->st = &current->thread.uw.fpsimd_state;
+ last->sve_state = current->thread.sve_state;
+ last->sve_vl = current->thread.sve_vl;
current->thread.fpsimd_cpu = smp_processor_id();
if (system_supports_sve()) {
@@ -987,7 +1040,8 @@ void fpsimd_bind_task_to_cpu(void)
}
}
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -995,6 +1049,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->sve_state = sve_state;
+ last->sve_vl = sve_vl;
}
/*
@@ -1043,12 +1099,29 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
/*
* Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled. The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
*/
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
+
+ barrier();
+ set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+ barrier();
}
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * This function must be called with softirqs disabled.
+ */
void fpsimd_flush_cpu_state(void)
{
__this_cpu_write(fpsimd_last_state.st, NULL);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 6164d389eed6..348d12eec566 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/clocksource.h>
+#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
@@ -528,12 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
+ struct perf_event_attr *attr = &event->attr;
int idx = event->hw.idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
- armv8pmu_enable_counter(idx);
if (armv8pmu_event_is_chained(event))
- armv8pmu_enable_counter(idx - 1);
- isb();
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_set_pmu_events(counter_bits, attr);
+
+ /* We rely on the hypervisor switch code to enable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ armv8pmu_enable_counter(idx);
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_enable_counter(idx - 1);
+ }
}
static inline int armv8pmu_disable_counter(int idx)
@@ -546,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
+ struct perf_event_attr *attr = &event->attr;
int idx = hwc->idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
if (armv8pmu_event_is_chained(event))
- armv8pmu_disable_counter(idx - 1);
- armv8pmu_disable_counter(idx);
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_clr_pmu_events(counter_bits);
+
+ /* We rely on the hypervisor switch code to disable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_disable_counter(idx - 1);
+ armv8pmu_disable_counter(idx);
+ }
}
static inline int armv8pmu_enable_intens(int idx)
@@ -827,14 +847,23 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
- if (!attr->exclude_kernel)
+ if (!attr->exclude_kernel && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
- } else {
- if (attr->exclude_kernel)
+ if (attr->exclude_guest)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
+ if (attr->exclude_host)
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
+ } else {
+ if (!attr->exclude_hv && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
+
+ /*
+ * Filter out !VHE kernels and guest kernels
+ */
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
@@ -864,6 +893,9 @@ static void armv8pmu_reset(void *info)
armv8pmu_disable_intens(idx);
}
+ /* Clear the counters we flip at guest entry/exit */
+ kvm_clr_pmu_events(U32_MAX);
+
/*
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 867a7cea70e5..a9b0485df074 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -296,11 +296,6 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
*/
fpsimd_flush_task_state(current);
- barrier();
- /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
- barrier();
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
sve_alloc(current);
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 690e033a91c0..3ac1a64d2fb9 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index aac7808ce216..6e3c9c8b2df9 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
+#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
@@ -85,9 +86,12 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
- fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
+ vcpu->arch.sve_state,
+ vcpu->arch.sve_max_vl);
+
clear_thread_flag(TIF_FOREIGN_FPSTATE);
- clear_thread_flag(TIF_SVE);
+ update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
}
}
@@ -100,14 +104,21 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
unsigned long flags;
+ bool host_has_sve = system_supports_sve();
+ bool guest_has_sve = vcpu_has_sve(vcpu);
local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
+
/* Clean guest FP state to memory and invalidate cpu view */
fpsimd_save();
fpsimd_flush_cpu_state();
- } else if (system_supports_sve()) {
+
+ if (guest_has_sve)
+ *guest_zcr = read_sysreg_s(SYS_ZCR_EL12);
+ } else if (host_has_sve) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
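fpsimd_bind_state_to_cpu() now takes the vcpu's SVE backing storage and maximum vector length in addition to the FPSIMD registers. The prototype assumed by the call above (declared in asm/fpsimd.h by this series) is:

	/* Assumed prototype: bind an external FPSIMD/SVE state to this cpu */
	void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
				      void *sve_state, unsigned int sve_vl);

This lets a later fpsimd_save() write the guest's live SVE state back through the same mechanism the host uses for tasks.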
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dd436a50fce7..3ae2f82fca46 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -19,18 +19,25 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
+#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_host.h>
+#include <asm/sigcontext.h>
#include "trace.h"
@@ -52,12 +59,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+static bool core_reg_offset_is_vreg(u64 off)
+{
+ return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+ off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size;
@@ -89,11 +103,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
return -EINVAL;
}
- if (KVM_REG_SIZE(reg->id) == size &&
- IS_ALIGNED(off, size / sizeof(__u32)))
- return 0;
+ if (KVM_REG_SIZE(reg->id) != size ||
+ !IS_ALIGNED(off, size / sizeof(__u32)))
+ return -EINVAL;
- return -EINVAL;
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+ return -EINVAL;
+
+ return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -115,7 +137,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ if (validate_core_offset(vcpu, reg))
return -EINVAL;
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
@@ -140,7 +162,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ if (validate_core_offset(vcpu, reg))
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -183,6 +205,239 @@ out:
return err;
}
+#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
+#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
+
+static bool vq_present(
+ const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
+ unsigned int vq)
+{
+ return (*vqs)[vq_word(vq)] & vq_mask(vq);
+}
+
+static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
+ return -EINVAL;
+
+ memset(vqs, 0, sizeof(vqs));
+
+ max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (sve_vq_available(vq))
+ vqs[vq_word(vq)] |= vq_mask(vq);
+
+ if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM; /* too late! */
+
+ if (WARN_ON(vcpu->arch.sve_state))
+ return -EINVAL;
+
+ if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
+ return -EFAULT;
+
+ max_vq = 0;
+ for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
+ if (vq_present(&vqs, vq))
+ max_vq = vq;
+
+ if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
+ return -EINVAL;
+
+ /*
+ * Vector lengths supported by the host can't currently be
+ * hidden from the guest individually: instead we can only set a
+ * maximum via ZCR_EL2.LEN. So, make sure the available vector
+ * lengths match the set requested exactly up to the requested
+ * maximum:
+ */
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (vq_present(&vqs, vq) != sve_vq_available(vq))
+ return -EINVAL;
+
+ /* Can't run with no vector lengths at all: */
+ if (max_vq < SVE_VQ_MIN)
+ return -EINVAL;
+
+ /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
+ vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
+
+ return 0;
+}
+
+#define SVE_REG_SLICE_SHIFT 0
+#define SVE_REG_SLICE_BITS 5
+#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
+#define SVE_REG_ID_BITS 5
+
+#define SVE_REG_SLICE_MASK \
+ GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \
+ SVE_REG_SLICE_SHIFT)
+#define SVE_REG_ID_MASK \
+ GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
+
+#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
+
+#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
+#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
+
+/*
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
+ */
+#define vcpu_sve_slices(vcpu) 1
+
+/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
+struct sve_state_reg_region {
+ unsigned int koffset; /* offset into sve_state in kernel memory */
+ unsigned int klen; /* length in kernel memory */
+ unsigned int upad; /* extra trailing padding in user memory */
+};
+
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
+static int sve_reg_to_region(struct sve_state_reg_region *region,
+ struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ /* reg ID ranges for Z- registers */
+ const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
+ const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* reg ID ranges for P- registers and FFR (which are contiguous) */
+ const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
+ const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
+
+ unsigned int vq;
+ unsigned int reg_num;
+
+ unsigned int reqoffset, reqlen; /* User-requested offset and length */
+ unsigned int maxlen; /* Maximum permitted length */
+
+ size_t sve_state_size;
+
+ const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* Verify that the P-regs and FFR really do have contiguous IDs: */
+ BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
+
+ /* Verify that we match the UAPI header: */
+ BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
+
+ reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
+
+ if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
+ reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_ZREG_SIZE;
+ maxlen = SVE_SIG_ZREG_SIZE(vq);
+ } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
+ reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_PREG_SIZE;
+ maxlen = SVE_SIG_PREG_SIZE(vq);
+ } else {
+ return -EINVAL;
+ }
+
+ sve_state_size = vcpu_sve_state_size(vcpu);
+ if (WARN_ON(!sve_state_size))
+ return -EINVAL;
+
+ region->koffset = array_index_nospec(reqoffset, sve_state_size);
+ region->klen = min(maxlen, reqlen);
+ region->upad = reqlen - region->klen;
+
+ return 0;
+}
+
+static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ char __user *uptr = (char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return get_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
+ region.klen) ||
+ clear_user(uptr + region.klen, region.upad))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ const char __user *uptr = (const char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return set_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
+ region.klen))
+ return -EFAULT;
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
@@ -193,9 +448,37 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-static unsigned long num_core_regs(void)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int i;
+ int n = 0;
+ const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+
+ for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+ continue;
+
+ if (uindices) {
+ if (put_user(core_reg | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
- return sizeof(struct kvm_regs) / sizeof(__u32);
+ return copy_core_reg_indices(vcpu, NULL);
}
/**
@@ -251,6 +534,67 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
+static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
+ + 1; /* KVM_REG_ARM64_SVE_VLS */
+}
+
+static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+ u64 reg;
+ unsigned int i, n;
+ int num_regs = 0;
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ /*
+ * Enumerate this first, so that userspace can save/restore in
+ * the order reported by KVM_GET_REG_LIST:
+ */
+ reg = KVM_REG_ARM64_SVE_VLS;
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ ++num_regs;
+
+ for (i = 0; i < slices; i++) {
+ for (n = 0; n < SVE_NUM_ZREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_ZREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ for (n = 0; n < SVE_NUM_PREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_PREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ reg = KVM_REG_ARM64_SVE_FFR(i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ return num_regs;
+}
+
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
@@ -258,8 +602,15 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
- + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
+ unsigned long res = 0;
+
+ res += num_core_regs(vcpu);
+ res += num_sve_regs(vcpu);
+ res += kvm_arm_num_sys_reg_descs(vcpu);
+ res += kvm_arm_get_fw_num_regs(vcpu);
+ res += NUM_TIMER_REGS;
+
+ return res;
}
/**
@@ -269,23 +620,25 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
- const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
int ret;
- for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
- if (put_user(core_reg | i, uindices))
- return -EFAULT;
- uindices++;
- }
+ ret = copy_core_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_sve_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += NUM_TIMER_REGS;
@@ -298,12 +651,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we want a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return get_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_get_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -317,12 +669,11 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we set a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return set_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_set_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
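With the switch-based dispatch above, userspace reaches the new SVE accessors through the usual KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A hypothetical caller setting Z0 (slice 0) of a finalized, SVE-enabled vcpu (vcpu_fd is assumed):

	__u64 zreg_buf[32];	/* one 2048-bit Z-register slice */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64_SVE_ZREG(0, 0),
		.addr = (__u64)(unsigned long)zreg_buf,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
		perror("KVM_SET_ONE_REG");

For vcpus whose vector length is shorter than the slice, reads zero the trailing bytes, matching the region.upad handling in get_sve_reg().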
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 0b7983442071..516aead3c2a9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -173,20 +173,40 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+#define __ptrauth_save_key(regs, key) \
+({ \
+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+/*
+ * Handle the guest trying to use a ptrauth instruction, or trying to access a
+ * ptrauth register.
+ */
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *ctxt;
+
+ if (vcpu_has_ptrauth(vcpu)) {
+ vcpu_ptrauth_enable(vcpu);
+ ctxt = vcpu->arch.host_cpu_context;
+ __ptrauth_save_key(ctxt->sys_regs, APIA);
+ __ptrauth_save_key(ctxt->sys_regs, APIB);
+ __ptrauth_save_key(ctxt->sys_regs, APDA);
+ __ptrauth_save_key(ctxt->sys_regs, APDB);
+ __ptrauth_save_key(ctxt->sys_regs, APGA);
+ } else {
+ kvm_inject_undefined(vcpu);
+ }
+}
+
/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
* a NOP).
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- /*
- * We don't currently support ptrauth in a guest, and we mask the ID
- * registers to prevent well-behaved guests from trying to make use of
- * it.
- *
- * Inject an UNDEF, as if the feature really isn't present.
- */
- kvm_inject_undefined(vcpu);
+ kvm_arm_vcpu_ptrauth_trap(vcpu);
return 1;
}
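For reference, the token pasting in __ptrauth_save_key() means that, for example, __ptrauth_save_key(ctxt->sys_regs, APIA) expands to:

	ctxt->sys_regs[APIAKEYLO_EL1] = read_sysreg_s(SYS_APIAKEYLO_EL1);
	ctxt->sys_regs[APIAKEYHI_EL1] = read_sysreg_s(SYS_APIAKEYHI_EL1);

so the first trap snapshots all five of the host's key pairs into the host context before ptrauth is enabled for the guest.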
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 675fdc186e3b..93ba3d7ef027 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -24,6 +24,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -64,6 +65,13 @@ ENTRY(__guest_enter)
add x18, x0, #VCPU_CONTEXT
+ // Macro ptrauth_switch_to_guest format:
+ // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
+ // The macro below, which restores the guest keys, is not implemented
+ // in C because it could cause Pointer Authentication key signing
+ // mismatch errors when this feature is enabled for kernel code.
+ ptrauth_switch_to_guest x18, x0, x1, x2
+
// Restore guest regs x0-x17
ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
@@ -118,6 +126,13 @@ ENTRY(__guest_exit)
get_host_ctxt x2, x3
+ // Macro ptrauth_switch_to_host format:
+ // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
+ // The macro below, which saves the guest keys and restores the host
+ // keys, is not implemented in C because it could cause Pointer
+ // Authentication key signing mismatch errors when this feature is
+ // enabled for kernel code.
+ ptrauth_switch_to_host x1, x2, x3, x4, x5
+
// Now restore the host regs
restore_callee_saved_regs x2
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 3563fe655cd5..22b4c335e0b2 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -100,7 +100,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
- if (!update_fp_enabled(vcpu)) {
+ if (update_fp_enabled(vcpu)) {
+ if (vcpu_has_sve(vcpu))
+ val |= CPACR_EL1_ZEN;
+ } else {
val &= ~CPACR_EL1_FPEN;
__activate_traps_fpsimd32(vcpu);
}
@@ -317,16 +320,48 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
return true;
}
-static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
+/* Check for an FPSIMD/SVE trap and handle as appropriate */
+static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
- struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
+ bool vhe, sve_guest, sve_host;
+ u8 hsr_ec;
- if (has_vhe())
- write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
- cpacr_el1);
- else
+ if (!system_supports_fpsimd())
+ return false;
+
+ if (system_supports_sve()) {
+ sve_guest = vcpu_has_sve(vcpu);
+ sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
+ vhe = true;
+ } else {
+ sve_guest = false;
+ sve_host = false;
+ vhe = has_vhe();
+ }
+
+ hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
+ hsr_ec != ESR_ELx_EC_SVE)
+ return false;
+
+ /* Don't handle SVE traps for non-SVE vcpus here: */
+ if (!sve_guest)
+ if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+ return false;
+
+ /* Valid trap. Switch the context: */
+
+ if (vhe) {
+ u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
+
+ if (sve_guest)
+ reg |= CPACR_EL1_ZEN;
+
+ write_sysreg(reg, cpacr_el1);
+ } else {
write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
cptr_el2);
+ }
isb();
@@ -335,21 +370,28 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
* In the SVE case, VHE is assumed: it is enforced by
* Kconfig and kvm_arch_init().
*/
- if (system_supports_sve() &&
- (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
+ if (sve_host) {
struct thread_struct *thread = container_of(
- host_fpsimd,
+ vcpu->arch.host_fpsimd_state,
struct thread_struct, uw.fpsimd_state);
- sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
+ sve_save_state(sve_pffr(thread),
+ &vcpu->arch.host_fpsimd_state->fpsr);
} else {
- __fpsimd_save_state(host_fpsimd);
+ __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
}
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
- __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ if (sve_guest) {
+ sve_load_state(vcpu_sve_pffr(vcpu),
+ &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
+ sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+ write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+ } else {
+ __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ }
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
@@ -385,10 +427,10 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* and restore the guest context lazily.
* If FP/SIMD is not implemented, handle the trap and inject an
* undefined instruction exception to the guest.
+ * Similarly for trapped SVE accesses.
*/
- if (system_supports_fpsimd() &&
- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD)
- return __hyp_switch_fpsimd(vcpu);
+ if (__hyp_handle_fpsimd(vcpu))
+ return true;
if (!__populate_fault_info(vcpu))
return true;
@@ -524,6 +566,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
+ bool pmu_switch_needed;
u64 exit_code;
/*
@@ -543,6 +586,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
+ pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+
__sysreg_save_state_nvhe(host_ctxt);
__activate_vm(kern_hyp_va(vcpu->kvm));
@@ -589,6 +634,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
*/
__debug_switch_to_host(vcpu);
+ if (pmu_switch_needed)
+ __pmu_switch_to_host(host_ctxt);
+
/* Returning to host will clear PSR.I, remask PMR if needed */
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQOFF);
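The third argument that sve_load_state() receives above is the vector length encoded as (vq - 1), the same encoding ZCR_ELx.LEN uses, where one vq is a 128-bit quadword. Assuming the usual conversion helper from asm/fpsimd.h:

	/* Assumed helper: vector length in bytes to 128-bit quadwords */
	#define sve_vq_from_vl(vl)	((vl) / 16)

	/* Example: a 512-bit (64-byte) vector gives vq = 4, so the value
	 * passed to sve_load_state() is 3, the ZCR_ELx.LEN encoding. */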
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
new file mode 100644
index 000000000000..3da94a5bb6b7
--- /dev/null
+++ b/arch/arm64/kvm/pmu.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Arm Limited
+ * Author: Andrew Murray <Andrew.Murray@arm.com>
+ */
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/kvm_hyp.h>
+
+/*
+ * Given the perf event attributes and system type, determine
+ * if we are going to need to switch counters at guest entry/exit.
+ */
+static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
+{
+ /*
+ * With VHE, the guest kernel runs at EL1 and the host at EL2.
+ * If user (EL0) events are excluded as well, the EL filters alone
+ * keep host and guest counts apart, so there is no reason to
+ * switch counters.
+ */
+ if (has_vhe() && attr->exclude_user)
+ return false;
+
+ /* Only switch if attributes are different */
+ return (attr->exclude_host != attr->exclude_guest);
+}
+
+/*
+ * Record events that we may need to switch at guest entry/exit
+ * time.
+ */
+void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
+{
+ struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+ if (!kvm_pmu_switch_needed(attr))
+ return;
+
+ if (!attr->exclude_host)
+ ctx->pmu_events.events_host |= set;
+ if (!attr->exclude_guest)
+ ctx->pmu_events.events_guest |= set;
+}
+
+/*
+ * Stop tracking events
+ */
+void kvm_clr_pmu_events(u32 clr)
+{
+ struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+ ctx->pmu_events.events_host &= ~clr;
+ ctx->pmu_events.events_guest &= ~clr;
+}
+
+/*
+ * Disable host events, enable guest events
+ */
+bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenclr_el0);
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenset_el0);
+
+ return (pmu->events_host || pmu->events_guest);
+}
+
+/*
+ * Disable guest events, enable host events
+ */
+void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenclr_el0);
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenset_el0);
+}
+
+#define PMEVTYPER_READ_CASE(idx) \
+ case idx: \
+ return read_sysreg(pmevtyper##idx##_el0)
+
+#define PMEVTYPER_WRITE_CASE(idx) \
+ case idx: \
+ write_sysreg(val, pmevtyper##idx##_el0); \
+ break
+
+#define PMEVTYPER_CASES(readwrite) \
+ PMEVTYPER_##readwrite##_CASE(0); \
+ PMEVTYPER_##readwrite##_CASE(1); \
+ PMEVTYPER_##readwrite##_CASE(2); \
+ PMEVTYPER_##readwrite##_CASE(3); \
+ PMEVTYPER_##readwrite##_CASE(4); \
+ PMEVTYPER_##readwrite##_CASE(5); \
+ PMEVTYPER_##readwrite##_CASE(6); \
+ PMEVTYPER_##readwrite##_CASE(7); \
+ PMEVTYPER_##readwrite##_CASE(8); \
+ PMEVTYPER_##readwrite##_CASE(9); \
+ PMEVTYPER_##readwrite##_CASE(10); \
+ PMEVTYPER_##readwrite##_CASE(11); \
+ PMEVTYPER_##readwrite##_CASE(12); \
+ PMEVTYPER_##readwrite##_CASE(13); \
+ PMEVTYPER_##readwrite##_CASE(14); \
+ PMEVTYPER_##readwrite##_CASE(15); \
+ PMEVTYPER_##readwrite##_CASE(16); \
+ PMEVTYPER_##readwrite##_CASE(17); \
+ PMEVTYPER_##readwrite##_CASE(18); \
+ PMEVTYPER_##readwrite##_CASE(19); \
+ PMEVTYPER_##readwrite##_CASE(20); \
+ PMEVTYPER_##readwrite##_CASE(21); \
+ PMEVTYPER_##readwrite##_CASE(22); \
+ PMEVTYPER_##readwrite##_CASE(23); \
+ PMEVTYPER_##readwrite##_CASE(24); \
+ PMEVTYPER_##readwrite##_CASE(25); \
+ PMEVTYPER_##readwrite##_CASE(26); \
+ PMEVTYPER_##readwrite##_CASE(27); \
+ PMEVTYPER_##readwrite##_CASE(28); \
+ PMEVTYPER_##readwrite##_CASE(29); \
+ PMEVTYPER_##readwrite##_CASE(30)
+
+/*
+ * Read a value directly from PMEVTYPER<idx> where idx is 0-30
+ * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
+ */
+static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
+{
+ switch (idx) {
+ PMEVTYPER_CASES(READ);
+ case ARMV8_PMU_CYCLE_IDX:
+ return read_sysreg(pmccfiltr_el0);
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Write a value directly to PMEVTYPER<idx> where idx is 0-30
+ * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
+ */
+static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
+{
+ switch (idx) {
+ PMEVTYPER_CASES(WRITE);
+ case ARMV8_PMU_CYCLE_IDX:
+ write_sysreg(val, pmccfiltr_el0);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Modify ARMv8 PMU events to include EL0 counting
+ */
+static void kvm_vcpu_pmu_enable_el0(unsigned long events)
+{
+ u64 typer;
+ u32 counter;
+
+ for_each_set_bit(counter, &events, 32) {
+ typer = kvm_vcpu_pmu_read_evtype_direct(counter);
+ typer &= ~ARMV8_PMU_EXCLUDE_EL0;
+ kvm_vcpu_pmu_write_evtype_direct(counter, typer);
+ }
+}
+
+/*
+ * Modify ARMv8 PMU events to exclude EL0 counting
+ */
+static void kvm_vcpu_pmu_disable_el0(unsigned long events)
+{
+ u64 typer;
+ u32 counter;
+
+ for_each_set_bit(counter, &events, 32) {
+ typer = kvm_vcpu_pmu_read_evtype_direct(counter);
+ typer |= ARMV8_PMU_EXCLUDE_EL0;
+ kvm_vcpu_pmu_write_evtype_direct(counter, typer);
+ }
+}
+
+/*
+ * On VHE, ensure that only guest events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_host_data *host;
+ u32 events_guest, events_host;
+
+ if (!has_vhe())
+ return;
+
+ host_ctxt = vcpu->arch.host_cpu_context;
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ events_guest = host->pmu_events.events_guest;
+ events_host = host->pmu_events.events_host;
+
+ kvm_vcpu_pmu_enable_el0(events_guest);
+ kvm_vcpu_pmu_disable_el0(events_host);
+}
+
+/*
+ * On VHE, ensure that only host events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_host_data *host;
+ u32 events_guest, events_host;
+
+ if (!has_vhe())
+ return;
+
+ host_ctxt = vcpu->arch.host_cpu_context;
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ events_guest = host->pmu_events.events_guest;
+ events_host = host->pmu_events.events_host;
+
+ kvm_vcpu_pmu_enable_el0(events_host);
+ kvm_vcpu_pmu_disable_el0(events_guest);
+}
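The per-cpu state that pmu.c manipulates is assumed to be a pair of counter masks hung off kvm_host_data (the real definition lives in asm/kvm_host.h, outside this hunk):

	struct kvm_pmu_events {
		u32 events_host;	/* counters active only in the host */
		u32 events_guest;	/* counters active only in the guest */
	};

These are the masks that perf's :H and :G event modifiers (exclude_guest/exclude_host) ultimately populate via kvm_set_pmu_events().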
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e2a0500cd7a2..1140b4485575 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -20,20 +20,26 @@
*/
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include <kvm/arm_arch_timer.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
+#include <asm/virt.h>
/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;
@@ -92,6 +98,14 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_VM_IPA_SIZE:
r = kvm_ipa_limit;
break;
+ case KVM_CAP_ARM_SVE:
+ r = system_supports_sve();
+ break;
+ case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+ case KVM_CAP_ARM_PTRAUTH_GENERIC:
+ r = has_vhe() && system_supports_address_auth() &&
+ system_supports_generic_auth();
+ break;
default:
r = 0;
}
@@ -99,13 +113,148 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
+unsigned int kvm_sve_max_vl;
+
+int kvm_arm_init_sve(void)
+{
+ if (system_supports_sve()) {
+ kvm_sve_max_vl = sve_max_virtualisable_vl;
+
+ /*
+ * The get_sve_reg()/set_sve_reg() ioctl interface will need
+ * to be extended with multiple register slice support in
+ * order to support vector lengths greater than
+ * SVE_VL_ARCH_MAX:
+ */
+ if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
+ kvm_sve_max_vl = SVE_VL_ARCH_MAX;
+
+ /*
+ * Don't even try to make use of vector lengths that
+ * aren't available on all CPUs, for now:
+ */
+ if (kvm_sve_max_vl < sve_max_vl)
+ pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
+ kvm_sve_max_vl);
+ }
+
+ return 0;
+}
+
+static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Verify that KVM startup enforced this when SVE was detected: */
+ if (WARN_ON(!has_vhe()))
+ return -EINVAL;
+
+ vcpu->arch.sve_max_vl = kvm_sve_max_vl;
+
+ /*
+ * Userspace can still customize the vector lengths by writing
+ * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
+ * kvm_arm_vcpu_finalize(), which freezes the configuration.
+ */
+ vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+
+ return 0;
+}
+
+/*
+ * Finalize vcpu's maximum SVE vector length, allocating
+ * vcpu->arch.sve_state as necessary.
+ */
+static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
+{
+ void *buf;
+ unsigned int vl;
+
+ vl = vcpu->arch.sve_max_vl;
+
+ /*
+ * Responsibility for these properties is shared between
+ * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
+ * set_sve_vls(). Double-check here just to be sure:
+ */
+ if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
+ vl > SVE_VL_ARCH_MAX))
+ return -EIO;
+
+ buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ vcpu->arch.sve_state = buf;
+ vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+ return 0;
+}
+
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+ switch (feature) {
+ case KVM_ARM_VCPU_SVE:
+ if (!vcpu_has_sve(vcpu))
+ return -EINVAL;
+
+ if (kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ return kvm_vcpu_finalize_sve(vcpu);
+ }
+
+ return -EINVAL;
+}
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
+ return false;
+
+ return true;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kfree(vcpu->arch.sve_state);
+}
+
+static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_sve(vcpu))
+ memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
+}
+
+static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+{
+ /* Support ptrauth only if the system supports these capabilities. */
+ if (!has_vhe())
+ return -EINVAL;
+
+ if (!system_supports_address_auth() ||
+ !system_supports_generic_auth())
+ return -EINVAL;
+ /*
+ * For now, make sure that both the address and generic pointer
+ * authentication features are requested by userspace together.
+ */
+ if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
+ !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
+ return -EINVAL;
+
+ vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+ return 0;
+}
+
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
- * values.
+ * values, except for registers whose reset is deferred until
+ * kvm_arm_vcpu_finalize().
*
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
* ioctl or as part of handling a request issued by another VCPU in the PSCI
@@ -131,6 +280,22 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (loaded)
kvm_arch_vcpu_put(vcpu);
+ if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
+ if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
+ ret = kvm_vcpu_enable_sve(vcpu);
+ if (ret)
+ goto out;
+ }
+ } else {
+ kvm_vcpu_reset_sve(vcpu);
+ }
+
+ if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
+ test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
+ if (kvm_vcpu_enable_ptrauth(vcpu))
+ goto out;
+ }
+
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
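Together with the guest.c changes, the expected userspace sequence for an SVE vcpu is: request the feature at init, optionally shrink the vector length set, then finalize. A hypothetical sketch (vcpu_fd and target are assumed):

	struct kvm_vcpu_init init = { .target = target };
	int feature = KVM_ARM_VCPU_SVE;

	init.features[KVM_ARM_VCPU_SVE / 32] |= 1U << (KVM_ARM_VCPU_SVE % 32);
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))
		perror("KVM_ARM_VCPU_INIT");

	/* optional: write KVM_REG_ARM64_SVE_VLS here, before finalizing */

	if (ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature))
		perror("KVM_ARM_VCPU_FINALIZE");

Until the finalize call succeeds, SVE register accesses fail with -EPERM and kvm_arm_vcpu_is_finalized() reports false.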
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 539feecda5b8..857b226bcdde 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -695,6 +695,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val |= p->regval & ARMV8_PMU_PMCR_MASK;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
+ kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = __vcpu_sys_reg(vcpu, PMCR_EL0)
@@ -850,6 +851,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (p->is_write) {
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+ kvm_vcpu_pmu_restore_guest(vcpu);
} else {
p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
}
@@ -875,6 +877,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* accessing PMCNTENSET_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
kvm_pmu_enable_counter(vcpu, val);
+ kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* accessing PMCNTENCLR_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
@@ -1007,6 +1010,37 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+static bool trap_ptrauth(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ kvm_arm_vcpu_ptrauth_trap(vcpu);
+
+ /*
+ * Return false for both cases as we never skip the trapped
+ * instruction:
+ *
+ * - Either we re-execute the same key register access instruction
+ * after enabling ptrauth.
+ * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+ */
+ return false;
+}
+
+static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+}
+
+#define __PTRAUTH_KEY(k) \
+ { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \
+ .visibility = ptrauth_visibility}
+
+#define PTRAUTH_KEY(k) \
+ __PTRAUTH_KEY(k ## KEYLO_EL1), \
+ __PTRAUTH_KEY(k ## KEYHI_EL1)
+
static bool access_arch_timer(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -1044,25 +1078,20 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
-static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
+static u64 read_id_reg(const struct kvm_vcpu *vcpu,
+ struct sys_reg_desc const *r, bool raz)
{
u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
(u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
- if (id == SYS_ID_AA64PFR0_EL1) {
- if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
- kvm_debug("SVE unsupported for guests, suppressing\n");
-
+ if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
- } else if (id == SYS_ID_AA64ISAR1_EL1) {
- const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_API_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
- if (val & ptrauth_mask)
- kvm_debug("ptrauth unsupported for guests, suppressing\n");
- val &= ~ptrauth_mask;
+ } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
+ val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_API_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
}
return val;
@@ -1078,7 +1107,7 @@ static bool __access_id_reg(struct kvm_vcpu *vcpu,
if (p->is_write)
return write_to_read_only(vcpu, p, r);
- p->regval = read_id_reg(r, raz);
+ p->regval = read_id_reg(vcpu, r, raz);
return true;
}
@@ -1100,6 +1129,81 @@ static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+/* Visibility overrides for SVE-specific control registers */
+static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (vcpu_has_sve(vcpu))
+ return 0;
+
+ return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+}
+
+/* Visibility overrides for SVE-specific ID registers */
+static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (vcpu_has_sve(vcpu))
+ return 0;
+
+ return REG_HIDDEN_USER;
+}
+
+/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
+static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
+{
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
+}
+
+static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, rd);
+
+ p->regval = guest_id_aa64zfr0_el1(vcpu);
+ return true;
+}
+
+static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ u64 val;
+
+ if (WARN_ON(!vcpu_has_sve(vcpu)))
+ return -ENOENT;
+
+ val = guest_id_aa64zfr0_el1(vcpu);
+ return reg_to_user(uaddr, &val, reg->id);
+}
+
+static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ const u64 id = sys_reg_to_index(rd);
+ int err;
+ u64 val;
+
+ if (WARN_ON(!vcpu_has_sve(vcpu)))
+ return -ENOENT;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (val != guest_id_aa64zfr0_el1(vcpu))
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* cpufeature ID register user accessors
*
@@ -1107,16 +1211,18 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
* are stored, and for set_id_reg() we don't allow the effective value
* to be changed.
*/
-static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+static int __get_id_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, void __user *uaddr,
bool raz)
{
const u64 id = sys_reg_to_index(rd);
- const u64 val = read_id_reg(rd, raz);
+ const u64 val = read_id_reg(vcpu, rd, raz);
return reg_to_user(uaddr, &val, id);
}
-static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+static int __set_id_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, void __user *uaddr,
bool raz)
{
const u64 id = sys_reg_to_index(rd);
@@ -1128,7 +1234,7 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
return err;
/* This is what we mean by invariant: you can't change it. */
- if (val != read_id_reg(rd, raz))
+ if (val != read_id_reg(vcpu, rd, raz))
return -EINVAL;
return 0;
@@ -1137,25 +1243,25 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __get_id_reg(rd, uaddr, false);
+ return __get_id_reg(vcpu, rd, uaddr, false);
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __set_id_reg(rd, uaddr, false);
+ return __set_id_reg(vcpu, rd, uaddr, false);
}
static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __get_id_reg(rd, uaddr, true);
+ return __get_id_reg(vcpu, rd, uaddr, true);
}
static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __set_id_reg(rd, uaddr, true);
+ return __set_id_reg(vcpu, rd, uaddr, true);
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -1343,7 +1449,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
- ID_UNALLOCATED(4,4),
+ { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
ID_UNALLOCATED(4,5),
ID_UNALLOCATED(4,6),
ID_UNALLOCATED(4,7),
@@ -1380,10 +1486,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+ { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
+ PTRAUTH_KEY(APIA),
+ PTRAUTH_KEY(APIB),
+ PTRAUTH_KEY(APDA),
+ PTRAUTH_KEY(APDB),
+ PTRAUTH_KEY(APGA),
+
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
@@ -1924,6 +2037,12 @@ static void perform_access(struct kvm_vcpu *vcpu,
{
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
+ /* Check for regs disabled by runtime config */
+ if (sysreg_hidden_from_guest(vcpu, r)) {
+ kvm_inject_undefined(vcpu);
+ return;
+ }
+
/*
* Not having an accessor means that we have configured a trap
* that we don't know how to handle. This certainly qualifies
@@ -2435,6 +2554,10 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
+ /* Check for regs disabled by runtime config */
+ if (sysreg_hidden_from_user(vcpu, r))
+ return -ENOENT;
+
if (r->get_user)
return (r->get_user)(vcpu, r, reg, uaddr);
@@ -2456,6 +2579,10 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
+ /* Check for regs disabled by runtime config */
+ if (sysreg_hidden_from_user(vcpu, r))
+ return -ENOENT;
+
if (r->set_user)
return (r->set_user)(vcpu, r, reg, uaddr);
@@ -2512,7 +2639,8 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
return true;
}
-static int walk_one_sys_reg(const struct sys_reg_desc *rd,
+static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
u64 __user **uind,
unsigned int *total)
{
@@ -2523,6 +2651,9 @@ static int walk_one_sys_reg(const struct sys_reg_desc *rd,
if (!(rd->reg || rd->get_user))
return 0;
+ if (sysreg_hidden_from_user(vcpu, rd))
+ return 0;
+
if (!copy_reg_to_user(rd, uind))
return -EFAULT;
@@ -2551,9 +2682,9 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
int cmp = cmp_sys_reg(i1, i2);
/* target-specific overrides generic entry. */
if (cmp <= 0)
- err = walk_one_sys_reg(i1, &uind, &total);
+ err = walk_one_sys_reg(vcpu, i1, &uind, &total);
else
- err = walk_one_sys_reg(i2, &uind, &total);
+ err = walk_one_sys_reg(vcpu, i2, &uind, &total);
if (err)
return err;
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 3b1bc7f01d0b..2be99508dcb9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -64,8 +64,15 @@ struct sys_reg_desc {
const struct kvm_one_reg *reg, void __user *uaddr);
int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr);
+
+ /* Return mask of REG_* runtime visibility overrides */
+ unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd);
};
+#define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */
+#define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */
+
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
/* Look, we even formatted it for you to paste into the table! */
@@ -102,6 +109,24 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
__vcpu_sys_reg(vcpu, r->reg) = r->val;
}
+static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (likely(!r->visibility))
+ return false;
+
+ return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
+}
+
+static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (likely(!r->visibility))
+ return false;
+
+ return r->visibility(vcpu, r) & REG_HIDDEN_USER;
+}
+
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
const struct sys_reg_desc *i2)
{
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 6b168d32fbff..2162eb32dcec 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -30,7 +30,6 @@ generic-y += pci.h
generic-y += percpu.h
generic-y += pgalloc.h
generic-y += preempt.h
-generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += tlbflush.h
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 61c01db6c292..ecfc4b4b6373 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -23,6 +23,7 @@ config H8300
select HAVE_ARCH_KGDB
select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
+ select UACCESS_MEMCPY
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index f2e22058e488..79cd1e605ec4 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -47,6 +47,7 @@ generic-y += timex.h
generic-y += tlbflush.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += uaccess.h
generic-y += unaligned.h
generic-y += vga.h
generic-y += word-at-a-time.h
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
deleted file mode 100644
index bc8031949d07..000000000000
--- a/arch/h8300/include/asm/uaccess.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_UACCESS_H
-#define _ASM_UACCESS_H
-
-#include <linux/string.h>
-
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user * from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
- }
- }
-
- memcpy(to, (const void __force *)from, n);
- return 0;
-}
-
-static inline __must_check unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
- default:
- break;
- }
- }
-
- memcpy((void __force *)to, from, n);
- return 0;
-}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#include <asm-generic/uaccess.h>
-
-#endif
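The removed h8300 helpers match what the generic path now provides: with UACCESS_MEMCPY selected above, asm-generic/uaccess.h supplies memcpy-based user copies, roughly:

	/* Sketch of the asm-generic fallback selected by UACCESS_MEMCPY */
	static inline __must_check unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		memcpy(to, (const void __force *)from, n);
		return 0;
	}

so no functionality is lost by deleting the per-arch copy.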
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index b32bfa1fe99e..23a979a85f14 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/console.h>
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 4a3d72f76ea2..84bb1ed1b931 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -29,7 +29,6 @@ generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += topology.h
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index a30e58d5f351..7a34092e8b58 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -24,7 +24,6 @@
* User space memory access functions
*/
#include <linux/mm.h>
-#include <asm/segment.h>
#include <asm/sections.h>
/*
diff --git a/arch/ia64/include/asm/segment.h b/arch/ia64/include/asm/segment.h
deleted file mode 100644
index b89e2b3d648f..000000000000
--- a/arch/ia64/include/asm/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_IA64_SEGMENT_H
-#define _ASM_IA64_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif /* _ASM_IA64_SEGMENT_H */
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 56e3d0b685e1..e01df3f2f80d 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -348,3 +348,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index df4ec3ec71d1..7e3d0734b2f3 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -427,3 +427,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 4964947732af..26339e417695 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -433,3 +433,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 677e5bfeff47..70d3200476bf 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -674,7 +674,10 @@ config SGI_IP27
select SYS_HAS_EARLY_PRINTK
select HAVE_PCI
select IRQ_MIPS_CPU
+ select IRQ_DOMAIN_HIERARCHY
select NR_CPUS_DEFAULT_64
+ select PCI_DRIVERS_GENERIC
+ select PCI_XTALK_BRIDGE
select SYS_HAS_CPU_R10000
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -1241,6 +1244,9 @@ config IRQ_GT641XX
config PCI_GT64XXX_PCI0
bool
+config PCI_XTALK_BRIDGE
+ bool
+
config NO_EXCEPT_FILL
bool
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 1454d9f6ab2d..b8f3397c59c9 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -131,9 +131,7 @@ static void __init alchemy_setup_uarts(int ctype)
}
-/* The dmamask must be set for OHCI/EHCI to work */
-static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32);
-static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32);
+static u64 alchemy_all_dmamask = DMA_BIT_MASK(32);
/* Power on callback for the ehci platform driver */
static int alchemy_ehci_power_on(struct platform_device *pdev)
@@ -231,7 +229,7 @@ static void __init alchemy_setup_usb(int ctype)
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 0;
- pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
@@ -251,7 +249,7 @@ static void __init alchemy_setup_usb(int ctype)
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ehci-platform";
pdev->id = 0;
- pdev->dev.dma_mask = &alchemy_ehci_dmamask;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ehci_pdata;
if (platform_device_register(pdev))
@@ -271,7 +269,7 @@ static void __init alchemy_setup_usb(int ctype)
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 1;
- pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
@@ -338,7 +336,11 @@ static struct platform_device au1xxx_eth0_device = {
.name = "au1000-eth",
.id = 0,
.num_resources = MAC_RES_COUNT,
- .dev.platform_data = &au1xxx_eth0_platform_data,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth0_platform_data,
+ },
};
static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
@@ -370,7 +372,11 @@ static struct platform_device au1xxx_eth1_device = {
.name = "au1000-eth",
.id = 1,
.num_resources = MAC_RES_COUNT,
- .dev.platform_data = &au1xxx_eth1_platform_data,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth1_platform_data,
+ },
};
void __init au1xxx_override_eth_cfg(unsigned int port,
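Editor's note: the hunks above collapse the separate OHCI/EHCI mask objects into one shared alchemy_all_dmamask and, more importantly, give the au1000-eth devices a DMA mask at all; dma_map_*() on a device without one fails outright. A minimal sketch of the pattern, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* hypothetical device illustrating the mask setup used above */
static u64 example_dmamask = DMA_BIT_MASK(32);

static struct platform_device example_dev = {
	.name = "example",
	.id   = 0,
	.dev  = {
		.dma_mask          = &example_dmamask,	/* streaming DMA */
		.coherent_dma_mask = DMA_BIT_MASK(32),	/* dma_alloc_coherent() */
	},
};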
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index d4ca97e2ec6c..228cdc736db7 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 25a57895a3a3..298b46b4e9cb 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/clk.h>
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index ff40fbc2f439..21a1168ae301 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -228,7 +228,7 @@ CONFIG_SERIAL_IP22_ZILOG=m
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=m
# CONFIG_HWMON is not set
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_INDYDOG=m
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 81c47e18131b..54db5dedf776 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -271,7 +271,7 @@ CONFIG_I2C_PARPORT_LIGHT=m
CONFIG_I2C_TAOS_EVM=m
CONFIG_I2C_STUB=m
# CONFIG_HWMON is not set
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_MFD_PCF50633=m
CONFIG_PCF50633_ADC=m
CONFIG_PCF50633_GPIO=m
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index a106f8113842..a84475f1924f 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -43,14 +43,14 @@ void __init *plat_get_fdt(void)
/* Already set up */
return (void *)fdt;
- if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
+ if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_passed_dtb)) {
/*
* We booted using the UHI boot protocol, so we have been
* provided with the appropriate device tree for the board.
* Make use of it & search for any machine struct based upon
* the root compatible string.
*/
- fdt = (void *)fw_arg1;
+ fdt = (void *)fw_passed_dtb;
for_each_mips_machine(check_mach) {
match = mips_machine_is_compatible(check_mach, fdt);
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 87b86cdf126a..a03cd4e24f37 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -19,7 +19,6 @@ generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += sections.h
-generic-y += segment.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += user.h
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 42ea1313626c..965f0793a5f9 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -7,18 +7,9 @@
#include <asm/mmzone.h>
struct cpuinfo_ip27 {
-// cpuid_t p_cpuid; /* PROM assigned cpuid */
cnodeid_t p_nodeid; /* my node ID in compact-id-space */
nasid_t p_nasid; /* my node ID in numa-as-id-space */
unsigned char p_slice; /* Physical position on node board */
-#if 0
- unsigned long loops_per_sec;
- unsigned long ipi_count;
- unsigned long irq_attempt[NR_IRQS];
- unsigned long smp_local_irq_count;
- unsigned long prof_multiplier;
- unsigned long prof_counter;
-#endif
};
extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
@@ -30,7 +21,7 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
+#define cpumask_of_pcibus(bus) (cpumask_of_node(pcibus_to_node(bus)))
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 23574c27eb40..a92cd30b48c9 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -801,15 +801,13 @@ struct bridge_err_cmdword {
#define PCI64_ATTR_RMF_SHFT 48
struct bridge_controller {
- struct pci_controller pc;
- struct resource mem;
- struct resource io;
struct resource busn;
struct bridge_regs *base;
- nasid_t nasid;
- unsigned int widget_id;
- u64 baddr;
+ unsigned long baddr;
+ unsigned long intr_addr;
+ struct irq_domain *domain;
unsigned int pci_int[8];
+ nasid_t nasid;
};
#define BRIDGE_CONTROLLER(bus) \
@@ -822,8 +820,4 @@ struct bridge_controller {
#define bridge_clr(bc, reg, val) \
__raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
-extern int request_bridge_irq(struct bridge_controller *bc, int pin);
-
-extern struct pci_ops bridge_pci_ops;
-
#endif /* _ASM_PCI_BRIDGE_H */
diff --git a/arch/mips/include/asm/sn/irq_alloc.h b/arch/mips/include/asm/sn/irq_alloc.h
new file mode 100644
index 000000000000..09b89cecff56
--- /dev/null
+++ b/arch/mips/include/asm/sn/irq_alloc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SN_IRQ_ALLOC_H
+#define __ASM_SN_IRQ_ALLOC_H
+
+struct irq_alloc_info {
+ void *ctrl;
+ nasid_t nasid;
+ int pin;
+};
+
+#endif /* __ASM_SN_IRQ_ALLOC_H */
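Editor's note: irq_alloc_info is the cookie the new PCI bridge code hands through the hierarchical IRQ domain; the hub domain's .alloc callback (hub_domain_alloc, later in this patch) reads it back out of the opaque arg pointer. Usage mirrors bridge_map_irq() from the new xtalk driver:

	struct irq_alloc_info info = {
		.ctrl  = bc,		/* owning bridge_controller */
		.nasid = bc->nasid,	/* node the interrupt is homed on */
		.pin   = slot,		/* bridge interrupt pin */
	};
	int virq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);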
diff --git a/arch/mips/include/asm/xtalk/xtalk.h b/arch/mips/include/asm/xtalk/xtalk.h
index 26d2ed1fa917..680e7efebbaf 100644
--- a/arch/mips/include/asm/xtalk/xtalk.h
+++ b/arch/mips/include/asm/xtalk/xtalk.h
@@ -47,15 +47,6 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t;
#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
-#ifdef CONFIG_PCI
-extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
-#else
-static inline int bridge_probe(nasid_t nasid, int widget, int masterwid)
-{
- return 0;
-}
-#endif
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_XTALK_XTALK_H */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d5e335e6846a..6126b77d5a62 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1973,6 +1973,14 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
panic("Unknown Ingenic Processor ID!");
break;
}
+
+ /*
+	 * Xburst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report
+	 * themselves as MIPS32r2 compatible in their config0 register, but
+	 * they don't actually support this ISA.
+ */
+ if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0)
+ c->isa_level &= ~MIPS_CPU_ISA_M32R2;
}
static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 413863508f6f..d67fb64e908c 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -64,17 +64,11 @@ struct mips_perf_event {
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#define CNTR_ALL 0xffffffff
-#ifdef CONFIG_MIPS_MT_SMP
enum {
T = 0,
V = 1,
P = 2,
} range;
-#else
- #define T
- #define V
- #define P
-#endif
};
static struct mips_perf_event raw_event;
@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
struct perf_event *event = container_of(evt, struct perf_event, hw);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-#ifdef CONFIG_MIPS_MT_SMP
unsigned int range = evt->event_base >> 24;
-#endif /* CONFIG_MIPS_MT_SMP */
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
/* Make sure interrupt enabled. */
MIPS_PERFCTRL_IE;
-#ifdef CONFIG_CPU_BMIPS5000
- {
+ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
/* enable the counter for the calling thread */
cpuc->saved_ctrl[idx] |=
(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
- }
-#else
-#ifdef CONFIG_MIPS_MT_SMP
- if (range > V) {
+ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
/* The counter is processor wide. Set it up to count all TCs. */
pr_debug("Enabling perf counter for all TCs\n");
cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
- } else
-#endif /* CONFIG_MIPS_MT_SMP */
- {
+ } else {
unsigned int cpu, ctrl;
/*
@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
cpuc->saved_ctrl[idx] |= ctrl;
pr_debug("Enabling perf counter for CPU%d\n", cpu);
}
-#endif /* CONFIG_CPU_BMIPS5000 */
/*
* We do not actually let the counter run. Leave it until start().
*/
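Editor's note: the rewrite above trades nested #ifdef blocks for IS_ENABLED(), so every branch is compiled (and type-checked) regardless of configuration while the optimizer still drops the dead ones. Schematically, with a hypothetical CONFIG_FOO:

#ifdef CONFIG_FOO		/* old style: invisible to the compiler when unset */
	do_foo();
#endif

	if (IS_ENABLED(CONFIG_FOO))	/* new style: always parsed, folds to 0/1 */
		do_foo();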
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 9392dfe33f97..0e2dd68ade57 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -366,3 +366,9 @@
425 n32 io_uring_setup sys_io_uring_setup
426 n32 io_uring_enter sys_io_uring_enter
427 n32 io_uring_register sys_io_uring_register
+428 n32 open_tree sys_open_tree
+429 n32 move_mount sys_move_mount
+430 n32 fsopen sys_fsopen
+431 n32 fsconfig sys_fsconfig
+432 n32 fsmount sys_fsmount
+433 n32 fspick sys_fspick
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index cd0c8aa21fba..5eebfa0d155c 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -342,3 +342,9 @@
425 n64 io_uring_setup sys_io_uring_setup
426 n64 io_uring_enter sys_io_uring_enter
427 n64 io_uring_register sys_io_uring_register
+428 n64 open_tree sys_open_tree
+429 n64 move_mount sys_move_mount
+430 n64 fsopen sys_fsopen
+431 n64 fsconfig sys_fsconfig
+432 n64 fsmount sys_fsmount
+433 n64 fspick sys_fspick
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index e849e8ffe4a2..3cc1374e02d0 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -415,3 +415,9 @@
425 o32 io_uring_setup sys_io_uring_setup
426 o32 io_uring_enter sys_io_uring_enter
427 o32 io_uring_register sys_io_uring_register
+428 o32 open_tree sys_open_tree
+429 o32 move_mount sys_move_mount
+430 o32 fsopen sys_fsopen
+431 o32 fsconfig sys_fsconfig
+432 o32 fsmount sys_fsmount
+433 o32 fspick sys_fspick
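Editor's note: the six table entries added per ABI above only wire up syscall numbers; the calls themselves implement the new mount API, which splits mount(2) into discrete configure/create/attach steps. A hedged userspace sketch of the create-and-attach path (libc wrappers did not exist at the time, so it goes through syscall(2); assumes uapi headers that define __NR_fsopen and friends plus the FSCONFIG_*/MOVE_MOUNT_* constants in <linux/mount.h>, and needs CAP_SYS_ADMIN to run):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	/* fsopen: create a filesystem configuration context */
	int fsfd = syscall(__NR_fsopen, "tmpfs", 0);
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}

	/* fsconfig: set parameters, then ask for superblock creation */
	syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
	syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	/* fsmount: turn the context into a detached mount object */
	int mfd = syscall(__NR_fsmount, fsfd, 0, 0);
	if (mfd < 0) {
		perror("fsmount");
		return 1;
	}

	/* move_mount: attach the detached mount at /mnt */
	if (syscall(__NR_move_mount, mfd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		perror("move_mount");
		return 1;
	}

	close(mfd);
	close(fsfd);
	return 0;
}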
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c4f976593061..d6de4cb2e31c 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o
obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o
+obj-$(CONFIG_PCI_XTALK_BRIDGE) += pci-xtalk-bridge.o
#
# These are still pretty much in the old state, watch, go blind.
#
@@ -39,7 +40,7 @@ obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o
obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o
-obj-$(CONFIG_SGI_IP27) += ops-bridge.o pci-ip27.o
+obj-$(CONFIG_SGI_IP27) += pci-ip27.o
obj-$(CONFIG_SGI_IP32) += fixup-ip32.o ops-mace.o pci-ip32.o
obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
deleted file mode 100644
index df95b0da08f2..000000000000
--- a/arch/mips/pci/ops-bridge.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999, 2000, 04, 06 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- */
-#include <linux/pci.h>
-#include <asm/paccess.h>
-#include <asm/pci/bridge.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn0/hub.h>
-
-/*
- * Most of the IOC3 PCI config register aren't present
- * we emulate what is needed for a normal PCI enumeration
- */
-static u32 emulate_ioc3_cfg(int where, int size)
-{
- if (size == 1 && where == 0x3d)
- return 0x01;
- else if (size == 2 && where == 0x3c)
- return 0x0100;
- else if (size == 4 && where == 0x3c)
- return 0x00000100;
-
- return 0;
-}
-
-/*
- * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
- * not really documented, so right now I can't write code which uses it.
- * Therefore we use type 0 accesses for now even though they won't work
- * correctly for PCI-to-PCI bridges.
- *
- * The function is complicated by the ultimate brokenness of the IOC3 chip
- * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
- * accesses and does only decode parts of it's address space.
- */
-
-static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 * value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- volatile void *addr;
- u32 cf, shift, mask;
- int res;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
-
- if (size == 1)
- res = get_dbe(*value, (u8 *) addr);
- else if (size == 2)
- res = get_dbe(*value, (u16 *) addr);
- else
- res = get_dbe(*value, (u32 *) addr);
-
- return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
- *value = emulate_ioc3_cfg(where, size);
- return PCIBIOS_SUCCESSFUL;
- }
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- *value = (cf >> shift) & mask;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 * value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int busno = bus->number;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- volatile void *addr;
- u32 cf, shift, mask;
- int res;
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
-
- if (size == 1)
- res = get_dbe(*value, (u8 *) addr);
- else if (size == 2)
- res = get_dbe(*value, (u16 *) addr);
- else
- res = get_dbe(*value, (u32 *) addr);
-
- return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
- *value = emulate_ioc3_cfg(where, size);
- return PCIBIOS_SUCCESSFUL;
- }
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- *value = (cf >> shift) & mask;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 * value)
-{
- if (!pci_is_root_bus(bus))
- return pci_conf1_read_config(bus, devfn, where, size, value);
-
- return pci_conf0_read_config(bus, devfn, where, size, value);
-}
-
-static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- volatile void *addr;
- u32 cf, shift, mask, smask;
- int res;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
-
- if (size == 1) {
- res = put_dbe(value, (u8 *) addr);
- } else if (size == 2) {
- res = put_dbe(value, (u16 *) addr);
- } else {
- res = put_dbe(value, (u32 *) addr);
- }
-
- if (res)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
- return PCIBIOS_SUCCESSFUL;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- smask = mask << shift;
-
- cf = (cf & ~smask) | ((value & mask) << shift);
- if (put_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- int busno = bus->number;
- volatile void *addr;
- u32 cf, shift, mask, smask;
- int res;
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
-
- if (size == 1) {
- res = put_dbe(value, (u8 *) addr);
- } else if (size == 2) {
- res = put_dbe(value, (u16 *) addr);
- } else {
- res = put_dbe(value, (u32 *) addr);
- }
-
- if (res)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
- return PCIBIOS_SUCCESSFUL;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- smask = mask << shift;
-
- cf = (cf & ~smask) | ((value & mask) << shift);
- if (put_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- if (!pci_is_root_bus(bus))
- return pci_conf1_write_config(bus, devfn, where, size, value);
-
- return pci_conf0_write_config(bus, devfn, where, size, value);
-}
-
-struct pci_ops bridge_pci_ops = {
- .read = pci_read_config,
- .write = pci_write_config,
-};
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 3c177b4d0609..441eb9383b20 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -7,162 +7,7 @@
* Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/dma-direct.h>
-#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
-#include <asm/paccess.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn0/hub.h>
-
-/*
- * Max #PCI busses we can handle; ie, max #PCI bridges.
- */
-#define MAX_PCI_BUSSES 40
-
-/*
- * XXX: No kmalloc available when we do our crosstalk scan,
- * we should try to move it later in the boot process.
- */
-static struct bridge_controller bridges[MAX_PCI_BUSSES];
-
-extern struct pci_ops bridge_pci_ops;
-
-int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
-{
- unsigned long offset = NODE_OFFSET(nasid);
- struct bridge_controller *bc;
- static int num_bridges = 0;
- int slot;
-
- pci_set_flags(PCI_PROBE_ONLY);
-
- printk("a bridge\n");
-
- /* XXX: kludge alert.. */
- if (!num_bridges)
- ioport_resource.end = ~0UL;
-
- bc = &bridges[num_bridges];
-
- bc->pc.pci_ops = &bridge_pci_ops;
- bc->pc.mem_resource = &bc->mem;
- bc->pc.io_resource = &bc->io;
-
- bc->pc.index = num_bridges;
-
- bc->mem.name = "Bridge PCI MEM";
- bc->pc.mem_offset = offset;
- bc->mem.start = 0;
- bc->mem.end = ~0UL;
- bc->mem.flags = IORESOURCE_MEM;
-
- bc->io.name = "Bridge IO MEM";
- bc->pc.io_offset = offset;
- bc->io.start = 0UL;
- bc->io.end = ~0UL;
- bc->io.flags = IORESOURCE_IO;
-
- bc->widget_id = widget_id;
- bc->nasid = nasid;
-
- bc->baddr = (u64)masterwid << 60 | PCI64_ATTR_BAR;
-
- /*
- * point to this bridge
- */
- bc->base = (struct bridge_regs *)RAW_NODE_SWIN_BASE(nasid, widget_id);
-
- /*
- * Clear all pending interrupts.
- */
- bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
-
- /*
- * Until otherwise set up, assume all interrupts are from slot 0
- */
- bridge_write(bc, b_int_device, 0x0);
-
- /*
- * swap pio's to pci mem and io space (big windows)
- */
- bridge_set(bc, b_wid_control, BRIDGE_CTRL_IO_SWAP |
- BRIDGE_CTRL_MEM_SWAP);
-#ifdef CONFIG_PAGE_SIZE_4KB
- bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
-#else /* 16kB or larger */
- bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
-#endif
-
- /*
- * Hmm... IRIX sets additional bits in the address which
- * are documented as reserved in the bridge docs.
- */
- bridge_write(bc, b_wid_int_upper, 0x8000 | (masterwid << 16));
- bridge_write(bc, b_wid_int_lower, 0x01800090); /* PI_INT_PEND_MOD off*/
- bridge_write(bc, b_dir_map, (masterwid << 20)); /* DMA */
- bridge_write(bc, b_int_enable, 0);
-
- for (slot = 0; slot < 8; slot ++) {
- bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
- bc->pci_int[slot] = -1;
- }
- bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
-
- register_pci_controller(&bc->pc);
-
- num_bridges++;
-
- return 0;
-}
-
-/*
- * All observed requests have pin == 1. We could have a global here, that
- * gets incremented and returned every time - unfortunately, pci_map_irq
- * may be called on the same device over and over, and need to return the
- * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
- *
- * A given PCI device, in general, should be able to intr any of the cpus
- * on any one of the hubs connected to its xbow.
- */
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return 0;
-}
-
-static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
-{
- while (dev->bus->parent) {
- /* Move up the chain of bridges. */
- dev = dev->bus->self;
- }
-
- return dev;
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- struct pci_dev *rdev = bridge_root_dev(dev);
- int slot = PCI_SLOT(rdev->devfn);
- int irq;
-
- irq = bc->pci_int[slot];
- if (irq == -1) {
- irq = request_bridge_irq(bc, slot);
- if (irq < 0)
- return irq;
-
- bc->pci_int[slot] = irq;
- }
- dev->irq = irq;
-
- return 0;
-}
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
@@ -177,29 +22,6 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr & ~(0xffUL << 56);
}
-/*
- * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
- * to find the slot number in sense of the bridge device register.
- * XXX This also means multiple devices might rely on conflicting bridge
- * settings.
- */
-
-static inline void pci_disable_swapping(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(dev->devfn);
-
- /* Turn off byte swapping */
- bridge->b_device[slot].reg &= ~BRIDGE_DEV_SWAP_DIR;
- bridge->b_widget.w_tflush; /* Flush */
-}
-
-static void pci_fixup_ioc3(struct pci_dev *d)
-{
- pci_disable_swapping(d);
-}
-
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
@@ -209,6 +31,3 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif /* CONFIG_NUMA */
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
- pci_fixup_ioc3);
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
new file mode 100644
index 000000000000..bcf7f559789a
--- /dev/null
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2003 Christoph Hellwig (hch@lst.de)
+ * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/dma-direct.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/paccess.h>
+#include <asm/sn/irq_alloc.h>
+
+/*
+ * Most of the IOC3 PCI config registers aren't present, so we
+ * emulate what is needed for a normal PCI enumeration.
+ */
+static u32 emulate_ioc3_cfg(int where, int size)
+{
+ if (size == 1 && where == 0x3d)
+ return 0x01;
+ else if (size == 2 && where == 0x3c)
+ return 0x0100;
+ else if (size == 4 && where == 0x3c)
+ return 0x00000100;
+
+ return 0;
+}
+
+static void bridge_disable_swapping(struct pci_dev *dev)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+ int slot = PCI_SLOT(dev->devfn);
+
+ /* Turn off byte swapping */
+ bridge_clr(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+ bridge_read(bc, b_widget.w_tflush); /* Flush */
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+ bridge_disable_swapping);
+
+
+/*
+ * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
+ * not really documented, so right now I can't write code which uses it.
+ * Therefore we use type 0 accesses for now even though they won't work
+ * correctly for PCI-to-PCI bridges.
+ *
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
+ * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
+ * accesses and only decodes parts of its address space.
+ */
+static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf, shift, mask;
+ int res;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+ if (size == 1)
+ res = get_dbe(*value, (u8 *)addr);
+ else if (size == 2)
+ res = get_dbe(*value, (u16 *)addr);
+ else
+ res = get_dbe(*value, (u32 *)addr);
+
+ return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
+ *value = emulate_ioc3_cfg(where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ *value = (cf >> shift) & mask;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int busno = bus->number;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf, shift, mask;
+ int res;
+
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+ if (size == 1)
+ res = get_dbe(*value, (u8 *)addr);
+ else if (size == 2)
+ res = get_dbe(*value, (u16 *)addr);
+ else
+ res = get_dbe(*value, (u32 *)addr);
+
+ return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
+ *value = emulate_ioc3_cfg(where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ *value = (cf >> shift) & mask;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (!pci_is_root_bus(bus))
+ return pci_conf1_read_config(bus, devfn, where, size, value);
+
+ return pci_conf0_read_config(bus, devfn, where, size, value);
+}
+
+static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf, shift, mask, smask;
+ int res;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+ if (size == 1)
+ res = put_dbe(value, (u8 *)addr);
+ else if (size == 2)
+ res = put_dbe(value, (u16 *)addr);
+ else
+ res = put_dbe(value, (u32 *)addr);
+
+ if (res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+ return PCIBIOS_SUCCESSFUL;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ smask = mask << shift;
+
+ cf = (cf & ~smask) | ((value & mask) << shift);
+ if (put_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ int busno = bus->number;
+ void *addr;
+ u32 cf, shift, mask, smask;
+ int res;
+
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+ if (size == 1)
+ res = put_dbe(value, (u8 *)addr);
+ else if (size == 2)
+ res = put_dbe(value, (u16 *)addr);
+ else
+ res = put_dbe(value, (u32 *)addr);
+
+ if (res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+ return PCIBIOS_SUCCESSFUL;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ smask = mask << shift;
+
+ cf = (cf & ~smask) | ((value & mask) << shift);
+ if (put_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (!pci_is_root_bus(bus))
+ return pci_conf1_write_config(bus, devfn, where, size, value);
+
+ return pci_conf0_write_config(bus, devfn, where, size, value);
+}
+
+static struct pci_ops bridge_pci_ops = {
+ .read = pci_read_config,
+ .write = pci_write_config,
+};
+
+struct bridge_irq_chip_data {
+ struct bridge_controller *bc;
+ nasid_t nasid;
+};
+
+static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+#ifdef CONFIG_NUMA
+ struct bridge_irq_chip_data *data = d->chip_data;
+ int bit = d->parent_data->hwirq;
+ int pin = d->hwirq;
+ nasid_t nasid;
+ int ret, cpu;
+
+ ret = irq_chip_set_affinity_parent(d, mask, force);
+ if (ret >= 0) {
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ bridge_write(data->bc, b_int_addr[pin].addr,
+ (((data->bc->intr_addr >> 30) & 0x30000) |
+ bit | (nasid << 8)));
+ bridge_read(data->bc, b_wid_tflush);
+ }
+ return ret;
+#else
+ return irq_chip_set_affinity_parent(d, mask, force);
+#endif
+}
+
+struct irq_chip bridge_irq_chip = {
+ .name = "BRIDGE",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = bridge_set_affinity
+};
+
+static int bridge_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct bridge_irq_chip_data *data;
+ struct irq_alloc_info *info = arg;
+ int ret;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret >= 0) {
+ data->bc = info->ctrl;
+ data->nasid = info->nasid;
+ irq_domain_set_info(domain, virq, info->pin, &bridge_irq_chip,
+ data, handle_level_irq, NULL, NULL);
+ } else {
+ kfree(data);
+ }
+
+ return ret;
+}
+
+static void bridge_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs)
+ return;
+
+ kfree(irqd->chip_data);
+ irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
+
+static int bridge_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ struct bridge_irq_chip_data *data = irqd->chip_data;
+ struct bridge_controller *bc = data->bc;
+ int bit = irqd->parent_data->hwirq;
+ int pin = irqd->hwirq;
+ u32 device;
+
+ bridge_write(bc, b_int_addr[pin].addr,
+ (((bc->intr_addr >> 30) & 0x30000) |
+ bit | (data->nasid << 8)));
+ bridge_set(bc, b_int_enable, (1 << pin));
+ bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
+
+ /*
+	 * Enable sending of an interrupt clear packet to the hub on a
+	 * high-to-low transition of the interrupt pin.
+ *
+ * IRIX sets additional bits in the address which are documented as
+ * reserved in the bridge docs.
+ */
+ bridge_set(bc, b_int_mode, (1UL << pin));
+
+ /*
+	 * We assume the bridge has a 1:1 mapping between devices
+	 * (slots) and interrupt pins.
+ */
+ device = bridge_read(bc, b_int_device);
+ device &= ~(7 << (pin*3));
+ device |= (pin << (pin*3));
+ bridge_write(bc, b_int_device, device);
+
+ bridge_read(bc, b_wid_tflush);
+ return 0;
+}
+
+static void bridge_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *irqd)
+{
+ struct bridge_irq_chip_data *data = irqd->chip_data;
+
+ bridge_clr(data->bc, b_int_enable, (1 << irqd->hwirq));
+ bridge_read(data->bc, b_wid_tflush);
+}
+
+static const struct irq_domain_ops bridge_domain_ops = {
+ .alloc = bridge_domain_alloc,
+ .free = bridge_domain_free,
+ .activate = bridge_domain_activate,
+ .deactivate = bridge_domain_deactivate
+};
+
+/*
+ * All observed requests have pin == 1. We could have a global here that
+ * gets incremented and returned every time - unfortunately, pci_map_irq
+ * may be called on the same device over and over, and needs to return the
+ * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
+ *
+ * A given PCI device, in general, should be able to interrupt any of
+ * the cpus on any one of the hubs connected to its xbow.
+ */
+static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+ struct irq_alloc_info info;
+ int irq;
+
+ irq = bc->pci_int[slot];
+ if (irq == -1) {
+ info.ctrl = bc;
+ info.nasid = bc->nasid;
+ info.pin = slot;
+
+ irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);
+ if (irq < 0)
+ return irq;
+
+ bc->pci_int[slot] = irq;
+ }
+ return irq;
+}
+
+static int bridge_probe(struct platform_device *pdev)
+{
+ struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bridge_controller *bc;
+ struct pci_host_bridge *host;
+ struct irq_domain *domain, *parent;
+ struct fwnode_handle *fn;
+ int slot;
+ int err;
+
+ parent = irq_get_default_host();
+ if (!parent)
+ return -ENODEV;
+ fn = irq_domain_alloc_named_fwnode("BRIDGE");
+ if (!fn)
+ return -ENOMEM;
+ domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
+ &bridge_domain_ops, NULL);
+ irq_domain_free_fwnode(fn);
+ if (!domain)
+ return -ENOMEM;
+
+ pci_set_flags(PCI_PROBE_ONLY);
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*bc));
+ if (!host) {
+ err = -ENOMEM;
+ goto err_remove_domain;
+ }
+
+ bc = pci_host_bridge_priv(host);
+
+ bc->busn.name = "Bridge PCI busn";
+ bc->busn.start = 0;
+ bc->busn.end = 0xff;
+ bc->busn.flags = IORESOURCE_BUS;
+
+ bc->domain = domain;
+
+ pci_add_resource_offset(&host->windows, &bd->mem, bd->mem_offset);
+ pci_add_resource_offset(&host->windows, &bd->io, bd->io_offset);
+ pci_add_resource(&host->windows, &bc->busn);
+
+ err = devm_request_pci_bus_resources(dev, &host->windows);
+ if (err < 0)
+ goto err_free_resource;
+
+ bc->nasid = bd->nasid;
+
+ bc->baddr = (u64)bd->masterwid << 60 | PCI64_ATTR_BAR;
+ bc->base = (struct bridge_regs *)bd->bridge_addr;
+ bc->intr_addr = bd->intr_addr;
+
+ /*
+ * Clear all pending interrupts.
+ */
+ bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
+
+ /*
+ * Until otherwise set up, assume all interrupts are from slot 0
+ */
+ bridge_write(bc, b_int_device, 0x0);
+
+ /*
+ * disable swapping for big windows
+ */
+ bridge_clr(bc, b_wid_control,
+ BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP);
+#ifdef CONFIG_PAGE_SIZE_4KB
+ bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#else /* 16kB or larger */
+ bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#endif
+
+ /*
+ * Hmm... IRIX sets additional bits in the address which
+ * are documented as reserved in the bridge docs.
+ */
+ bridge_write(bc, b_wid_int_upper,
+ ((bc->intr_addr >> 32) & 0xffff) | (bd->masterwid << 16));
+ bridge_write(bc, b_wid_int_lower, bc->intr_addr & 0xffffffff);
+ bridge_write(bc, b_dir_map, (bd->masterwid << 20)); /* DMA */
+ bridge_write(bc, b_int_enable, 0);
+
+ for (slot = 0; slot < 8; slot++) {
+ bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+ bc->pci_int[slot] = -1;
+ }
+ bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
+
+ host->dev.parent = dev;
+ host->sysdata = bc;
+ host->busnr = 0;
+ host->ops = &bridge_pci_ops;
+ host->map_irq = bridge_map_irq;
+ host->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(host);
+ if (err < 0)
+ goto err_free_resource;
+
+ pci_bus_claim_resources(host->bus);
+ pci_bus_add_devices(host->bus);
+
+ platform_set_drvdata(pdev, host->bus);
+
+ return 0;
+
+err_free_resource:
+ pci_free_resource_list(&host->windows);
+err_remove_domain:
+ irq_domain_remove(domain);
+ return err;
+}
+
+static int bridge_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+
+ irq_domain_remove(bc->domain);
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+static struct platform_driver bridge_driver = {
+ .probe = bridge_probe,
+ .remove = bridge_remove,
+ .driver = {
+ .name = "xtalk-bridge",
+ }
+};
+
+builtin_platform_driver(bridge_driver);
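Editor's note: the driver above stacks a per-bridge IRQ domain on the hub domain: its .alloc delegates upward with irq_domain_alloc_irqs_parent() and only then installs its own chip and per-IRQ data. Stripped to the bones (child_chip and the fixed hwirq are placeholders):

static int child_domain_alloc(struct irq_domain *d, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	int ret;

	/* let the parent (hub) domain pick the underlying hwirq first */
	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret >= 0)
		irq_domain_set_info(d, virq, 0 /* hwirq */, &child_chip,
				    NULL, handle_level_irq, NULL, NULL);
	return ret;
}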
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 37ad26716579..0b2002e02a47 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -3,6 +3,7 @@
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <asm/paccess.h>
#include <asm/sgi/ip22.h>
@@ -25,6 +26,8 @@ static struct sgiwd93_platform_data sgiwd93_0_pd = {
.irq = SGI_WD93_0_IRQ,
};
+static u64 sgiwd93_0_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device sgiwd93_0_device = {
.name = "sgiwd93",
.id = 0,
@@ -32,6 +35,8 @@ static struct platform_device sgiwd93_0_device = {
.resource = sgiwd93_0_resources,
.dev = {
.platform_data = &sgiwd93_0_pd,
+ .dma_mask = &sgiwd93_0_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -49,6 +54,8 @@ static struct sgiwd93_platform_data sgiwd93_1_pd = {
.irq = SGI_WD93_1_IRQ,
};
+static u64 sgiwd93_1_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device sgiwd93_1_device = {
.name = "sgiwd93",
.id = 1,
@@ -56,6 +63,8 @@ static struct platform_device sgiwd93_1_device = {
.resource = sgiwd93_1_resources,
.dev = {
.platform_data = &sgiwd93_1_pd,
+ .dma_mask = &sgiwd93_1_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -96,6 +105,8 @@ static struct resource sgiseeq_0_resources[] = {
static struct sgiseeq_platform_data eth0_pd;
+static u64 sgiseeq_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device eth0_device = {
.name = "sgiseeq",
.id = 0,
@@ -103,6 +114,8 @@ static struct platform_device eth0_device = {
.resource = sgiseeq_0_resources,
.dev = {
.platform_data = &eth0_pd,
+ .dma_mask = &sgiseeq_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 6074efeff894..066b33f50bcc 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -184,5 +184,7 @@ void __init plat_mem_setup(void)
ioc3_eth_init();
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL;
set_io_port_base(IO_BASE);
}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index a32f843cdbe0..37be04975831 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -12,22 +12,20 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/sched.h>
#include <asm/io.h>
#include <asm/irq_cpu.h>
-#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>
+#include <asm/sn/irq_alloc.h>
struct hub_irq_data {
- struct bridge_controller *bc;
u64 *irq_mask[2];
cpuid_t cpu;
- int bit;
- int pin;
};
static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
@@ -54,7 +52,7 @@ static void enable_hub_irq(struct irq_data *d)
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
- set_bit(hd->bit, mask);
+ set_bit(d->hwirq, mask);
__raw_writeq(mask[0], hd->irq_mask[0]);
__raw_writeq(mask[1], hd->irq_mask[1]);
}
@@ -64,71 +62,11 @@ static void disable_hub_irq(struct irq_data *d)
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
- clear_bit(hd->bit, mask);
+ clear_bit(d->hwirq, mask);
__raw_writeq(mask[0], hd->irq_mask[0]);
__raw_writeq(mask[1], hd->irq_mask[1]);
}
-static unsigned int startup_bridge_irq(struct irq_data *d)
-{
- struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
- struct bridge_controller *bc;
- nasid_t nasid;
- u32 device;
- int pin;
-
- if (!hd)
- return -EINVAL;
-
- pin = hd->pin;
- bc = hd->bc;
-
- nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu));
- bridge_write(bc, b_int_addr[pin].addr,
- (0x20000 | hd->bit | (nasid << 8)));
- bridge_set(bc, b_int_enable, (1 << pin));
- bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
-
- /*
- * Enable sending of an interrupt clear packt to the hub on a high to
- * low transition of the interrupt pin.
- *
- * IRIX sets additional bits in the address which are documented as
- * reserved in the bridge docs.
- */
- bridge_set(bc, b_int_mode, (1UL << pin));
-
- /*
- * We assume the bridge to have a 1:1 mapping between devices
- * (slots) and intr pins.
- */
- device = bridge_read(bc, b_int_device);
- device &= ~(7 << (pin*3));
- device |= (pin << (pin*3));
- bridge_write(bc, b_int_device, device);
-
- bridge_read(bc, b_wid_tflush);
-
- enable_hub_irq(d);
-
- return 0; /* Never anything pending. */
-}
-
-static void shutdown_bridge_irq(struct irq_data *d)
-{
- struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
- struct bridge_controller *bc;
-
- if (!hd)
- return;
-
- disable_hub_irq(d);
-
- bc = hd->bc;
- bridge_clr(bc, b_int_enable, (1 << hd->pin));
- bridge_read(bc, b_wid_tflush);
-}
-
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
nasid_t nasid;
@@ -144,9 +82,6 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
}
-
- /* Make sure it's not already pending when we connect it. */
- REMOTE_HUB_CLR_INTR(nasid, hd->bit);
}
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
@@ -163,7 +98,7 @@ static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
setup_hub_mask(hd, mask);
if (irqd_is_started(d))
- startup_bridge_irq(d);
+ enable_hub_irq(d);
irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
@@ -172,20 +107,22 @@ static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
static struct irq_chip hub_irq_type = {
.name = "HUB",
- .irq_startup = startup_bridge_irq,
- .irq_shutdown = shutdown_bridge_irq,
.irq_mask = disable_hub_irq,
.irq_unmask = enable_hub_irq,
.irq_set_affinity = set_affinity_hub_irq,
};
-int request_bridge_irq(struct bridge_controller *bc, int pin)
+static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
+ struct irq_alloc_info *info = arg;
struct hub_irq_data *hd;
struct hub_data *hub;
struct irq_desc *desc;
int swlevel;
- int irq;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
@@ -196,46 +133,41 @@ int request_bridge_irq(struct bridge_controller *bc, int pin)
kfree(hd);
return -EAGAIN;
}
- irq = swlevel + IP27_HUB_IRQ_BASE;
-
- hd->bc = bc;
- hd->bit = swlevel;
- hd->pin = pin;
- irq_set_chip_data(irq, hd);
+ irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
+ handle_level_irq, NULL, NULL);
/* use CPU connected to nearest hub */
- hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid));
+ hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid));
setup_hub_mask(hd, &hub->h_cpus);
- desc = irq_to_desc(irq);
- desc->irq_common_data.node = bc->nasid;
+ /* Make sure it's not already pending when we connect it. */
+ REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
+
+ desc = irq_to_desc(virq);
+ desc->irq_common_data.node = info->nasid;
cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
- return irq;
+ return 0;
}
-void ip27_hub_irq_init(void)
+static void hub_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- int i;
+ struct irq_data *irqd;
- for (i = IP27_HUB_IRQ_BASE;
- i < (IP27_HUB_IRQ_BASE + IP27_HUB_IRQ_COUNT); i++)
- irq_set_chip_and_handler(i, &hub_irq_type, handle_level_irq);
-
- /*
- * Some interrupts are reserved by hardware or by software convention.
- * Mark these as reserved right away so they won't be used accidentally
- * later.
- */
- for (i = 0; i <= BASE_PCI_IRQ; i++)
- set_bit(i, hub_irq_map);
-
- set_bit(IP_PEND0_6_63, hub_irq_map);
+ if (nr_irqs > 1)
+ return;
- for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
- set_bit(i, hub_irq_map);
+ irqd = irq_domain_get_irq_data(domain, virq);
+ if (irqd && irqd->chip_data)
+ kfree(irqd->chip_data);
}
+static const struct irq_domain_ops hub_domain_ops = {
+ .alloc = hub_domain_alloc,
+ .free = hub_domain_free,
+};
+
/*
* This code is unnecessarily complex, because we do
* intr enabling. Basically, once we grab the set of intrs we need
@@ -252,7 +184,9 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
{
cpuid_t cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
u64 pend0;
+ int irq;
/* copied from Irix intpend0() */
pend0 = LOCAL_HUB_L(PI_INT_PEND0);
@@ -276,7 +210,14 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
generic_smp_call_function_interrupt();
} else
#endif
- generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE);
+ {
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend0));
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
+ }
LOCAL_HUB_L(PI_INT_PEND0);
}
@@ -285,7 +226,9 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
{
cpuid_t cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
u64 pend1;
+ int irq;
/* copied from Irix intpend0() */
pend1 = LOCAL_HUB_L(PI_INT_PEND1);
@@ -294,7 +237,12 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
if (!pend1)
return;
- generic_handle_irq(__ffs(pend1) + IP27_HUB_IRQ_BASE + 64);
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
LOCAL_HUB_L(PI_INT_PEND1);
}
@@ -325,11 +273,41 @@ void install_ipi(void)
void __init arch_init_irq(void)
{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+ int i;
+
mips_cpu_irq_init();
- ip27_hub_irq_init();
+
+ /*
+ * Some interrupts are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be used accidentally
+ * later.
+ */
+ for (i = 0; i <= BASE_PCI_IRQ; i++)
+ set_bit(i, hub_irq_map);
+
+ set_bit(IP_PEND0_6_63, hub_irq_map);
+
+ for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
+ set_bit(i, hub_irq_map);
+
+ fn = irq_domain_alloc_named_fwnode("HUB");
+ WARN_ON(fn == NULL);
+ if (!fn)
+ return;
+ domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
+ &hub_domain_ops, NULL);
+ WARN_ON(domain == NULL);
+ if (!domain)
+ return;
+
+ irq_set_default_host(domain);
irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
- irq_set_chained_handler(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
+ domain);
irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
- irq_set_chained_handler(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
+ domain);
}
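Editor's note: with hub interrupts now living in a linear domain instead of at a fixed IP27_HUB_IRQ_BASE offset, the pending-bit handlers above must reverse-map the hardware number to a virq before dispatching; an unmapped bit is treated as spurious:

	int hwirq = __ffs(pend0);	/* lowest pending hardware bit */
	int virq  = irq_linear_revmap(domain, hwirq);

	if (virq)
		generic_handle_irq(virq);
	else
		spurious_interrupt();	/* no mapping: count it and move on */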
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index ce06aaa115ae..bd5cb855c6e5 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -9,6 +9,9 @@
#include <linux/kernel.h>
#include <linux/smp.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+#include <asm/sn/addrs.h>
#include <asm/sn/types.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/hub.h>
@@ -20,7 +23,48 @@
#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
#define BASE_XBOW_PORT 8 /* Lowest external port */
-extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
+static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
+{
+ struct xtalk_bridge_platform_data *bd;
+ struct platform_device *pdev;
+ unsigned long offset;
+
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ goto no_mem;
+ pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(bd);
+ goto no_mem;
+ }
+
+ offset = NODE_OFFSET(nasid);
+
+ bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
+ bd->intr_addr = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
+ bd->nasid = nasid;
+ bd->masterwid = masterwid;
+
+ bd->mem.name = "Bridge PCI MEM";
+ bd->mem.start = offset + (widget << SWIN_SIZE_BITS);
+ bd->mem.end = bd->mem.start + SWIN_SIZE - 1;
+ bd->mem.flags = IORESOURCE_MEM;
+ bd->mem_offset = offset;
+
+ bd->io.name = "Bridge PCI IO";
+ bd->io.start = offset + (widget << SWIN_SIZE_BITS);
+ bd->io.end = bd->io.start + SWIN_SIZE - 1;
+ bd->io.flags = IORESOURCE_IO;
+ bd->io_offset = offset;
+
+ platform_device_add_data(pdev, bd, sizeof(*bd));
+ platform_device_add(pdev);
+ pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
+ return;
+
+no_mem:
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+}
static int probe_one_port(nasid_t nasid, int widget, int masterwid)
{
@@ -31,13 +75,10 @@ static int probe_one_port(nasid_t nasid, int widget, int masterwid)
(RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
- printk(KERN_INFO "Cpu %d, Nasid 0x%x, widget 0x%x (partnum 0x%x) is ",
- smp_processor_id(), nasid, widget, partnum);
-
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
case XBRIDGE_WIDGET_PART_NUM:
- bridge_probe(nasid, widget, masterwid);
+ bridge_platform_create(nasid, widget, masterwid);
break;
default:
break;
@@ -52,8 +93,6 @@ static int xbow_probe(nasid_t nasid)
klxbow_t *xbow_p;
unsigned masterwid, i;
- printk("is xbow\n");
-
/*
* found xbow, so may have multiple bridges
* need to probe xbow
@@ -117,19 +156,17 @@ static void xtalk_probe_node(cnodeid_t nid)
(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
- printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
- smp_processor_id(), nasid, partnum);
-
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
- bridge_probe(nasid, 0x8, 0xa);
+ bridge_platform_create(nasid, 0x8, 0xa);
break;
case XBOW_WIDGET_PART_NUM:
case XXBOW_WIDGET_PART_NUM:
+ pr_info("xtalk:n%d/0 xbow widget\n", nasid);
xbow_probe(nasid);
break;
default:
- printk(" unknown widget??\n");
+ pr_info("xtalk:n%d/0 unknown widget (0x%x)\n", nasid, partnum);
break;
}
}
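Editor's note: bridge_platform_create() above follows the usual alloc / add_data / add triple; platform_device_add_data() copies the payload, so the bridge data stays owned by the caller. A more defensive variant of the same sequence, sketched for reference (the error handling is illustrative, not from the patch):

	pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;
	if (platform_device_add_data(pdev, bd, sizeof(*bd)) ||
	    platform_device_add(pdev)) {
		platform_device_put(pdev);	/* drops the embedded device ref */
		return -ENODEV;
	}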
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 70a1ab66d252..46537c2ca86a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -26,6 +26,7 @@
#include <linux/leds.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 55559ca0efe4..2245169c72af 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -4,7 +4,7 @@
#
config NDS32
- def_bool y
+ def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -51,20 +51,20 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CSUM
- def_bool y
+ def_bool y
config GENERIC_HWEIGHT
- def_bool y
+ def_bool y
config GENERIC_LOCKBREAK
- def_bool y
+ def_bool y
depends on PREEMPT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
config STACKTRACE_SUPPORT
- def_bool y
+ def_bool y
config FIX_EARLYCON_MEM
def_bool y
@@ -79,11 +79,11 @@ config NR_CPUS
default 1
config MMU
- def_bool y
+ def_bool y
config NDS32_BUILTIN_DTB
- string "Builtin DTB"
- default ""
+ string "Builtin DTB"
+ default ""
help
User can use it to specify the dts of the SoC
endmenu
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index f67a327777b5..f43b44d692ca 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -6,7 +6,6 @@ generic-y += bugs.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += cmpxchg.h
-generic-y += cmpxchg-local.h
generic-y += compat.h
generic-y += cputime.h
generic-y += device.h
@@ -37,7 +36,6 @@ generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += switch_to.h
generic-y += timex.h
diff --git a/arch/nds32/include/asm/assembler.h b/arch/nds32/include/asm/assembler.h
index c3855782a541..5e7c56926049 100644
--- a/arch/nds32/include/asm/assembler.h
+++ b/arch/nds32/include/asm/assembler.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_ASSEMBLER_H__
diff --git a/arch/nds32/include/asm/barrier.h b/arch/nds32/include/asm/barrier.h
index faafc373ea6c..16413172fd50 100644
--- a/arch/nds32/include/asm/barrier.h
+++ b/arch/nds32/include/asm/barrier.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_ASM_BARRIER_H
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index 7414fcbbab4e..e75212c76b20 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_BITFIELD_H__
diff --git a/arch/nds32/include/asm/cache.h b/arch/nds32/include/asm/cache.h
index 347db4881c5f..fc3c41b59169 100644
--- a/arch/nds32/include/asm/cache.h
+++ b/arch/nds32/include/asm/cache.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_CACHE_H__
diff --git a/arch/nds32/include/asm/cache_info.h b/arch/nds32/include/asm/cache_info.h
index 38ec458ba543..e89d8078f3a6 100644
--- a/arch/nds32/include/asm/cache_info.h
+++ b/arch/nds32/include/asm/cache_info.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
struct cache_info {
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 8b26198d51bb..d9ac7e6408ef 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_CACHEFLUSH_H__
diff --git a/arch/nds32/include/asm/current.h b/arch/nds32/include/asm/current.h
index b4dcd22b7bcb..65d30096142b 100644
--- a/arch/nds32/include/asm/current.h
+++ b/arch/nds32/include/asm/current.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_CURRENT_H
diff --git a/arch/nds32/include/asm/delay.h b/arch/nds32/include/asm/delay.h
index 519ba97acb6e..56ea3894f8f8 100644
--- a/arch/nds32/include/asm/delay.h
+++ b/arch/nds32/include/asm/delay.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_DELAY_H__
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h
index 02250626b9f0..1c8e56d7013d 100644
--- a/arch/nds32/include/asm/elf.h
+++ b/arch/nds32/include/asm/elf.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASMNDS32_ELF_H
diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h
index 0e60e153a71a..5a4bf11e5800 100644
--- a/arch/nds32/include/asm/fixmap.h
+++ b/arch/nds32/include/asm/fixmap.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_FIXMAP_H
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h
index baf178bf1d0b..5213c65c2e0b 100644
--- a/arch/nds32/include/asm/futex.h
+++ b/arch/nds32/include/asm/futex.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_FUTEX_H__
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
index 425d546cb059..b3a82c97ded3 100644
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_HIGHMEM_H
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 5ef8ae5ba833..16f262322b8f 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_IO_H
diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h
index 2bfd00f8bc48..fb45ec46bb1b 100644
--- a/arch/nds32/include/asm/irqflags.h
+++ b/arch/nds32/include/asm/irqflags.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <asm/nds32.h>
diff --git a/arch/nds32/include/asm/l2_cache.h b/arch/nds32/include/asm/l2_cache.h
index 37dd5ef61de8..3ea48e19e6de 100644
--- a/arch/nds32/include/asm/l2_cache.h
+++ b/arch/nds32/include/asm/l2_cache.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef L2_CACHE_H
diff --git a/arch/nds32/include/asm/linkage.h b/arch/nds32/include/asm/linkage.h
index e708c8bdb926..a696469abb70 100644
--- a/arch/nds32/include/asm/linkage.h
+++ b/arch/nds32/include/asm/linkage.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_LINKAGE_H
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
index 60efc726b56e..940d32842793 100644
--- a/arch/nds32/include/asm/memory.h
+++ b/arch/nds32/include/asm/memory.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_MEMORY_H
@@ -15,14 +15,6 @@
#define PHYS_OFFSET (0x0)
#endif
-#ifndef __virt_to_bus
-#define __virt_to_bus __virt_to_phys
-#endif
-
-#ifndef __bus_to_virt
-#define __bus_to_virt __phys_to_virt
-#endif
-
/*
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
diff --git a/arch/nds32/include/asm/mmu.h b/arch/nds32/include/asm/mmu.h
index 88b9ee8c1064..89d63afee455 100644
--- a/arch/nds32/include/asm/mmu.h
+++ b/arch/nds32/include/asm/mmu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_MMU_H
diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h
index fd7d13cefccc..b8fd3d189fdc 100644
--- a/arch/nds32/include/asm/mmu_context.h
+++ b/arch/nds32/include/asm/mmu_context.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_MMU_CONTEXT_H
diff --git a/arch/nds32/include/asm/module.h b/arch/nds32/include/asm/module.h
index 16cf9c7237ad..a3a08e993c65 100644
--- a/arch/nds32/include/asm/module.h
+++ b/arch/nds32/include/asm/module.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_MODULE_H
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h
index 68c38151c3e4..4994f6a9e0a0 100644
--- a/arch/nds32/include/asm/nds32.h
+++ b/arch/nds32/include/asm/nds32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_NDS32_H_
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h
index 947f0491c9a7..8feb1fa12f01 100644
--- a/arch/nds32/include/asm/page.h
+++ b/arch/nds32/include/asm/page.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Copyright (C) 2005-2017 Andes Technology Corporation
*/
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index 3c5fee5b5759..3cbc749c79aa 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_PGALLOC_H
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index ee59c1f9e4fc..c70cc56bec09 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_PGTABLE_H
diff --git a/arch/nds32/include/asm/proc-fns.h b/arch/nds32/include/asm/proc-fns.h
index bedc4f59e064..27c617fa77af 100644
--- a/arch/nds32/include/asm/proc-fns.h
+++ b/arch/nds32/include/asm/proc-fns.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_PROCFNS_H__
diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h
index 72024f8bc129..b82369c7659d 100644
--- a/arch/nds32/include/asm/processor.h
+++ b/arch/nds32/include/asm/processor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PROCESSOR_H
diff --git a/arch/nds32/include/asm/ptrace.h b/arch/nds32/include/asm/ptrace.h
index c4538839055c..919ee223620c 100644
--- a/arch/nds32/include/asm/ptrace.h
+++ b/arch/nds32/include/asm/ptrace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PTRACE_H
diff --git a/arch/nds32/include/asm/shmparam.h b/arch/nds32/include/asm/shmparam.h
index fd1cff64b68e..3aeee946973d 100644
--- a/arch/nds32/include/asm/shmparam.h
+++ b/arch/nds32/include/asm/shmparam.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_SHMPARAM_H
diff --git a/arch/nds32/include/asm/string.h b/arch/nds32/include/asm/string.h
index 179272caa540..cae8fe16de98 100644
--- a/arch/nds32/include/asm/string.h
+++ b/arch/nds32/include/asm/string.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_STRING_H
diff --git a/arch/nds32/include/asm/swab.h b/arch/nds32/include/asm/swab.h
index e01a755a37d2..362a466f2976 100644
--- a/arch/nds32/include/asm/swab.h
+++ b/arch/nds32/include/asm/swab.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_SWAB_H__
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h
index 174b8571d362..899b2fb4b52f 100644
--- a/arch/nds32/include/asm/syscall.h
+++ b/arch/nds32/include/asm/syscall.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
// Copyright (C) 2005-2017 Andes Technology Corporation
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h
index da32101b455d..f3b16f602cb5 100644
--- a/arch/nds32/include/asm/syscalls.h
+++ b/arch/nds32/include/asm/syscalls.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_SYSCALLS_H
diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h
index bff741ff337b..c135111ec44e 100644
--- a/arch/nds32/include/asm/thread_info.h
+++ b/arch/nds32/include/asm/thread_info.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_THREAD_INFO_H
@@ -42,7 +42,6 @@ struct thread_info {
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
- * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
#define TIF_SIGPENDING 1
@@ -50,7 +49,6 @@ struct thread_info {
#define TIF_SINGLESTEP 3
#define TIF_NOTIFY_RESUME 4 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
-#define TIF_USEDFPU 16
#define TIF_POLLING_NRFLAG 17
#define TIF_MEMDIE 18
#define TIF_FREEZE 19
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h
index d5ae571c8d30..a8aff1c8b4f4 100644
--- a/arch/nds32/include/asm/tlb.h
+++ b/arch/nds32/include/asm/tlb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASMNDS32_TLB_H
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h
index 38ee769b18d8..97155366ea01 100644
--- a/arch/nds32/include/asm/tlbflush.h
+++ b/arch/nds32/include/asm/tlbflush.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_TLBFLUSH_H
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
index 116598b47c4d..8916ad9f9f13 100644
--- a/arch/nds32/include/asm/uaccess.h
+++ b/arch/nds32/include/asm/uaccess.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMANDES_UACCESS_H
diff --git a/arch/nds32/include/asm/unistd.h b/arch/nds32/include/asm/unistd.h
index b586a2862beb..bf5e2d440913 100644
--- a/arch/nds32/include/asm/unistd.h
+++ b/arch/nds32/include/asm/unistd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/nds32/include/asm/vdso.h b/arch/nds32/include/asm/vdso.h
index af2c6afc2469..89b113ffc3dc 100644
--- a/arch/nds32/include/asm/vdso.h
+++ b/arch/nds32/include/asm/vdso.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Copyright (C) 2005-2017 Andes Technology Corporation
*/
diff --git a/arch/nds32/include/asm/vdso_datapage.h b/arch/nds32/include/asm/vdso_datapage.h
index 79db5a12ca5e..74c68802021e 100644
--- a/arch/nds32/include/asm/vdso_datapage.h
+++ b/arch/nds32/include/asm/vdso_datapage.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2012 ARM Limited
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_VDSO_DATAPAGE_H
@@ -20,6 +20,7 @@ struct vdso_data {
u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
u32 cs_mult; /* clocksource multiplier */
u32 cs_shift; /* Cycle to nanosecond divisor (power of two) */
+ u32 hrtimer_res; /* hrtimer resolution */
u64 cs_cycle_last; /* last cycle value */
u64 cs_mask; /* clocksource mask */
diff --git a/arch/nds32/include/asm/vdso_timer_info.h b/arch/nds32/include/asm/vdso_timer_info.h
index 50ba117cff12..328439ce37db 100644
--- a/arch/nds32/include/asm/vdso_timer_info.h
+++ b/arch/nds32/include/asm/vdso_timer_info.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
extern struct timer_info_t timer_info;
diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h
index 2d3213f5e595..b5d58ea8decb 100644
--- a/arch/nds32/include/uapi/asm/auxvec.h
+++ b/arch/nds32/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_AUXVEC_H
diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h
index a23f6f3a2468..511e653c709d 100644
--- a/arch/nds32/include/uapi/asm/byteorder.h
+++ b/arch/nds32/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_BYTEORDER_H__
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h
index 4cdca9b23974..73793662815c 100644
--- a/arch/nds32/include/uapi/asm/cachectl.h
+++ b/arch/nds32/include/uapi/asm/cachectl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_CACHECTL
diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h
index e3fb723ee362..2977534a6bd3 100644
--- a/arch/nds32/include/uapi/asm/param.h
+++ b/arch/nds32/include/uapi/asm/param.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PARAM_H
diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h
index 358c99e399d0..1a6e01c00e6f 100644
--- a/arch/nds32/include/uapi/asm/ptrace.h
+++ b/arch/nds32/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __UAPI_ASM_NDS32_PTRACE_H
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
index 58afc416473e..628ff6b75825 100644
--- a/arch/nds32/include/uapi/asm/sigcontext.h
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_SIGCONTEXT_H
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index 4ec8f543103f..c691735017ed 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#define __ARCH_WANT_STAT64
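The long run of nds32 header changes above swaps "// SPDX-..." for "/* SPDX-... */". The kernel's SPDX convention requires the C block-comment form in headers because headers may be pulled into assembly files, where "//" comments are not reliably stripped; block comments survive both C and .S inclusion. A sketch of a header written to that rule:

    /* SPDX-License-Identifier: GPL-2.0 */
    /* example.h: safe to include from both C and assembly */
    #ifndef __EXAMPLE_H
    #define __EXAMPLE_H

    #define EXAMPLE_MAGIC   0x2a    /* visible to assembly too */

    #ifndef __ASSEMBLY__
    static inline int example_magic(void) { return EXAMPLE_MAGIC; }
    #endif

    #endif /* __EXAMPLE_H */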
diff --git a/arch/nds32/kernel/.gitignore b/arch/nds32/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/nds32/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c
index 0a7bc696dd55..aab98e447feb 100644
--- a/arch/nds32/kernel/cacheinfo.c
+++ b/arch/nds32/kernel/cacheinfo.c
@@ -13,7 +13,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->level = level;
this_leaf->type = type;
this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
- this_leaf->number_of_sets = CACHE_SET(cache_type);;
+ this_leaf->number_of_sets = CACHE_SET(cache_type);
this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
this_leaf->size = this_leaf->number_of_sets *
this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
index 97ba15cd4180..1df02a793364 100644
--- a/arch/nds32/kernel/ex-exit.S
+++ b/arch/nds32/kernel/ex-exit.S
@@ -163,7 +163,7 @@ resume_kernel:
gie_disable
lwi $t0, [tsk+#TSK_TI_PREEMPT]
bnez $t0, no_work_pending
-need_resched:
+
lwi $t0, [tsk+#TSK_TI_FLAGS]
andi $p1, $t0, #_TIF_NEED_RESCHED
beqz $p1, no_work_pending
@@ -173,7 +173,7 @@ need_resched:
beqz $t0, no_work_pending
jal preempt_schedule_irq
- b need_resched
+ b no_work_pending
#endif
/*
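The ex-exit.S hunk above drops the local need_resched: retry label and branches straight to no_work_pending after calling preempt_schedule_irq. That is safe because preempt_schedule_irq() re-checks need_resched() in its own loop before returning. A simplified C rendering of that core-scheduler behavior (a sketch, not the exact kernel/sched/core.c source):

    /* simplified: preempt_schedule_irq() loops until need_resched() is
     * clear, so the assembly caller needs no retry loop of its own */
    asmlinkage void preempt_schedule_irq(void)
    {
        do {
            preempt_disable();
            local_irq_enable();
            __schedule(true);       /* preemption path */
            local_irq_disable();
            sched_preempt_enable_no_resched();
        } while (need_resched());
    }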
diff --git a/arch/nds32/kernel/nds32_ksyms.c b/arch/nds32/kernel/nds32_ksyms.c
index 5ecebd0e60cb..20719e42ae36 100644
--- a/arch/nds32/kernel/nds32_ksyms.c
+++ b/arch/nds32/kernel/nds32_ksyms.c
@@ -23,9 +23,3 @@ EXPORT_SYMBOL(memzero);
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__arch_clear_user);
-
-/* cache handling */
-EXPORT_SYMBOL(cpu_icache_inval_all);
-EXPORT_SYMBOL(cpu_dcache_wbinval_all);
-EXPORT_SYMBOL(cpu_dma_inval_range);
-EXPORT_SYMBOL(cpu_dma_wb_range);
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index 016f15891f6d..90bcae6f8554 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -220,6 +220,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift;
+ vdso_data->hrtimer_res = hrtimer_resolution;
vdso_write_end(vdso_data);
}
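update_vsyscall() now publishes hrtimer_resolution into the vDSO data page, so userspace clock_getres() no longer depends on a compile-time constant (see the gettimeofday.c hunk further down). Reads of the data page are normally bracketed by a sequence counter so that a concurrent kernel update is retried; a minimal sketch of that reader pattern, where vdso_read_begin()/vdso_read_retry() are assumed seqcount helpers from the nds32 vDSO:

    /* sketch: read one field from the vDSO data page consistently */
    static notrace u32 vdso_read_hrtimer_res(struct vdso_data *vdata)
    {
        u32 seq, res;

        do {
            seq = vdso_read_begin(vdata);   /* assumed helper */
            res = vdata->hrtimer_res;
        } while (vdso_read_retry(vdata, seq));

        return res;
    }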
diff --git a/arch/nds32/kernel/vdso/.gitignore b/arch/nds32/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..f8b69d84238e
--- /dev/null
+++ b/arch/nds32/kernel/vdso/.gitignore
@@ -0,0 +1 @@
+vdso.lds
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile
index e6c50a701313..8792fda19a64 100644
--- a/arch/nds32/kernel/vdso/Makefile
+++ b/arch/nds32/kernel/vdso/Makefile
@@ -11,10 +11,8 @@ obj-vdso := note.o datapage.o sigreturn.o gettimeofday.o
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-ccflags-y += -fPIC -Wl,-shared -g
+ccflags-y := -shared -fno-common -fno-builtin -nostdlib -fPIC -Wl,-shared -g \
+ -Wl,-soname=linux-vdso.so.1 -Wl,--hash-style=sysv
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
@@ -28,7 +26,7 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
@@ -40,9 +38,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-endef
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
@@ -65,7 +61,7 @@ gettimeofday.o : gettimeofday.c FORCE
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $(real-prereqs) -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
quiet_cmd_vdsocc = VDSOC $@
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c
index 038721af40e3..b02581891c33 100644
--- a/arch/nds32/kernel/vdso/gettimeofday.c
+++ b/arch/nds32/kernel/vdso/gettimeofday.c
@@ -208,6 +208,8 @@ static notrace int clock_getres_fallback(clockid_t _clk_id,
notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
{
+ struct vdso_data *vdata = __get_datapage();
+
if (res == NULL)
return 0;
switch (clk_id) {
@@ -215,7 +217,7 @@ notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
case CLOCK_MONOTONIC:
case CLOCK_MONOTONIC_RAW:
res->tv_sec = 0;
- res->tv_nsec = CLOCK_REALTIME_RES;
+ res->tv_nsec = vdata->hrtimer_res;
break;
case CLOCK_REALTIME_COARSE:
case CLOCK_MONOTONIC_COARSE:
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 1a4ab1b7525f..55703b03d172 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -260,7 +260,7 @@ void __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
- pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];;
+ pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];
if (pgprot_val(flags)) {
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index d7ef3512504a..a8ffdd007f6c 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -33,7 +33,6 @@ generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += spinlock.h
generic-y += topology.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 1919cc5e0f11..164be10062bc 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -34,7 +34,6 @@ generic-y += qspinlock.h
generic-y += qrwlock_types.h
generic-y += qrwlock.h
generic-y += sections.h
-generic-y += segment.h
generic-y += shmparam.h
generic-y += switch_to.h
generic-y += topology.h
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index eb97a8e7c8aa..e8fb2a764f46 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -30,7 +30,6 @@
#include <linux/elf.h>
#include <asm/thread_info.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index c605bdad1746..17c00d06d91b 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -39,7 +39,6 @@
#include <linux/device.h>
#include <asm/sections.h>
-#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/setup.h>
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index d8981cbb852a..6ed7293ef007 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -35,7 +35,6 @@
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
-#include <asm/segment.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/unwinder.h>
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index abe87e54e231..e63cb4a91a3e 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -32,7 +32,6 @@
#include <linux/blkdev.h> /* for initrd_* */
#include <linux/pagemap.h>
-#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 6c253a2e86bc..7f9f50161dfe 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/init.h>
-#include <asm/segment.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index ed2d8cc94909..005ee8ad0446 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -19,7 +19,6 @@ generic-y += mmiowb.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += seccomp.h
-generic-y += segment.h
generic-y += trace_clock.h
generic-y += user.h
generic-y += vga.h
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index fe8ca623add8..c9e377d59232 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -424,3 +424,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
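Entries 428-433 wire up the new mount-API syscalls on parisc (the powerpc table below receives the same numbers). For reference, a userspace sketch of how these calls chain together, using raw syscall(2) since libc wrappers did not exist at the time; the flag values come from <linux/mount.h> as introduced by this series, and the SYS_* numbers are assumed to be present in the installed headers (on these tables they are the raw numbers 428-433):

    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/syscall.h>
    #include <linux/mount.h>

    /* sketch: mount /dev/sda1 as ext4 on /mnt with the new API */
    int mount_new_api(void)
    {
        int fsfd, mfd;

        fsfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
        if (fsfd < 0)
            return -1;
        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
                "/dev/sda1", 0);
        syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
        mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
        if (mfd < 0)
            return -1;
        return syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
                       MOVE_MOUNT_F_EMPTY_PATH);
    }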
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 1d1183048cfd..2781ebf6add4 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -93,6 +93,7 @@
#define VMALLOC_REGION_ID NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID NON_LINEAR_REGION_ID(H_VMEMMAP_START)
+#define INVALID_REGION_ID (VMEMMAP_REGION_ID + 1)
/*
 * Defines the address of the vmemmap area, in its own region on
@@ -119,14 +120,15 @@ static inline int get_region_id(unsigned long ea)
if (id == 0)
return USER_REGION_ID;
+ if (id != (PAGE_OFFSET >> 60))
+ return INVALID_REGION_ID;
+
if (ea < H_KERN_VIRT_START)
return LINEAR_MAP_REGION_ID;
- VM_BUG_ON(id != 0xc);
BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
region_id = NON_LINEAR_REGION_ID(ea);
- VM_BUG_ON(region_id > VMEMMAP_REGION_ID);
return region_id;
}
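With INVALID_REGION_ID defined, get_region_id() stops relying on VM_BUG_ON() to trap effective addresses outside the 0xc kernel quadrant and instead reports them, letting callers fail gracefully. A hypothetical caller, as a sketch:

    /* hypothetical: reject an EA that decodes to no known kernel region */
    static bool kernel_ea_is_valid(unsigned long ea)
    {
        int id = get_region_id(ea);

        return id != INVALID_REGION_ID && id != USER_REGION_ID;
    }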
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e6b5bb012ccb..013c76a0a03e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -201,6 +201,8 @@ struct kvmppc_spapr_tce_iommu_table {
struct kref kref;
};
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
struct kvmppc_spapr_tce_table {
struct list_head list;
struct kvm *kvm;
@@ -210,6 +212,7 @@ struct kvmppc_spapr_tce_table {
u64 offset; /* in pages */
u64 size; /* window size in pages */
struct list_head iommu_tables;
+ struct mutex alloc_lock;
struct page *pages[0];
};
@@ -222,6 +225,7 @@ extern struct kvm_device_ops kvm_xics_ops;
struct kvmppc_xive;
struct kvmppc_xive_vcpu;
extern struct kvm_device_ops kvm_xive_ops;
+extern struct kvm_device_ops kvm_xive_native_ops;
struct kvmppc_passthru_irqmap;
@@ -312,7 +316,11 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
- struct kvmppc_xive *xive;
+ struct kvmppc_xive *xive; /* Current XIVE device in use */
+ struct {
+ struct kvmppc_xive *native;
+ struct kvmppc_xive *xics_on_xive;
+ } xive_devices;
struct kvmppc_passthru_irqmap *pimap;
#endif
struct kvmppc_ops *kvm_ops;
@@ -449,6 +457,7 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */
+#define KVMPPC_IRQ_XIVE 3 /* XIVE native exploitation mode */
#define MMIO_HPTE_CACHE_SIZE 4
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ac22b28ae78d..bc892380e6cd 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -197,10 +197,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
- unsigned long *ua, unsigned long **prmap);
-extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
- unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
@@ -273,6 +269,7 @@ union kvmppc_one_reg {
u64 addr;
u64 length;
} vpaval;
+ u64 xive_timaval[2];
};
struct kvmppc_ops {
@@ -480,6 +477,9 @@ extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
+extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested);
+
#else
static inline void __init kvm_cma_reserve(void)
{}
@@ -594,6 +594,22 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
+}
+
+extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_native_init_module(void);
+extern void kvmppc_xive_native_exit_module(void);
+extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val);
+extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val);
+
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority) { return -1; }
@@ -617,6 +633,21 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_native_init_module(void) { }
+static inline void kvmppc_xive_native_exit_module(void) { }
+static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val)
+{ return 0; }
+static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val)
+{ return -ENOENT; }
+
#endif /* CONFIG_KVM_XIVE */
#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
@@ -665,6 +696,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index b579a943407b..eaf76f57023a 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -23,6 +23,7 @@
* same offset regardless of where the code is executing
*/
extern void __iomem *xive_tima;
+extern unsigned long xive_tima_os;
/*
* Offset in the TM area of our current execution level (provided by
@@ -73,6 +74,8 @@ struct xive_q {
u32 esc_irq;
atomic_t count;
atomic_t pending_count;
+ u64 guest_qaddr;
+ u32 guest_qshift;
};
/* Global enable flags for the XIVE support */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 26ca425f4c2c..b0f72dea8b11 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -482,6 +482,8 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */
#define KVM_REG_PPC_ICP_PPRI_MASK 0xff
+#define KVM_REG_PPC_VP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d)
+
/* Device control API: PPC-specific devices */
#define KVM_DEV_MPIC_GRP_MISC 1
#define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */
@@ -677,4 +679,48 @@ struct kvm_ppc_cpu_char {
#define KVM_XICS_PRESENTED (1ULL << 43)
#define KVM_XICS_QUEUED (1ULL << 44)
+/* POWER9 XIVE Native Interrupt Controller */
+#define KVM_DEV_XIVE_GRP_CTRL 1
+#define KVM_DEV_XIVE_RESET 1
+#define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_SYNC 5 /* 64-bit source identifier */
+
+/* Layout of 64-bit XIVE source attribute values */
+#define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0)
+#define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1)
+
+/* Layout of 64-bit XIVE source configuration attribute values */
+#define KVM_XIVE_SOURCE_PRIORITY_SHIFT 0
+#define KVM_XIVE_SOURCE_PRIORITY_MASK 0x7
+#define KVM_XIVE_SOURCE_SERVER_SHIFT 3
+#define KVM_XIVE_SOURCE_SERVER_MASK 0xfffffff8ULL
+#define KVM_XIVE_SOURCE_MASKED_SHIFT 32
+#define KVM_XIVE_SOURCE_MASKED_MASK 0x100000000ULL
+#define KVM_XIVE_SOURCE_EISN_SHIFT 33
+#define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL
+
+/* Layout of 64-bit EQ identifier */
+#define KVM_XIVE_EQ_PRIORITY_SHIFT 0
+#define KVM_XIVE_EQ_PRIORITY_MASK 0x7
+#define KVM_XIVE_EQ_SERVER_SHIFT 3
+#define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL
+
+/* Layout of EQ configuration values (64 bytes) */
+struct kvm_ppc_xive_eq {
+ __u32 flags;
+ __u32 qshift;
+ __u64 qaddr;
+ __u32 qtoggle;
+ __u32 qindex;
+ __u8 pad[40];
+};
+
+#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001
+
+#define KVM_XIVE_TIMA_PAGE_OFFSET 0
+#define KVM_XIVE_ESB_PAGE_OFFSET 4
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index f2ed3ef4b129..862e2890bd3d 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -767,7 +767,6 @@ static void cacheinfo_create_index_dir(struct cache *cache, int index,
cache_dir->kobj, "index%d", index);
if (rc) {
kobject_put(&index_dir->kobj);
- kfree(index_dir);
return;
}
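The cacheinfo hunk removes the explicit kfree() after kobject_put() because, once kobject_init_and_add() has run, the kobject's release callback owns the memory; freeing it again in the error path was a double free. The ownership rule, reduced to a sketch (the struct name matches this file, but the layout shown is assumed):

    struct cache_index_dir {        /* assumed: embeds a kobject */
        struct kobject kobj;
        /* ... */
    };

    static void cache_index_release(struct kobject *kobj)
    {
        kfree(container_of(kobj, struct cache_index_dir, kobj));
    }

    /* error path: kobject_put() invokes cache_index_release(), which
     * frees index_dir; no explicit kfree() may follow it */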
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 00f5a63c8d9a..103655d84b4b 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -509,3 +509,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 3223aec88b2c..4c67cc79de7c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -94,7 +94,7 @@ endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
-kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o book3s_xive_native.o
kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o
kvm-book3s_64-module-objs := \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 10c5579d20ce..61a212d0daf0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -651,6 +651,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+ case KVM_REG_PPC_VP_STATE:
+ if (!vcpu->arch.xive_vcpu) {
+ r = -ENXIO;
+ break;
+ }
+ if (xive_enabled())
+ r = kvmppc_xive_native_get_vp(vcpu, val);
+ else
+ r = -ENXIO;
+ break;
+#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
*val = get_reg_val(id, vcpu->arch.fscr);
break;
@@ -724,6 +736,18 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+ case KVM_REG_PPC_VP_STATE:
+ if (!vcpu->arch.xive_vcpu) {
+ r = -ENXIO;
+ break;
+ }
+ if (xive_enabled())
+ r = kvmppc_xive_native_set_vp(vcpu, val);
+ else
+ r = -ENXIO;
+ break;
+#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
vcpu->arch.fscr = set_reg_val(id, *val);
break;
@@ -891,6 +915,17 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
kvmppc_rtas_tokens_free(kvm);
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
+
+#ifdef CONFIG_KVM_XICS
+ /*
+ * Free the XIVE devices which are not directly freed by the
+ * device 'release' method
+ */
+ kfree(kvm->arch.xive_devices.native);
+ kvm->arch.xive_devices.native = NULL;
+ kfree(kvm->arch.xive_devices.xics_on_xive);
+ kvm->arch.xive_devices.xics_on_xive = NULL;
+#endif /* CONFIG_KVM_XICS */
}
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
@@ -1050,6 +1085,9 @@ static int kvmppc_book3s_init(void)
if (xics_on_xive()) {
kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
+ kvmppc_xive_native_init_module();
+ kvm_register_device_ops(&kvm_xive_native_ops,
+ KVM_DEV_TYPE_XIVE);
} else
#endif
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1060,8 +1098,10 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
- if (xics_on_xive())
+ if (xics_on_xive()) {
kvmppc_xive_exit_module();
+ kvmppc_xive_native_exit_module();
+ }
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index f100e331e69b..66270e07449a 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -228,11 +228,33 @@ static void release_spapr_tce_table(struct rcu_head *head)
unsigned long i, npages = kvmppc_tce_pages(stt->size);
for (i = 0; i < npages; i++)
- __free_page(stt->pages[i]);
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
kfree(stt);
}
+static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
+ unsigned long sttpage)
+{
+ struct page *page = stt->pages[sttpage];
+
+ if (page)
+ return page;
+
+ mutex_lock(&stt->alloc_lock);
+ page = stt->pages[sttpage];
+ if (!page) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ WARN_ON_ONCE(!page);
+ if (page)
+ stt->pages[sttpage] = page;
+ }
+ mutex_unlock(&stt->alloc_lock);
+
+ return page;
+}
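kvm_spapr_get_tce_page() above is classic double-checked allocation: an unlocked fast path, then a re-check under stt->alloc_lock so two vCPUs faulting on the same backing page allocate it only once. The same shape, reduced to its essentials as a sketch:

    /* sketch: allocate-once-on-demand under a mutex */
    static void *get_or_alloc_slot(void **slot, struct mutex *lock)
    {
        void *p = *slot;                /* unlocked fast path */

        if (p)
            return p;

        mutex_lock(lock);
        p = *slot;                      /* re-check under the lock */
        if (!p) {
            p = (void *)get_zeroed_page(GFP_KERNEL);
            if (p)
                *slot = p;
        }
        mutex_unlock(lock);
        return p;
    }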
+
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
@@ -241,7 +263,10 @@ static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
return VM_FAULT_SIGBUS;
- page = stt->pages[vmf->pgoff];
+ page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
+ if (!page)
+ return VM_FAULT_OOM;
+
get_page(page);
vmf->page = page;
return 0;
@@ -296,7 +321,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvmppc_spapr_tce_table *siter;
unsigned long npages, size = args->size;
int ret = -ENOMEM;
- int i;
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
@@ -318,14 +342,9 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
stt->offset = args->offset;
stt->size = size;
stt->kvm = kvm;
+ mutex_init(&stt->alloc_lock);
INIT_LIST_HEAD_RCU(&stt->iommu_tables);
- for (i = 0; i < npages; i++) {
- stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!stt->pages[i])
- goto fail;
- }
-
mutex_lock(&kvm->lock);
/* Check this LIOBN hasn't been previously allocated */
@@ -352,17 +371,28 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
if (ret >= 0)
return ret;
- fail:
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
-
kfree(stt);
fail_acct:
kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
return ret;
}
+static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
+ unsigned long *ua)
+{
+ unsigned long gfn = tce >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
+
+ memslot = search_memslots(kvm_memslots(kvm), gfn);
+ if (!memslot)
+ return -EINVAL;
+
+ *ua = __gfn_to_hva_memslot(memslot, gfn) |
+ (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+ return 0;
+}
+
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
unsigned long tce)
{
@@ -378,7 +408,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_TOO_HARD;
- if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
return H_TOO_HARD;
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -397,6 +427,36 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
return H_SUCCESS;
}
+/*
+ * Handles TCE requests for emulated devices.
+ * Puts guest TCE values to the table and expects user space to convert them.
+ * Cannot fail so kvmppc_tce_validate must be called before it.
+ */
+static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+ unsigned long idx, unsigned long tce)
+{
+ struct page *page;
+ u64 *tbl;
+ unsigned long sttpage;
+
+ idx -= stt->offset;
+ sttpage = idx / TCES_PER_PAGE;
+ page = stt->pages[sttpage];
+
+ if (!page) {
+ /* We allow any TCE, not just with read|write permissions */
+ if (!tce)
+ return;
+
+ page = kvm_spapr_get_tce_page(stt, sttpage);
+ if (!page)
+ return;
+ }
+ tbl = page_to_virt(page);
+
+ tbl[idx % TCES_PER_PAGE] = tce;
+}
+
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
unsigned long entry)
{
@@ -551,7 +611,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
goto unlock_exit;
}
@@ -612,7 +672,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return ret;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
ret = H_TOO_HARD;
goto unlock_exit;
}
@@ -647,7 +707,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
}
tce = be64_to_cpu(tce);
- if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 2206bc729b9a..484b47fa3960 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -66,8 +66,6 @@
#endif
-#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
-
/*
* Finds a TCE table descriptor by LIOBN.
*
@@ -88,6 +86,25 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
EXPORT_SYMBOL_GPL(kvmppc_find_table);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
+ unsigned long *ua, unsigned long **prmap)
+{
+ unsigned long gfn = tce >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
+
+ memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
+ if (!memslot)
+ return -EINVAL;
+
+ *ua = __gfn_to_hva_memslot(memslot, gfn) |
+ (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+ if (prmap)
+ *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+
+ return 0;
+}
+
/*
* Validates TCE address.
* At the moment flags and page mask are validated.
@@ -111,7 +128,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
- if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
return H_TOO_HARD;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -129,7 +146,6 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
return H_SUCCESS;
}
-#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
/* Note on the use of page_address() in real mode,
*
@@ -161,13 +177,9 @@ static u64 *kvmppc_page_address(struct page *page)
/*
* Handles TCE requests for emulated devices.
* Puts guest TCE values to the table and expects user space to convert them.
- * Called in both real and virtual modes.
- * Cannot fail so kvmppc_tce_validate must be called before it.
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
+ * Cannot fail so kvmppc_rm_tce_validate must be called before it.
*/
-void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
unsigned long idx, unsigned long tce)
{
struct page *page;
@@ -175,35 +187,48 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
idx -= stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
+ /*
+ * page must not be NULL in real mode,
+ * kvmppc_rm_ioba_validate() must have taken care of this.
+ */
+ WARN_ON_ONCE_RM(!page);
tbl = kvmppc_page_address(page);
tbl[idx % TCES_PER_PAGE] = tce;
}
-EXPORT_SYMBOL_GPL(kvmppc_tce_put);
-long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
- unsigned long *ua, unsigned long **prmap)
+/*
+ * TCE pages are allocated on demand in virtual mode (see
+ * kvm_spapr_get_tce_page()); kvmppc_rm_tce_put() cannot allocate them
+ * here in real mode.
+ * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCE page is
+ * already allocated or not required (when clearing a tce entry).
+ */
+static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long ioba, unsigned long npages, bool clearing)
{
- unsigned long gfn = tce >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
+ unsigned long i, idx, sttpage, sttpages;
+ unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);
- memslot = search_memslots(kvm_memslots(kvm), gfn);
- if (!memslot)
- return -EINVAL;
-
- *ua = __gfn_to_hva_memslot(memslot, gfn) |
- (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+ if (ret)
+ return ret;
+ /*
+ * clearing==true means only zero TCEs will be stored, so
+ * kvmppc_rm_tce_put() will not need to allocate any pages.
+ */
+ if (clearing)
+ return H_SUCCESS;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (prmap)
- *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-#endif
+ idx = (ioba >> stt->page_shift) - stt->offset;
+ sttpage = idx / TCES_PER_PAGE;
+ sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
+ TCES_PER_PAGE;
+ for (i = sttpage; i < sttpage + sttpages; ++i)
+ if (!stt->pages[i])
+ return H_TOO_HARD;
- return 0;
+ return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
unsigned long entry, unsigned long *hpa,
enum dma_data_direction *direction)
@@ -381,7 +406,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (!stt)
return H_TOO_HARD;
- ret = kvmppc_ioba_validate(stt, ioba, 1);
+ ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
if (ret != H_SUCCESS)
return ret;
@@ -390,7 +415,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
entry = ioba >> stt->page_shift;
@@ -409,7 +434,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
}
}
- kvmppc_tce_put(stt, entry, tce);
+ kvmppc_rm_tce_put(stt, entry, tce);
return H_SUCCESS;
}
@@ -480,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (tce_list & (SZ_4K - 1))
return H_PARAMETER;
- ret = kvmppc_ioba_validate(stt, ioba, npages);
+ ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
if (ret != H_SUCCESS)
return ret;
@@ -492,7 +517,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
struct mm_iommu_table_group_mem_t *mem;
- if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
return H_TOO_HARD;
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -508,7 +533,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
* We do not require memory to be preregistered in this case
* so lock rmap and do __find_linux_pte_or_hugepte().
*/
- if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
return H_TOO_HARD;
rmap = (void *) vmalloc_to_phys(rmap);
@@ -542,7 +567,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
ua = 0;
- if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -557,7 +582,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
}
}
- kvmppc_tce_put(stt, entry + i, tce);
+ kvmppc_rm_tce_put(stt, entry + i, tce);
}
unlock_exit:
@@ -583,7 +608,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
if (!stt)
return H_TOO_HARD;
- ret = kvmppc_ioba_validate(stt, ioba, npages);
+ ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
if (ret != H_SUCCESS)
return ret;
@@ -610,7 +635,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
- kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+ kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
return H_SUCCESS;
}
@@ -635,6 +660,10 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = (ioba >> stt->page_shift) - stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
+ if (!page) {
+ vcpu->arch.regs.gpr[4] = 0;
+ return H_SUCCESS;
+ }
tbl = (u64 *)page_address(page);
vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
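The real-mode side cannot call alloc_page(), so kvmppc_rm_ioba_validate() pre-checks that every backing TCE page the request will touch either already exists or is not needed (all-zero TCEs); otherwise it returns H_TOO_HARD and the hypercall is retried in virtual mode, where allocation is possible. The page-span arithmetic it performs, restated as a sketch (assumes npages >= 1):

    /* sketch: which stt->pages[] slots does [ioba, ioba + npages) touch? */
    static bool tce_pages_present(struct kvmppc_spapr_tce_table *stt,
                                  unsigned long ioba, unsigned long npages)
    {
        unsigned long idx = (ioba >> stt->page_shift) - stt->offset;
        unsigned long first = idx / TCES_PER_PAGE;
        unsigned long last = (idx + npages - 1) / TCES_PER_PAGE;
        unsigned long i;

        for (i = first; i <= last; i++)
            if (!stt->pages[i])
                return false;   /* caller falls back to virtual mode */
        return true;
    }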
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7bdcd4d7a9f0..d5fc624e0655 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -750,7 +750,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
- * smb_wmb() in kvmppc_guest_entry_inject().
+ * smp_wmb() in kvmppc_guest_entry_inject().
*/
smp_rmb();
vc = vcpu->arch.vcore;
@@ -802,6 +802,80 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
}
}
+/* Copy guest memory in place - must reside within a single memslot */
+static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
+ unsigned long len)
+{
+ struct kvm_memory_slot *to_memslot = NULL;
+ struct kvm_memory_slot *from_memslot = NULL;
+ unsigned long to_addr, from_addr;
+ int r;
+
+ /* Get HPA for from address */
+ from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
+ if (!from_memslot)
+ return -EFAULT;
+ if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
+ << PAGE_SHIFT))
+ return -EINVAL;
+ from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
+ if (kvm_is_error_hva(from_addr))
+ return -EFAULT;
+ from_addr |= (from & (PAGE_SIZE - 1));
+
+ /* Get HPA for to address */
+ to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
+ if (!to_memslot)
+ return -EFAULT;
+ if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
+ << PAGE_SHIFT))
+ return -EINVAL;
+ to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
+ if (kvm_is_error_hva(to_addr))
+ return -EFAULT;
+ to_addr |= (to & (PAGE_SIZE - 1));
+
+ /* Perform copy */
+ r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
+ len);
+ if (r)
+ return -EFAULT;
+ mark_page_dirty(kvm, to >> PAGE_SHIFT);
+ return 0;
+}
+
+static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src)
+{
+ u64 pg_sz = SZ_4K; /* 4K page size */
+ u64 pg_mask = SZ_4K - 1;
+ int ret;
+
+ /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+ H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+ return H_PARAMETER;
+
+ /* dest (and src if copy_page flag set) must be page aligned */
+ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+ return H_PARAMETER;
+
+ /* zero and/or copy the page as determined by the flags */
+ if (flags & H_COPY_PAGE) {
+ ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
+ if (ret < 0)
+ return H_PARAMETER;
+ } else if (flags & H_ZERO_PAGE) {
+ ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
+ if (ret < 0)
+ return H_PARAMETER;
+ }
+
+ /* We can ignore the remaining flags */
+
+ return H_SUCCESS;
+}
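H_PAGE_INIT lets a guest zero and/or copy a 4K page with one hypercall instead of touching it through the MMU; kvmppc_h_page_init() above services it in virtual mode, and the real-mode variant kvmppc_rm_h_page_init() is declared in kvm_ppc.h earlier in this series. On the guest side the call would look roughly like this, assuming the pseries plpar_hcall_norets() wrapper and the H_ZERO_PAGE/H_COPY_PAGE flags from hvcall.h:

    /* sketch of a guest-side user of H_PAGE_INIT (pseries guest) */
    static long guest_zero_page(unsigned long dest_ra)
    {
        /* H_ZERO_PAGE: zero the 4K page at dest; src is ignored */
        return plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dest_ra, 0);
    }

    static long guest_copy_page(unsigned long dest_ra, unsigned long src_ra)
    {
        return plpar_hcall_norets(H_PAGE_INIT, H_COPY_PAGE, dest_ra, src_ra);
    }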
+
static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
struct kvmppc_vcore *vcore = target->arch.vcore;
@@ -1004,6 +1078,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (nesting_enabled(vcpu->kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
+ case H_PAGE_INIT:
+ ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ break;
default:
return RESUME_HOST;
}
@@ -1048,6 +1127,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_IPOLL:
case H_XIRR_X:
#endif
+ case H_PAGE_INIT:
return 1;
}
@@ -2505,37 +2585,6 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
}
}
-static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu,
- struct kvm_nested_guest *nested)
-{
- cpumask_t *need_tlb_flush;
- int lpid;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return;
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu &= ~0x3UL;
-
- if (nested) {
- lpid = nested->shadow_lpid;
- need_tlb_flush = &nested->need_tlb_flush;
- } else {
- lpid = kvm->arch.lpid;
- need_tlb_flush = &kvm->arch.need_tlb_flush;
- }
-
- mtspr(SPRN_LPID, lpid);
- isync();
- smp_mb();
-
- if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
- radix__local_flush_tlb_lpid_guest(lpid);
- /* Clear the bit after the TLB flush */
- cpumask_clear_cpu(pcpu, need_tlb_flush);
- }
-}
-
static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
int cpu;
@@ -3229,19 +3278,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
for (sub = 0; sub < core_info.n_subcores; ++sub)
spin_unlock(&core_info.vc[sub]->lock);
- if (kvm_is_radix(vc->kvm)) {
- /*
- * Do we need to flush the process scoped TLB for the LPAR?
- *
- * On POWER9, individual threads can come in here, but the
- * TLB is shared between the 4 threads in a core, hence
- * invalidating on one thread invalidates for all.
- * Thus we make all 4 threads use the same bit here.
- *
- * Hash must be flushed in realmode in order to use tlbiel.
- */
- kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL);
- }
+ guest_enter_irqoff();
+
+ srcu_idx = srcu_read_lock(&vc->kvm->srcu);
+
+ this_cpu_disable_ftrace();
/*
* Interrupts will be enabled once we get into the guest,
@@ -3249,19 +3290,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
trace_hardirqs_on();
- guest_enter_irqoff();
-
- srcu_idx = srcu_read_lock(&vc->kvm->srcu);
-
- this_cpu_disable_ftrace();
-
trap = __kvmppc_vcore_entry();
+ trace_hardirqs_off();
+
this_cpu_enable_ftrace();
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
- trace_hardirqs_off();
set_irq_happened(trap);
spin_lock(&vc->lock);
@@ -3514,6 +3550,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#ifdef CONFIG_ALTIVEC
load_vr_state(&vcpu->arch.vr);
#endif
+ mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
mtspr(SPRN_DSCR, vcpu->arch.dscr);
mtspr(SPRN_IAMR, vcpu->arch.iamr);
@@ -3605,6 +3642,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#ifdef CONFIG_ALTIVEC
store_vr_state(&vcpu->arch.vr);
#endif
+ vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
if (cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
@@ -3970,7 +4008,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
unsigned long lpcr)
{
int trap, r, pcpu;
- int srcu_idx;
+ int srcu_idx, lpid;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4046,8 +4084,12 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc->vcore_state = VCORE_RUNNING;
trace_kvmppc_run_core(vc, 0);
- if (cpu_has_feature(CPU_FTR_HVMODE))
- kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
+ }
trace_hardirqs_on();
guest_enter_irqoff();
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index b0cf22477e87..6035d24f1d1d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -805,3 +805,60 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
vcpu->arch.doorbell_request = 0;
}
}
+
+static void flush_guest_tlb(struct kvm *kvm)
+{
+ unsigned long rb, set;
+
+ rb = PPC_BIT(52); /* IS = 2 */
+ if (kvm_is_radix(kvm)) {
+ /* R=1 PRS=1 RIC=2 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (2),
+ "r" (0) : "memory");
+ for (set = 1; set < kvm->arch.tlb_sets; ++set) {
+ rb += PPC_BIT(51); /* increment set number */
+ /* R=1 PRS=1 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (0),
+ "r" (0) : "memory");
+ }
+ } else {
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+ /* R=0 PRS=0 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (0), "i" (0), "i" (0),
+ "r" (0) : "memory");
+ rb += PPC_BIT(51); /* increment set number */
+ }
+ }
+ asm volatile("ptesync": : :"memory");
+}
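rb starts at PPC_BIT(52) (0x800, the IS=2 encoding) and each iteration adds PPC_BIT(51) (0x1000) to step to the next TLB congruence class, the same constants used by the assembly loop removed from book3s_hv_rmhandlers.S further down (li r7,0x800 / addi r7,r7,0x1000). A user-space sketch of the rb sequence; the 128-set count is an assumption matching POWER9 radix:

#include <stdint.h>
#include <stdio.h>

#define PPC_BIT(n)	(1ULL << (63 - (n)))

int main(void)
{
	uint64_t rb = PPC_BIT(52);	/* IS = 2: invalidate all entries */
	int tlb_sets = 128;		/* POWER9 radix; an assumption here */

	for (int set = 0; set < tlb_sets; set++) {
		if (set < 3)		/* print the first few values */
			printf("set %d: rb = 0x%llx\n", set,
			       (unsigned long long)rb);
		rb += PPC_BIT(51);	/* step to the next TLB set */
	}
	return 0;
}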
+
+void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+
+ /*
+ * On POWER9, individual threads can come in here, but the
+ * TLB is shared between the 4 threads in a core, hence
+ * invalidating on one thread invalidates for all.
+ * Thus we make all 4 threads use the same bit.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ pcpu = cpu_first_thread_sibling(pcpu);
+
+ if (nested)
+ need_tlb_flush = &nested->need_tlb_flush;
+ else
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+
+ if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ flush_guest_tlb(kvm);
+
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
+ }
+}
+EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
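On these CPUs a core has four threads, so cpu_first_thread_sibling() amounts to clearing the two low bits of the CPU number, exactly what the removed kvmppc_radix_check_need_tlb_flush() did with pcpu &= ~0x3UL. A sketch under that SMT4 assumption:

#include <stdio.h>

/* SMT4 assumption: threads_per_core == 4 */
static int first_thread_sibling(int cpu)
{
	return cpu & ~0x3;
}

int main(void)
{
	for (int cpu = 12; cpu < 16; cpu++)
		printf("cpu %d -> first sibling %d\n",
		       cpu, first_thread_sibling(cpu));
	return 0;
}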
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 3b3791ed74a6..8431ad1e8391 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -13,6 +13,7 @@
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>
+#include <linux/sizes.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
@@ -867,6 +868,149 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
return ret;
}
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
+ int writing, unsigned long *hpa,
+ struct kvm_memory_slot **memslot_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn, hva, pa, psize = PAGE_SIZE;
+ unsigned int shift;
+ pte_t *ptep, pte;
+
+ /* Find the memslot for this address */
+ gfn = gpa >> PAGE_SHIFT;
+ memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ return H_PARAMETER;
+
+ /* Translate to host virtual address */
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+
+ /* Try to find the host pte for that virtual address */
+ ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ if (!ptep)
+ return H_TOO_HARD;
+ pte = kvmppc_read_update_linux_pte(ptep, writing);
+ if (!pte_present(pte))
+ return H_TOO_HARD;
+
+ /* Convert to a physical address */
+ if (shift)
+ psize = 1UL << shift;
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (psize - 1);
+ pa |= gpa & ~PAGE_MASK;
+
+ if (hpa)
+ *hpa = pa;
+ if (memslot_p)
+ *memslot_p = memslot;
+
+ return H_SUCCESS;
+}
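The returned address is assembled from three pieces: the page frame from the host PTE, the offset of the HVA within a large page when shift is set, and the in-page offset of the GPA. A sketch with a hypothetical 2M mapping (shift = 21) and 64K base pages, all register contents made up:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	16		/* 64K base pages, an assumption */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t pte_pfn = 0x12340;	/* made-up PFN from the host PTE */
	unsigned int shift = 21;	/* hypothetical 2M huge mapping */
	uint64_t hva = 0x7fff0a150000;	/* page-aligned HVA inside it */
	uint64_t gpa = 0x40003fc2;	/* guest address being translated */

	uint64_t psize = shift ? (1ULL << shift) : (1ULL << PAGE_SHIFT);
	uint64_t pa = pte_pfn << PAGE_SHIFT;

	pa |= hva & (psize - 1);	/* offset within the huge page */
	pa |= gpa & ~PAGE_MASK;		/* offset within the base page */

	printf("pa = 0x%llx\n", (unsigned long long)pa);
	return 0;
}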
+
+static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
+ unsigned long dest)
+{
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pa, mmu_seq;
+ long ret = H_SUCCESS;
+ int i;
+
+ /* Used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ /* Check if we've been invalidated */
+ raw_spin_lock(&kvm->mmu_lock.rlock);
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ ret = H_TOO_HARD;
+ goto out_unlock;
+ }
+
+ /* Zero the page */
+ for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+ dcbz((void *)pa);
+ kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+ raw_spin_unlock(&kvm->mmu_lock.rlock);
+ return ret;
+}
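dcbz clears one L1 cache line per iteration, so the 4K page takes SZ_4K / L1_CACHE_BYTES = 32 stores at the 128-byte line size of these processors. A user-space model with memset standing in for dcbz:

#include <stdio.h>
#include <string.h>

#define SZ_4K		0x1000
#define L1_CACHE_BYTES	128	/* POWER8/POWER9 line size */

int main(void)
{
	static char page[SZ_4K];
	int lines = 0;

	for (int i = 0; i < SZ_4K; i += L1_CACHE_BYTES) {
		memset(page + i, 0, L1_CACHE_BYTES);	/* stands in for dcbz */
		lines++;
	}
	printf("%d cache lines zeroed\n", lines);	/* 32 */
	return 0;
}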
+
+static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
+ unsigned long dest, unsigned long src)
+{
+ unsigned long dest_pa, src_pa, mmu_seq;
+ struct kvm_memory_slot *dest_memslot;
+ struct kvm *kvm = vcpu->kvm;
+ long ret = H_SUCCESS;
+
+ /* Used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
+ if (ret != H_SUCCESS)
+ return ret;
+ ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ /* Check if we've been invalidated */
+ raw_spin_lock(&kvm->mmu_lock.rlock);
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ ret = H_TOO_HARD;
+ goto out_unlock;
+ }
+
+ /* Copy the page */
+ memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
+
+ kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+ raw_spin_unlock(&kvm->mmu_lock.rlock);
+ return ret;
+}
+
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 pg_mask = SZ_4K - 1; /* 4K page size */
+ long ret = H_SUCCESS;
+
+ /* Don't handle radix mode here, go up to the virtual mode handler */
+ if (kvm_is_radix(kvm))
+ return H_TOO_HARD;
+
+ /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+ H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+ return H_PARAMETER;
+
+ /* dest (and src if copy_page flag set) must be page aligned */
+ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+ return H_PARAMETER;
+
+ /* zero and/or copy the page as determined by the flags */
+ if (flags & H_COPY_PAGE)
+ ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+ else if (flags & H_ZERO_PAGE)
+ ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+
+ /* We can ignore the other flags */
+
+ return ret;
+}
+
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index dd014308f065..f9b2620fbecd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -589,11 +589,8 @@ kvmppc_hv_entry:
1:
#endif
- /* Use cr7 as an indication of radix mode */
ld r5, HSTATE_KVM_VCORE(r13)
ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
- lbz r0, KVM_RADIX(r9)
- cmpwi cr7, r0, 0
/*
* POWER7/POWER8 host -> guest partition switch code.
@@ -616,9 +613,6 @@ kvmppc_hv_entry:
cmpwi r6,0
bne 10f
- /* Radix has already switched LPID and flushed core TLB */
- bne cr7, 22f
-
lwz r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
@@ -630,41 +624,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
- /* See if we need to flush the TLB. Hash has to be done in RM */
- lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
-BEGIN_FTR_SECTION
- /*
- * On POWER9, individual threads can come in here, but the
- * TLB is shared between the 4 threads in a core, hence
- * invalidating on one thread invalidates for all.
- * Thus we make all 4 threads use the same bit here.
- */
- clrrdi r6,r6,2
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- clrldi r7,r6,64-6 /* extract bit number (6 bits) */
- srdi r6,r6,6 /* doubleword number */
- sldi r6,r6,3 /* address offset */
- add r6,r6,r9
- addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
- li r8,1
- sld r8,r8,r7
- ld r7,0(r6)
- and. r7,r7,r8
- beq 22f
- /* Flush the TLB of any entries for this LPID */
- lwz r0,KVM_TLB_SETS(r9)
- mtctr r0
- li r7,0x800 /* IS field = 0b10 */
- ptesync
- li r0,0 /* RS for P9 version of tlbiel */
-28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
- addi r7,r7,0x1000
- bdnz 28b
- ptesync
-23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
- andc r7,r7,r8
- stdcx. r7,0,r6
- bne 23b
+ /* See if we need to flush the TLB. */
+ mr r3, r9 /* kvm pointer */
+ lhz r4, PACAPACAINDEX(r13) /* physical cpu number */
+ li r5, 0 /* nested vcpu pointer */
+ bl kvmppc_check_need_tlb_flush
+ nop
+ ld r5, HSTATE_KVM_VCORE(r13)
/* Add timebase offset onto timebase */
22: ld r8,VCORE_TB_OFFSET(r5)
@@ -980,17 +946,27 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
#ifdef CONFIG_KVM_XICS
/* We are entering the guest on that thread, push VCPU to XIVE */
- ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr0, r10, 0
- beq no_xive
ld r11, VCPU_XIVE_SAVED_STATE(r4)
li r9, TM_QW1_OS
+ lwz r8, VCPU_XIVE_CAM_WORD(r4)
+ li r7, TM_QW1_OS + TM_WORD2
+ mfmsr r0
+ andi. r0, r0, MSR_DR /* in real mode? */
+ beq 2f
+ ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
+ cmpldi cr1, r10, 0
+ beq cr1, no_xive
+ eieio
+ stdx r11,r9,r10
+ stwx r8,r7,r10
+ b 3f
+2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr1, r10, 0
+ beq cr1, no_xive
eieio
stdcix r11,r9,r10
- lwz r11, VCPU_XIVE_CAM_WORD(r4)
- li r9, TM_QW1_OS + TM_WORD2
- stwcix r11,r9,r10
- li r9, 1
+ stwcix r8,r7,r10
+3: li r9, 1
stb r9, VCPU_XIVE_PUSHED(r4)
eieio
@@ -1009,12 +985,16 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
* on, we mask it.
*/
lbz r0, VCPU_XIVE_ESC_ON(r4)
- cmpwi r0,0
- beq 1f
- ld r10, VCPU_XIVE_ESC_RADDR(r4)
+ cmpwi cr1, r0,0
+ beq cr1, 1f
li r9, XIVE_ESB_SET_PQ_01
+ beq 4f /* in real mode? */
+ ld r10, VCPU_XIVE_ESC_VADDR(r4)
+ ldx r0, r10, r9
+ b 5f
+4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
ldcix r0, r10, r9
- sync
+5: sync
/* We have a possible subtle race here: The escalation interrupt might
* have fired and be on its way to the host queue while we mask it,
@@ -2292,7 +2272,7 @@ hcall_real_table:
#endif
.long 0 /* 0x24 - H_SET_SPRG0 */
.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
- .long 0 /* 0x2c */
+ .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
.long 0 /* 0x30 */
.long 0 /* 0x34 */
.long 0 /* 0x38 */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f78d002f0fe0..4953957333b7 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -166,7 +166,8 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
+int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
+ bool single_escalation)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
struct xive_q *q = &xc->queues[prio];
@@ -185,7 +186,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
return -EIO;
}
- if (xc->xive->single_escalation)
+ if (single_escalation)
name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
vcpu->kvm->arch.lpid, xc->server_num);
else
@@ -217,7 +218,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
* interrupt, thus leaving it effectively masked after
* it fires once.
*/
- if (xc->xive->single_escalation) {
+ if (single_escalation) {
struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
@@ -291,7 +292,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio)
continue;
rc = xive_provision_queue(vcpu, prio);
if (rc == 0 && !xive->single_escalation)
- xive_attach_escalation(vcpu, prio);
+ kvmppc_xive_attach_escalation(vcpu, prio,
+ xive->single_escalation);
if (rc)
return rc;
}
@@ -342,7 +344,7 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}
-static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
+int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
struct kvm_vcpu *vcpu;
int i, rc;
@@ -380,11 +382,6 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
return -EBUSY;
}
-static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
-{
- return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
-}
-
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
struct kvmppc_xive_src_block *sb,
struct kvmppc_xive_irq_state *state)
@@ -430,8 +427,8 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive_vp(xive, state->act_server),
- MASKED, state->number);
+ kvmppc_xive_vp(xive, state->act_server),
+ MASKED, state->number);
/* set old_p so we can track if an H_EOI was done */
state->old_p = true;
state->old_q = false;
@@ -486,8 +483,8 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive_vp(xive, state->act_server),
- state->act_priority, state->number);
+ kvmppc_xive_vp(xive, state->act_server),
+ state->act_priority, state->number);
/* If an EOI is needed, do it here */
if (!state->old_p)
xive_vm_source_eoi(hw_num, xd);
@@ -535,7 +532,7 @@ static int xive_target_interrupt(struct kvm *kvm,
* priority. The count for that new target will have
* already been incremented.
*/
- rc = xive_select_target(kvm, &server, prio);
+ rc = kvmppc_xive_select_target(kvm, &server, prio);
/*
* We failed to find a target ? Not much we can do
@@ -563,7 +560,7 @@ static int xive_target_interrupt(struct kvm *kvm,
kvmppc_xive_select_irq(state, &hw_num, NULL);
return xive_native_configure_irq(hw_num,
- xive_vp(xive, server),
+ kvmppc_xive_vp(xive, server),
prio, state->number);
}
@@ -849,7 +846,8 @@ int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
/*
* We can't update the state of a "pushed" VCPU, but that
- * shouldn't happen.
+ * shouldn't happen because the vcpu->mutex makes running a
+ * vcpu mutually exclusive with doing one_reg get/set on it.
*/
if (WARN_ON(vcpu->arch.xive_pushed))
return -EIO;
@@ -940,6 +938,13 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
/* Turn the IPI hard off */
xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ /*
+ * Reset ESB guest mapping. Needed when ESB pages are exposed
+ * to the guest in XIVE native mode
+ */
+ if (xive->ops && xive->ops->reset_mapped)
+ xive->ops->reset_mapped(kvm, guest_irq);
+
/* Grab info about irq */
state->pt_number = hw_irq;
state->pt_data = irq_data_get_irq_handler_data(host_data);
@@ -951,7 +956,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
* which is fine for a never started interrupt.
*/
xive_native_configure_irq(hw_irq,
- xive_vp(xive, state->act_server),
+ kvmppc_xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -1025,9 +1030,17 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
state->pt_number = 0;
state->pt_data = NULL;
+ /*
+ * Reset ESB guest mapping. Needed when ESB pages are exposed
+ * to the guest in XIVE native mode
+ */
+ if (xive->ops && xive->ops->reset_mapped) {
+ xive->ops->reset_mapped(kvm, guest_irq);
+ }
+
/* Reconfigure the IPI */
xive_native_configure_irq(state->ipi_number,
- xive_vp(xive, state->act_server),
+ kvmppc_xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -1049,7 +1062,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
-static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
struct kvm *kvm = vcpu->kvm;
@@ -1083,14 +1096,35 @@ static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
arch_spin_unlock(&sb->lock);
}
}
+
+ /* Disable vcpu's escalation interrupt */
+ if (vcpu->arch.xive_esc_on) {
+ __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
+ XIVE_ESB_SET_PQ_01));
+ vcpu->arch.xive_esc_on = false;
+ }
+
+ /*
+ * Clear pointers to escalation interrupt ESB.
+ * This is safe because the vcpu->mutex is held, preventing
+ * any other CPU from concurrently executing a KVM_RUN ioctl.
+ */
+ vcpu->arch.xive_esc_vaddr = 0;
+ vcpu->arch.xive_esc_raddr = 0;
}
void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- struct kvmppc_xive *xive = xc->xive;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
int i;
+ if (!kvmppc_xics_enabled(vcpu))
+ return;
+
+ if (!xc)
+ return;
+
pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
/* Ensure no interrupt is still routed to that VP */
@@ -1129,6 +1163,10 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
}
/* Free the VP */
kfree(xc);
+
+ /* Cleanup the vcpu */
+ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+ vcpu->arch.xive_vcpu = NULL;
}
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
@@ -1146,7 +1184,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
}
if (xive->kvm != vcpu->kvm)
return -EPERM;
- if (vcpu->arch.irq_type)
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
pr_devel("Duplicate !\n");
@@ -1166,7 +1204,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
xc->xive = xive;
xc->vcpu = vcpu;
xc->server_num = cpu;
- xc->vp_id = xive_vp(xive, cpu);
+ xc->vp_id = kvmppc_xive_vp(xive, cpu);
xc->mfrr = 0xff;
xc->valid = true;
@@ -1219,7 +1257,8 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
if (xive->qmap & (1 << i)) {
r = xive_provision_queue(vcpu, i);
if (r == 0 && !xive->single_escalation)
- xive_attach_escalation(vcpu, i);
+ kvmppc_xive_attach_escalation(
+ vcpu, i, xive->single_escalation);
if (r)
goto bail;
} else {
@@ -1234,7 +1273,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
}
/* If not done above, attach priority 0 escalation */
- r = xive_attach_escalation(vcpu, 0);
+ r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
if (r)
goto bail;
@@ -1485,8 +1524,8 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
return 0;
}
-static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
- int irq)
+struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
+ struct kvmppc_xive *xive, int irq)
{
struct kvm *kvm = xive->kvm;
struct kvmppc_xive_src_block *sb;
@@ -1509,6 +1548,7 @@ static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *x
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
+ sb->irq_state[i].eisn = 0;
sb->irq_state[i].guest_priority = MASKED;
sb->irq_state[i].saved_priority = MASKED;
sb->irq_state[i].act_priority = MASKED;
@@ -1565,7 +1605,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
sb = kvmppc_xive_find_source(xive, irq, &idx);
if (!sb) {
pr_devel("No source, creating source block...\n");
- sb = xive_create_src_block(xive, irq);
+ sb = kvmppc_xive_create_src_block(xive, irq);
if (!sb) {
pr_devel("Failed to create block...\n");
return -ENOMEM;
@@ -1789,7 +1829,7 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
xive_cleanup_irq_data(xd);
}
-static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
+void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
int i;
@@ -1810,16 +1850,55 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
}
}
-static void kvmppc_xive_free(struct kvm_device *dev)
+/*
+ * Called when device fd is closed. kvm->lock is held.
+ */
+static void kvmppc_xive_release(struct kvm_device *dev)
{
struct kvmppc_xive *xive = dev->private;
struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
int i;
+ int was_ready;
+
+ pr_devel("Releasing xive device\n");
debugfs_remove(xive->dentry);
- if (kvm)
- kvm->arch.xive = NULL;
+ /*
+ * Clearing mmu_ready temporarily while holding kvm->lock
+ * is a way of ensuring that no vcpus can enter the guest
+ * until we drop kvm->lock. Doing kick_all_cpus_sync()
+ * ensures that any vcpu executing inside the guest has
+ * exited the guest. Once kick_all_cpus_sync() has finished,
+ * we know that no vcpu can be executing the XIVE push or
+ * pull code, or executing a XICS hcall.
+ *
+ * Since this is the device release function, we know that
+ * userspace does not have any open fd referring to the
+ * device. Therefore there can not be any of the device
+ * attribute set/get functions being executed concurrently,
+ * and similarly, the connect_vcpu and set/clr_mapped
+ * functions also cannot be being executed.
+ */
+ was_ready = kvm->arch.mmu_ready;
+ kvm->arch.mmu_ready = 0;
+ kick_all_cpus_sync();
+
+ /*
+ * We should clean up the vCPU interrupt presenters first.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /*
+ * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+ * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
+ */
+ mutex_lock(&vcpu->mutex);
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ mutex_unlock(&vcpu->mutex);
+ }
+
+ kvm->arch.xive = NULL;
/* Mask and free interrupts */
for (i = 0; i <= xive->max_sbid; i++) {
@@ -1832,11 +1911,47 @@ static void kvmppc_xive_free(struct kvm_device *dev)
if (xive->vp_base != XIVE_INVALID_VP)
xive_native_free_vp_block(xive->vp_base);
+ kvm->arch.mmu_ready = was_ready;
+
+ /*
+ * A reference of the kvmppc_xive pointer is now kept under
+ * the xive_devices struct of the machine for reuse. It is
+ * freed when the VM is destroyed for now until we fix all the
+ * execution paths.
+ */
- kfree(xive);
kfree(dev);
}
+/*
+ * When the guest chooses the interrupt mode (XICS legacy or XIVE
+ * native), the VM switches KVM devices. The previous device will
+ * be "released" before the new one is created.
+ *
+ * Until we are sure all execution paths are well protected, provide a
+ * fail safe (transitional) method for device destruction, in which
+ * the XIVE device pointer is recycled and not directly freed.
+ */
+struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
+{
+ struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
+ &kvm->arch.xive_devices.native :
+ &kvm->arch.xive_devices.xics_on_xive;
+ struct kvmppc_xive *xive = *kvm_xive_device;
+
+ if (!xive) {
+ xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ *kvm_xive_device = xive;
+ } else {
+ memset(xive, 0, sizeof(*xive));
+ }
+
+ return xive;
+}
+
+/*
+ * Create a XICS device with XIVE backend. kvm->lock is held.
+ */
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
struct kvmppc_xive *xive;
@@ -1845,7 +1960,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
pr_devel("Creating xive for partition\n");
- xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ xive = kvmppc_xive_get_device(kvm, type);
if (!xive)
return -ENOMEM;
@@ -1883,6 +1998,43 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
return 0;
}
+int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int i;
+
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+ u32 i0, i1, idx;
+
+ if (!q->qpage && !xc->esc_virq[i])
+ continue;
+
+ seq_printf(m, " [q%d]: ", i);
+
+ if (q->qpage) {
+ idx = q->idx;
+ i0 = be32_to_cpup(q->qpage + idx);
+ idx = (idx + 1) & q->msk;
+ i1 = be32_to_cpup(q->qpage + idx);
+ seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
+ i0, i1);
+ }
+ if (xc->esc_virq[i]) {
+ struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
+ struct xive_irq_data *xd =
+ irq_data_get_irq_handler_data(d);
+ u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
+
+ seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
+ (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
+ (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
+ xc->esc_virq[i], pq, xd->eoi_page);
+ seq_puts(m, "\n");
+ }
+ }
+ return 0;
+}
static int xive_debug_show(struct seq_file *m, void *private)
{
@@ -1908,7 +2060,6 @@ static int xive_debug_show(struct seq_file *m, void *private)
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- unsigned int i;
if (!xc)
continue;
@@ -1918,33 +2069,8 @@ static int xive_debug_show(struct seq_file *m, void *private)
xc->server_num, xc->cppr, xc->hw_cppr,
xc->mfrr, xc->pending,
xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
- for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
- struct xive_q *q = &xc->queues[i];
- u32 i0, i1, idx;
-
- if (!q->qpage && !xc->esc_virq[i])
- continue;
- seq_printf(m, " [q%d]: ", i);
-
- if (q->qpage) {
- idx = q->idx;
- i0 = be32_to_cpup(q->qpage + idx);
- idx = (idx + 1) & q->msk;
- i1 = be32_to_cpup(q->qpage + idx);
- seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
- }
- if (xc->esc_virq[i]) {
- struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
- struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
- seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
- (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
- (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
- xc->esc_virq[i], pq, xd->eoi_page);
- seq_printf(m, "\n");
- }
- }
+ kvmppc_xive_debug_show_queues(m, vcpu);
t_rm_h_xirr += xc->stat_rm_h_xirr;
t_rm_h_ipoll += xc->stat_rm_h_ipoll;
@@ -1999,7 +2125,7 @@ struct kvm_device_ops kvm_xive_ops = {
.name = "kvm-xive",
.create = kvmppc_xive_create,
.init = kvmppc_xive_init,
- .destroy = kvmppc_xive_free,
+ .release = kvmppc_xive_release,
.set_attr = xive_set_attr,
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index a08ae6fd4c51..426146332984 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -13,6 +13,13 @@
#include "book3s_xics.h"
/*
+ * The XIVE Interrupt source numbers are within the range 0 to
+ * KVMPPC_XICS_NR_IRQS.
+ */
+#define KVMPPC_XIVE_FIRST_IRQ 0
+#define KVMPPC_XIVE_NR_IRQS KVMPPC_XICS_NR_IRQS
+
+/*
* State for one guest irq source.
*
* For each guest source we allocate a HW interrupt in the XIVE
@@ -54,6 +61,9 @@ struct kvmppc_xive_irq_state {
bool saved_p;
bool saved_q;
u8 saved_scan_prio;
+
+ /* Xive native */
+ u32 eisn; /* Guest Effective IRQ number */
};
/* Select the "right" interrupt (IPI vs. passthrough) */
@@ -84,6 +94,11 @@ struct kvmppc_xive_src_block {
struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};
+struct kvmppc_xive;
+
+struct kvmppc_xive_ops {
+ int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
+};
struct kvmppc_xive {
struct kvm *kvm;
@@ -122,6 +137,10 @@ struct kvmppc_xive {
/* Flags */
u8 single_escalation;
+
+ struct kvmppc_xive_ops *ops;
+ struct address_space *mapping;
+ struct mutex mapping_lock;
};
#define KVMPPC_XIVE_Q_COUNT 8
@@ -198,6 +217,11 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp
return xive->src_blocks[bid];
}
+static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
/*
* Mapping between guest priorities and host priorities
* is as follow.
@@ -248,5 +272,18 @@ extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+/*
+ * Common Xive routines for XICS-over-XIVE and XIVE native
+ */
+void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
+int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
+struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
+ struct kvmppc_xive *xive, int irq);
+void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
+int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
+int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
+ bool single_escalation);
+struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
+
#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
new file mode 100644
index 000000000000..6a8e698c4b6e
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -0,0 +1,1249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "xive-kvm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/opal.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xive.h"
+
+static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ val = in_be64(xd->eoi_mmio + offset);
+ return (u8)val;
+}
+
+static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q = &xc->queues[prio];
+
+ xive_native_disable_queue(xc->vp_id, q, prio);
+ if (q->qpage) {
+ put_page(virt_to_page(q->qpage));
+ q->qpage = NULL;
+ }
+}
+
+void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ int i;
+
+ if (!kvmppc_xive_enabled(vcpu))
+ return;
+
+ if (!xc)
+ return;
+
+ pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);
+
+ /* Ensure no interrupt is still routed to that VP */
+ xc->valid = false;
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ /* Disable the VP */
+ xive_native_disable_vp(xc->vp_id);
+
+ /* Free the queues & associated interrupts */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ /* Free the escalation irq */
+ if (xc->esc_virq[i]) {
+ free_irq(xc->esc_virq[i], vcpu);
+ irq_dispose_mapping(xc->esc_virq[i]);
+ kfree(xc->esc_virq_names[i]);
+ xc->esc_virq[i] = 0;
+ }
+
+ /* Free the queue */
+ kvmppc_xive_native_cleanup_queue(vcpu, i);
+ }
+
+ /* Free the VP */
+ kfree(xc);
+
+ /* Cleanup the vcpu */
+ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+ vcpu->arch.xive_vcpu = NULL;
+}
+
+int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 server_num)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc = NULL;
+ int rc;
+
+ pr_devel("native_connect_vcpu(server=%d)\n", server_num);
+
+ if (dev->ops != &kvm_xive_native_ops) {
+ pr_devel("Wrong ops !\n");
+ return -EPERM;
+ }
+ if (xive->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
+ return -EBUSY;
+ if (server_num >= KVM_MAX_VCPUS) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+ pr_devel("Duplicate !\n");
+ rc = -EEXIST;
+ goto bail;
+ }
+
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = server_num;
+
+ xc->vp_id = kvmppc_xive_vp(xive, server_num);
+ xc->valid = true;
+ vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
+
+ rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
+ if (rc) {
+ pr_err("Failed to get VP info from OPAL: %d\n", rc);
+ goto bail;
+ }
+
+ /*
+ * Enable the VP first as the single escalation mode will
+ * affect escalation interrupts numbering
+ */
+ rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
+ if (rc) {
+ pr_err("Failed to enable VP in OPAL: %d\n", rc);
+ goto bail;
+ }
+
+ /* Configure VCPU fields for use by assembly push/pull */
+ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
+ vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
+
+ /* TODO: reset all queues to a clean state ? */
+bail:
+ mutex_unlock(&vcpu->kvm->lock);
+ if (rc)
+ kvmppc_xive_native_cleanup_vcpu(vcpu);
+
+ return rc;
+}
+
+/*
+ * Device passthrough support
+ */
+static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+
+ if (irq >= KVMPPC_XIVE_NR_IRQS)
+ return -EINVAL;
+
+ /*
+ * Clear the ESB pages of the IRQ number being mapped (or
+ * unmapped) into the guest and let the VM fault handler
+ * repopulate with the appropriate ESB pages (device or IC)
+ */
+ pr_debug("clearing esb pages for girq 0x%lx\n", irq);
+ mutex_lock(&xive->mapping_lock);
+ if (xive->mapping)
+ unmap_mapping_range(xive->mapping,
+ irq * (2ull << PAGE_SHIFT),
+ 2ull << PAGE_SHIFT, 1);
+ mutex_unlock(&xive->mapping_lock);
+ return 0;
+}
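Each guest interrupt owns two consecutive pages in the ESB window, so the range cleared for IRQ n starts at n * 2 * PAGE_SIZE and is two pages long. A sketch of the offset arithmetic, assuming 64K pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	16	/* 64K pages, an assumption */

int main(void)
{
	unsigned long irq = 0x10;	/* hypothetical guest IRQ */
	uint64_t start = irq * (2ULL << PAGE_SHIFT);	/* 2 pages per IRQ */
	uint64_t len = 2ULL << PAGE_SHIFT;

	printf("IRQ 0x%lx: unmap [0x%llx, 0x%llx)\n", irq,
	       (unsigned long long)start,
	       (unsigned long long)(start + len));
	return 0;
}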
+
+static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
+ .reset_mapped = kvmppc_xive_native_reset_mapped,
+};
+
+static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct kvm_device *dev = vma->vm_file->private_data;
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u16 src;
+ u64 page;
+ unsigned long irq;
+ u64 page_offset;
+
+ /*
+ * Linux/KVM uses a two-page ESB setting: one page for trigger
+ * and one for EOI
+ */
+ page_offset = vmf->pgoff - vma->vm_pgoff;
+ irq = page_offset / 2;
+
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel("%s: source %lx not found !\n", __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ arch_spin_lock(&sb->lock);
+
+ /*
+ * first/even page is for trigger
+ * second/odd page is for EOI and management.
+ */
+ page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
+ arch_spin_unlock(&sb->lock);
+
+ if (WARN_ON(!page)) {
+ pr_err("%s: accessing invalid ESB page for source %lx !\n",
+ __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
+ return VM_FAULT_NOPAGE;
+}
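The fault handler inverts that layout: halving the page offset recovers the IRQ number, and its parity picks the page, even for trigger, odd for EOI/management. A sketch of the decode:

#include <stdio.h>

int main(void)
{
	for (unsigned long page_offset = 0x20; page_offset < 0x24;
	     page_offset++) {
		unsigned long irq = page_offset / 2;
		const char *kind = (page_offset % 2) ? "EOI" : "trigger";

		printf("pgoff 0x%lx -> irq 0x%lx %s page\n",
		       page_offset, irq, kind);
	}
	return 0;
}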
+
+static const struct vm_operations_struct xive_native_esb_vmops = {
+ .fault = xive_native_esb_fault,
+};
+
+static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+
+ switch (vmf->pgoff - vma->vm_pgoff) {
+ case 0: /* HW - forbid access */
+ case 1: /* HV - forbid access */
+ return VM_FAULT_SIGBUS;
+ case 2: /* OS */
+ vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
+ return VM_FAULT_NOPAGE;
+ case 3: /* USER - TODO */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static const struct vm_operations_struct xive_native_tima_vmops = {
+ .fault = xive_native_tima_fault,
+};
+
+static int kvmppc_xive_native_mmap(struct kvm_device *dev,
+ struct vm_area_struct *vma)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We only allow mappings at fixed offsets for now */
+ if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
+ if (vma_pages(vma) > 4)
+ return -EINVAL;
+ vma->vm_ops = &xive_native_tima_vmops;
+ } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
+ if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
+ return -EINVAL;
+ vma->vm_ops = &xive_native_esb_vmops;
+ } else {
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+
+ /*
+ * Grab the KVM device file address_space to be able to clear
+ * the ESB pages mapping when a device is passed-through into
+ * the guest.
+ */
+ xive->mapping = vma->vm_file->f_mapping;
+ return 0;
+}
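User space reaches both regions by mmap'ing the XIVE device fd at the fixed page offsets checked above. A hypothetical sketch; the offset values and the 64K page size are assumptions here, the authoritative numbers live in the uapi header:

#include <stddef.h>
#include <sys/mman.h>

#define XIVE_PAGE		0x10000UL	/* 64K, an assumption */
#define TIMA_PAGE_OFFSET	0UL		/* assumed uapi value */
#define ESB_PAGE_OFFSET		4UL		/* assumed uapi value */

/* map the 4-page TIMA region of the XIVE KVM device fd */
void *map_tima(int xive_fd)
{
	return mmap(NULL, 4 * XIVE_PAGE, PROT_READ | PROT_WRITE,
		    MAP_SHARED, xive_fd, TIMA_PAGE_OFFSET * XIVE_PAGE);
}

/* map the trigger+EOI page pair of guest IRQ 'girq' */
void *map_esb(int xive_fd, unsigned long girq)
{
	return mmap(NULL, 2 * XIVE_PAGE, PROT_READ | PROT_WRITE, MAP_SHARED,
		    xive_fd, (ESB_PAGE_OFFSET + 2 * girq) * XIVE_PAGE);
}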
+
+static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
+ u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u64 val;
+ u16 idx;
+ int rc;
+
+ pr_devel("%s irq=0x%lx\n", __func__, irq);
+
+ if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
+ return -E2BIG;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb) {
+ pr_debug("No source, creating source block...\n");
+ sb = kvmppc_xive_create_src_block(xive, irq);
+ if (!sb) {
+ pr_err("Failed to create block...\n");
+ return -ENOMEM;
+ }
+ }
+ state = &sb->irq_state[idx];
+
+ if (get_user(val, ubufp)) {
+ pr_err("fault getting user info !\n");
+ return -EFAULT;
+ }
+
+ arch_spin_lock(&sb->lock);
+
+ /*
+ * If the source doesn't already have an IPI, allocate
+ * one and get the corresponding data
+ */
+ if (!state->ipi_number) {
+ state->ipi_number = xive_native_alloc_irq();
+ if (state->ipi_number == 0) {
+ pr_err("Failed to allocate IRQ !\n");
+ rc = -ENXIO;
+ goto unlock;
+ }
+ xive_native_populate_irq_data(state->ipi_number,
+ &state->ipi_data);
+ pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
+ state->ipi_number, irq);
+ }
+
+ /* Restore LSI state */
+ if (val & KVM_XIVE_LEVEL_SENSITIVE) {
+ state->lsi = true;
+ if (val & KVM_XIVE_LEVEL_ASSERTED)
+ state->asserted = true;
+ pr_devel(" LSI ! Asserted=%d\n", state->asserted);
+ }
+
+ /* Mask IRQ to start with */
+ state->act_server = 0;
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+
+ /* Increment the number of valid sources and mark this one valid */
+ if (!state->valid)
+ xive->src_count++;
+ state->valid = true;
+
+ rc = 0;
+
+unlock:
+ arch_spin_unlock(&sb->lock);
+
+ return rc;
+}
+
+static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state,
+ u32 server, u8 priority, bool masked,
+ u32 eisn)
+{
+ struct kvm *kvm = xive->kvm;
+ u32 hw_num;
+ int rc = 0;
+
+ arch_spin_lock(&sb->lock);
+
+ if (state->act_server == server && state->act_priority == priority &&
+ state->eisn == eisn)
+ goto unlock;
+
+ pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
+ priority, server, masked, state->act_server,
+ state->act_priority);
+
+ kvmppc_xive_select_irq(state, &hw_num, NULL);
+
+ if (priority != MASKED && !masked) {
+ rc = kvmppc_xive_select_target(kvm, &server, priority);
+ if (rc)
+ goto unlock;
+
+ state->act_priority = priority;
+ state->act_server = server;
+ state->eisn = eisn;
+
+ rc = xive_native_configure_irq(hw_num,
+ kvmppc_xive_vp(xive, server),
+ priority, eisn);
+ } else {
+ state->act_priority = MASKED;
+ state->act_server = 0;
+ state->eisn = 0;
+
+ rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
+ }
+
+unlock:
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
+ long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 src;
+ u64 kvm_cfg;
+ u32 server;
+ u8 priority;
+ bool masked;
+ u32 eisn;
+
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[src];
+
+ if (!state->valid)
+ return -EINVAL;
+
+ if (get_user(kvm_cfg, ubufp))
+ return -EFAULT;
+
+ pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);
+
+ priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
+ KVM_XIVE_SOURCE_PRIORITY_SHIFT;
+ server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
+ KVM_XIVE_SOURCE_SERVER_SHIFT;
+ masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
+ KVM_XIVE_SOURCE_MASKED_SHIFT;
+ eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
+ KVM_XIVE_SOURCE_EISN_SHIFT;
+
+ if (priority != xive_prio_from_guest(priority)) {
+ pr_err("invalid priority for queue %d for VCPU %d\n",
+ priority, server);
+ return -EINVAL;
+ }
+
+ return kvmppc_xive_native_update_source_config(xive, sb, state, server,
+ priority, masked, eisn);
+}
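The kvm_cfg word packs priority, server, masked and EISN into a single 64-bit attribute. A packing sketch; the shift/mask values below are assumptions mirroring the uapi layout:

#include <stdint.h>
#include <stdio.h>

/* layout assumed from the uapi header */
#define PRIORITY_SHIFT	0
#define PRIORITY_MASK	0x7ULL
#define SERVER_SHIFT	3
#define SERVER_MASK	0xfffffff8ULL
#define MASKED_SHIFT	32
#define MASKED_MASK	0x100000000ULL
#define EISN_SHIFT	33
#define EISN_MASK	0xfffffffe00000000ULL

static uint64_t pack_source_config(uint32_t server, uint8_t priority,
				   int masked, uint32_t eisn)
{
	return (((uint64_t)priority << PRIORITY_SHIFT) & PRIORITY_MASK) |
	       (((uint64_t)server << SERVER_SHIFT) & SERVER_MASK) |
	       (((uint64_t)!!masked << MASKED_SHIFT) & MASKED_MASK) |
	       (((uint64_t)eisn << EISN_SHIFT) & EISN_MASK);
}

int main(void)
{
	/* hypothetical: IRQ routed to server 2, priority 6, EISN 0x1234 */
	printf("cfg=%016llx\n",
	       (unsigned long long)pack_source_config(2, 6, 0, 0x1234));
	return 0;
}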
+
+static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
+ long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("%s irq=0x%lx", __func__, irq);
+
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[src];
+
+ rc = -EINVAL;
+
+ arch_spin_lock(&sb->lock);
+
+ if (state->valid) {
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+ xive_native_sync_source(hw_num);
+ rc = 0;
+ }
+
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+static int xive_native_validate_queue_size(u32 qshift)
+{
+ /*
+ * We only support 64K pages for the moment. This is also
+ * advertised in the DT property "ibm,xive-eq-sizes"
+ */
+ switch (qshift) {
+ case 0: /* EQ reset */
+ case 16:
+ return 0;
+ case 12:
+ case 21:
+ case 24:
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+ long eq_idx, u64 addr)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ void __user *ubufp = (void __user *) addr;
+ u32 server;
+ u8 priority;
+ struct kvm_ppc_xive_eq kvm_eq;
+ int rc;
+ __be32 *qaddr = NULL;
+ struct page *page;
+ struct xive_q *q;
+ gfn_t gfn;
+ unsigned long page_size;
+
+ /*
+ * Demangle priority/server tuple from the EQ identifier
+ */
+ priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
+ KVM_XIVE_EQ_PRIORITY_SHIFT;
+ server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
+ KVM_XIVE_EQ_SERVER_SHIFT;
+
+ if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
+ return -EFAULT;
+
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_err("Can't find server %d\n", server);
+ return -ENOENT;
+ }
+ xc = vcpu->arch.xive_vcpu;
+
+ if (priority != xive_prio_from_guest(priority)) {
+ pr_err("Trying to restore invalid queue %d for VCPU %d\n",
+ priority, server);
+ return -EINVAL;
+ }
+ q = &xc->queues[priority];
+
+ pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
+ __func__, server, priority, kvm_eq.flags,
+ kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
+
+ /*
+ * sPAPR specifies an "Unconditional Notify (n)" flag for the
+ * H_INT_SET_QUEUE_CONFIG hcall, which forces notification
+ * without using the coalescing mechanisms provided by the
+ * XIVE END ESBs. This is required on KVM as notification
+ * using the END ESBs is not supported.
+ */
+ if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
+ pr_err("invalid flags %d\n", kvm_eq.flags);
+ return -EINVAL;
+ }
+
+ rc = xive_native_validate_queue_size(kvm_eq.qshift);
+ if (rc) {
+ pr_err("invalid queue size %d\n", kvm_eq.qshift);
+ return rc;
+ }
+
+ /* reset queue and disable queueing */
+ if (!kvm_eq.qshift) {
+ q->guest_qaddr = 0;
+ q->guest_qshift = 0;
+
+ rc = xive_native_configure_queue(xc->vp_id, q, priority,
+ NULL, 0, true);
+ if (rc) {
+ pr_err("Failed to reset queue %d for VCPU %d: %d\n",
+ priority, xc->server_num, rc);
+ return rc;
+ }
+
+ if (q->qpage) {
+ put_page(virt_to_page(q->qpage));
+ q->qpage = NULL;
+ }
+
+ return 0;
+ }
+
+ if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
+ pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
+ 1ull << kvm_eq.qshift);
+ return -EINVAL;
+ }
+
+ gfn = gpa_to_gfn(kvm_eq.qaddr);
+ page = gfn_to_page(kvm, gfn);
+ if (is_error_page(page)) {
+ pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
+ return -EINVAL;
+ }
+
+ page_size = kvm_host_page_size(kvm, gfn);
+ if (1ull << kvm_eq.qshift > page_size) {
+ pr_warn("Incompatible host page size %lx!\n", page_size);
+ return -EINVAL;
+ }
+
+ qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
+
+ /*
+ * Back up the queue page guest address so the EQ page can be
+ * marked dirty for migration.
+ */
+ q->guest_qaddr = kvm_eq.qaddr;
+ q->guest_qshift = kvm_eq.qshift;
+
+ /*
+ * Unconditional Notification is forced by default at the
+ * OPAL level because the use of END ESBs is not supported by
+ * Linux.
+ */
+ rc = xive_native_configure_queue(xc->vp_id, q, priority,
+ (__be32 *) qaddr, kvm_eq.qshift, true);
+ if (rc) {
+ pr_err("Failed to configure queue %d for VCPU %d: %d\n",
+ priority, xc->server_num, rc);
+ put_page(page);
+ return rc;
+ }
+
+ /*
+ * Only restore the queue state when needed. When handling the
+ * H_INT_SET_QUEUE_CONFIG hcall, it should not be.
+ */
+ if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
+ rc = xive_native_set_queue_state(xc->vp_id, priority,
+ kvm_eq.qtoggle,
+ kvm_eq.qindex);
+ if (rc)
+ goto error;
+ }
+
+ rc = kvmppc_xive_attach_escalation(vcpu, priority,
+ xive->single_escalation);
+error:
+ if (rc)
+ kvmppc_xive_native_cleanup_queue(vcpu, priority);
+ return rc;
+}
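The EQ identifier carries the server number in the high bits and the priority in the low three, so the get and set paths demangle the same encoding. A pack/unpack sketch with the layout assumed from the uapi header:

#include <stdint.h>
#include <stdio.h>

/* layout assumed from the uapi header */
#define EQ_PRIORITY_SHIFT	0
#define EQ_PRIORITY_MASK	0x7ULL
#define EQ_SERVER_SHIFT		3
#define EQ_SERVER_MASK		0xfffffff8ULL

static uint64_t eq_idx(uint32_t server, uint8_t priority)
{
	return (((uint64_t)server << EQ_SERVER_SHIFT) & EQ_SERVER_MASK) |
	       (((uint64_t)priority << EQ_PRIORITY_SHIFT) & EQ_PRIORITY_MASK);
}

int main(void)
{
	uint64_t idx = eq_idx(5, 6);	/* hypothetical server 5, priority 6 */

	printf("idx=0x%llx server=%llu prio=%llu\n",
	       (unsigned long long)idx,
	       (unsigned long long)((idx & EQ_SERVER_MASK) >> EQ_SERVER_SHIFT),
	       (unsigned long long)((idx & EQ_PRIORITY_MASK) >>
				    EQ_PRIORITY_SHIFT));
	return 0;
}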
+
+static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
+ long eq_idx, u64 addr)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ struct xive_q *q;
+ void __user *ubufp = (void __user *) addr;
+ u32 server;
+ u8 priority;
+ struct kvm_ppc_xive_eq kvm_eq;
+ u64 qaddr;
+ u64 qshift;
+ u64 qeoi_page;
+ u32 escalate_irq;
+ u64 qflags;
+ int rc;
+
+ /*
+ * Demangle priority/server tuple from the EQ identifier
+ */
+ priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
+ KVM_XIVE_EQ_PRIORITY_SHIFT;
+ server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
+ KVM_XIVE_EQ_SERVER_SHIFT;
+
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_err("Can't find server %d\n", server);
+ return -ENOENT;
+ }
+ xc = vcpu->arch.xive_vcpu;
+
+ if (priority != xive_prio_from_guest(priority)) {
+ pr_err("invalid priority for queue %d for VCPU %d\n",
+ priority, server);
+ return -EINVAL;
+ }
+ q = &xc->queues[priority];
+
+ memset(&kvm_eq, 0, sizeof(kvm_eq));
+
+ if (!q->qpage)
+ return 0;
+
+ rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
+ &qeoi_page, &escalate_irq, &qflags);
+ if (rc)
+ return rc;
+
+ kvm_eq.flags = 0;
+ if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
+ kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
+
+ kvm_eq.qshift = q->guest_qshift;
+ kvm_eq.qaddr = q->guest_qaddr;
+
+ rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
+ &kvm_eq.qindex);
+ if (rc)
+ return rc;
+
+ pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
+ __func__, server, priority, kvm_eq.flags,
+ kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
+
+ if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
+{
+ int i;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+
+ if (!state->valid)
+ continue;
+
+ if (state->act_priority == MASKED)
+ continue;
+
+ state->eisn = 0;
+ state->act_server = 0;
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+ if (state->pt_number) {
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->pt_number,
+ 0, MASKED, 0);
+ }
+ }
+}
+
+static int kvmppc_xive_reset(struct kvmppc_xive *xive)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ unsigned int i;
+
+ pr_devel("%s\n", __func__);
+
+ mutex_lock(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int prio;
+
+ if (!xc)
+ continue;
+
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+
+ /* Single escalation, no queue 7 */
+ if (prio == 7 && xive->single_escalation)
+ break;
+
+ if (xc->esc_virq[prio]) {
+ free_irq(xc->esc_virq[prio], vcpu);
+ irq_dispose_mapping(xc->esc_virq[prio]);
+ kfree(xc->esc_virq_names[prio]);
+ xc->esc_virq[prio] = 0;
+ }
+
+ kvmppc_xive_native_cleanup_queue(vcpu, prio);
+ }
+ }
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (sb) {
+ arch_spin_lock(&sb->lock);
+ kvmppc_xive_reset_sources(sb);
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+
+ mutex_unlock(&kvm->lock);
+
+ return 0;
+}
+
+static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
+{
+ int j;
+
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
+ struct xive_irq_data *xd;
+ u32 hw_num;
+
+ if (!state->valid)
+ continue;
+
+ /*
+ * The struct kvmppc_xive_irq_state reflects the state
+ * of the EAS configuration and not the state of the
+ * source. The source is masked by setting the PQ bits to
+ * '-Q', which is what is being done before calling
+ * the KVM_DEV_XIVE_EQ_SYNC control.
+ *
+ * If a source EAS is configured, OPAL syncs the XIVE
+ * IC of the source and the XIVE IC of the previous
+ * target if any.
+ *
+ * So it should be fine ignoring MASKED sources as
+ * they have been synced already.
+ */
+ if (state->act_priority == MASKED)
+ continue;
+
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+ xive_native_sync_source(hw_num);
+ xive_native_sync_queue(hw_num);
+ }
+}
+
+static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int prio;
+
+ if (!xc)
+ return -ENOENT;
+
+ for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+ struct xive_q *q = &xc->queues[prio];
+
+ if (!q->qpage)
+ continue;
+
+ /* Mark EQ page dirty for migration */
+ mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
+ }
+ return 0;
+}
+
+static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ unsigned int i;
+
+ pr_devel("%s\n", __func__);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (sb) {
+ arch_spin_lock(&sb->lock);
+ kvmppc_xive_native_sync_sources(sb);
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvmppc_xive_native_vcpu_eq_sync(vcpu);
+ }
+ mutex_unlock(&kvm->lock);
+
+ return 0;
+}
+
+static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XIVE_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_XIVE_RESET:
+ return kvmppc_xive_reset(xive);
+ case KVM_DEV_XIVE_EQ_SYNC:
+ return kvmppc_xive_native_eq_sync(xive);
+ }
+ break;
+ case KVM_DEV_XIVE_GRP_SOURCE:
+ return kvmppc_xive_native_set_source(xive, attr->attr,
+ attr->addr);
+ case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
+ return kvmppc_xive_native_set_source_config(xive, attr->attr,
+ attr->addr);
+ case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+ return kvmppc_xive_native_set_queue_config(xive, attr->attr,
+ attr->addr);
+ case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
+ return kvmppc_xive_native_sync_source(xive, attr->attr,
+ attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+ return kvmppc_xive_native_get_queue_config(xive, attr->attr,
+ attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_XIVE_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_XIVE_RESET:
+ case KVM_DEV_XIVE_EQ_SYNC:
+ return 0;
+ }
+ break;
+ case KVM_DEV_XIVE_GRP_SOURCE:
+ case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
+ case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
+ if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
+ attr->attr < KVMPPC_XIVE_NR_IRQS)
+ return 0;
+ break;
+ case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+ return 0;
+ }
+ return -ENXIO;
+}
+
+/*
+ * Called when device fd is closed
+ */
+static void kvmppc_xive_native_release(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ int i;
+ int was_ready;
+
+ debugfs_remove(xive->dentry);
+
+ pr_devel("Releasing xive native device\n");
+
+ /*
+ * Clearing mmu_ready temporarily while holding kvm->lock
+ * is a way of ensuring that no vcpus can enter the guest
+ * until we drop kvm->lock. Doing kick_all_cpus_sync()
+ * ensures that any vcpu executing inside the guest has
+ * exited the guest. Once kick_all_cpus_sync() has finished,
+ * we know that no vcpu can be executing the XIVE push or
+ * pull code or accessing the XIVE MMIO regions.
+ *
+ * Since this is the device release function, we know that
+ * userspace does not have any open fd or mmap referring to
+ * the device. Therefore there can not be any of the
+ * device attribute set/get, mmap, or page fault functions
+ * being executed concurrently, and similarly, the
+ * connect_vcpu and set/clr_mapped functions also cannot
+ * be being executed.
+ */
+ was_ready = kvm->arch.mmu_ready;
+ kvm->arch.mmu_ready = 0;
+ kick_all_cpus_sync();
+
+ /*
+ * We should clean up the vCPU interrupt presenters first.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /*
+ * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+ * (i.e. kvmppc_xive_native_[gs]et_vp) can be being done.
+ */
+ mutex_lock(&vcpu->mutex);
+ kvmppc_xive_native_cleanup_vcpu(vcpu);
+ mutex_unlock(&vcpu->mutex);
+ }
+
+ kvm->arch.xive = NULL;
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ if (xive->src_blocks[i])
+ kvmppc_xive_free_sources(xive->src_blocks[i]);
+ kfree(xive->src_blocks[i]);
+ xive->src_blocks[i] = NULL;
+ }
+
+ if (xive->vp_base != XIVE_INVALID_VP)
+ xive_native_free_vp_block(xive->vp_base);
+
+ kvm->arch.mmu_ready = was_ready;
+
+ /*
+ * A reference of the kvmppc_xive pointer is now kept under
+ * the xive_devices struct of the machine for reuse. It is
+ * freed when the VM is destroyed for now until we fix all the
+ * execution paths.
+ */
+
+ kfree(dev);
+}
+
+/*
+ * Create a XIVE device. kvm->lock is held.
+ */
+static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xive *xive;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ pr_devel("Creating xive native device\n");
+
+ if (kvm->arch.xive)
+ return -EEXIST;
+
+ xive = kvmppc_xive_get_device(kvm, type);
+ if (!xive)
+ return -ENOMEM;
+
+ dev->private = xive;
+ xive->dev = dev;
+ xive->kvm = kvm;
+ kvm->arch.xive = xive;
+ mutex_init(&xive->mapping_lock);
+
+ /*
+	 * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large default;
+	 * knowing the maximum number of CPUs the VM was configured
+	 * with would improve our usage of the XIVE VP space.
+ */
+ xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
+ pr_devel("VP_Base=%x\n", xive->vp_base);
+
+ if (xive->vp_base == XIVE_INVALID_VP)
+ ret = -ENXIO;
+
+ xive->single_escalation = xive_native_has_single_escalation();
+ xive->ops = &kvmppc_xive_native_ops;
+
+ if (ret)
+ kfree(xive);
+
+ return ret;
+}
+
+/*
+ * Interrupt Pending Buffer (IPB) offset
+ */
+#define TM_IPB_SHIFT 40
+#define TM_IPB_MASK (((u64) 0xFF) << TM_IPB_SHIFT)
+
+int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u64 opal_state;
+ int rc;
+
+ if (!kvmppc_xive_enabled(vcpu))
+ return -EPERM;
+
+ if (!xc)
+ return -ENOENT;
+
+ /* Thread context registers. We only care about IPB and CPPR */
+ val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;
+
+ /* Get the VP state from OPAL */
+ rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
+ if (rc)
+ return rc;
+
+ /*
+	 * Capture the backup of the IPB register in the NVT structure
+	 * and merge it into our KVM VP state.
+ */
+ val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);
+
+ pr_devel("%s NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
+ __func__,
+ vcpu->arch.xive_saved_state.nsr,
+ vcpu->arch.xive_saved_state.cppr,
+ vcpu->arch.xive_saved_state.ipb,
+ vcpu->arch.xive_saved_state.pipr,
+ vcpu->arch.xive_saved_state.w01,
+ (u32) vcpu->arch.xive_cam_word, opal_state);
+
+ return 0;
+}
+
+int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+
+ pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
+ val->xive_timaval[0], val->xive_timaval[1]);
+
+ if (!kvmppc_xive_enabled(vcpu))
+ return -EPERM;
+
+ if (!xc || !xive)
+ return -ENOENT;
+
+ /* We can't update the state of a "pushed" VCPU */
+ if (WARN_ON(vcpu->arch.xive_pushed))
+ return -EBUSY;
+
+ /*
+ * Restore the thread context registers. IPB and CPPR should
+ * be the only ones that matter.
+ */
+ vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];
+
+ /*
+ * There is no need to restore the XIVE internal state (IPB
+	 * stored in the NVT) as the IPB register was merged into the
+	 * KVM VP state when it was captured.
+ */
+ return 0;
+}
+
+static int xive_native_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xive *xive = m->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ unsigned int i;
+
+ if (!kvm)
+ return 0;
+
+ seq_puts(m, "=========\nVCPU state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n",
+ xc->server_num,
+ vcpu->arch.xive_saved_state.nsr,
+ vcpu->arch.xive_saved_state.cppr,
+ vcpu->arch.xive_saved_state.ipb,
+ vcpu->arch.xive_saved_state.pipr,
+ vcpu->arch.xive_saved_state.w01,
+ (u32) vcpu->arch.xive_cam_word);
+
+ kvmppc_xive_debug_show_queues(m, vcpu);
+ }
+
+ return 0;
+}
+
+static int xive_native_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xive_native_debug_show, inode->i_private);
+}
+
+static const struct file_operations xive_native_debug_fops = {
+ .open = xive_native_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xive_native_debugfs_init(struct kvmppc_xive *xive)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
+ xive, &xive_native_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static void kvmppc_xive_native_init(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+
+ /* Register some debug interfaces */
+ xive_native_debugfs_init(xive);
+}
+
+struct kvm_device_ops kvm_xive_native_ops = {
+ .name = "kvm-xive-native",
+ .create = kvmppc_xive_native_create,
+ .init = kvmppc_xive_native_init,
+ .release = kvmppc_xive_native_release,
+ .set_attr = kvmppc_xive_native_set_attr,
+ .get_attr = kvmppc_xive_native_get_attr,
+ .has_attr = kvmppc_xive_native_has_attr,
+ .mmap = kvmppc_xive_native_mmap,
+};
+
+void kvmppc_xive_native_init_module(void)
+{
+ ;
+}
+
+void kvmppc_xive_native_exit_module(void)
+{
+ ;
+}
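
The attribute handlers above are reached through the generic KVM device-attribute ioctls. A rough userspace sketch, assuming a device fd obtained with KVM_CREATE_DEVICE and the KVM_DEV_XIVE_* constants from the uapi header added by this series (error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask the device to reset: routed to kvmppc_xive_native_set_attr(). */
static int xive_device_reset(int xive_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_XIVE_GRP_CTRL;
	attr.attr = KVM_DEV_XIVE_RESET;

	return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
}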
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 033363d6e764..0737acfd17f1 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -130,24 +130,14 @@ static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
*/
prio = ffs(pending) - 1;
- /*
- * If the most favoured prio we found pending is less
- * favored (or equal) than a pending IPI, we return
- * the IPI instead.
- *
- * Note: If pending was 0 and mfrr is 0xff, we will
- * not spurriously take an IPI because mfrr cannot
- * then be smaller than cppr.
- */
- if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- break;
- }
-
/* Don't scan past the guest cppr */
- if (prio >= xc->cppr || prio > 7)
+ if (prio >= xc->cppr || prio > 7) {
+ if (xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ }
break;
+ }
/* Grab queue and pointers */
q = &xc->queues[prio];
@@ -184,9 +174,12 @@ skip_ipi:
* been set and another occurrence of the IPI will trigger.
*/
if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
- if (scan_type == scan_fetch)
+ if (scan_type == scan_fetch) {
GLUE(X_PFX,source_eoi)(xc->vp_ipi,
&xc->vp_ipi_data);
+ q->idx = idx;
+ q->toggle = toggle;
+ }
/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
WARN_ON(hirq && hirq != XICS_IPI);
@@ -199,32 +192,41 @@ skip_ipi:
if (hirq == XICS_DUMMY)
goto skip_ipi;
- /* If fetching, update queue pointers */
- if (scan_type == scan_fetch) {
- q->idx = idx;
- q->toggle = toggle;
- }
-
- /* Something found, stop searching */
- if (hirq)
- break;
-
- /* Clear the pending bit on the now empty queue */
- pending &= ~(1 << prio);
+ /* Clear the pending bit if the queue is now empty */
+ if (!hirq) {
+ pending &= ~(1 << prio);
- /*
- * Check if the queue count needs adjusting due to
- * interrupts being moved away.
- */
- if (atomic_read(&q->pending_count)) {
- int p = atomic_xchg(&q->pending_count, 0);
- if (p) {
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(p > atomic_read(&q->count));
+ WARN_ON(p > atomic_read(&q->count));
#endif
- atomic_sub(p, &q->count);
+ atomic_sub(p, &q->count);
+ }
}
}
+
+ /*
+	 * If the most favoured prio we found pending is less
+	 * favoured than (or equal to) a pending IPI, we return
+	 * the IPI instead.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
}
/* If we are just taking a "peek", do nothing else */
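
The hunk above reorders the scan loop: the pending IPI is no longer checked before the queues are walked, but only once a priority has been scanned (and, when fetching, its idx/toggle saved). A distilled sketch of the final comparison, assuming smaller values mean more-favoured priorities (function and parameter names are local to this example; XICS_IPI is the constant used in the hunk):

/* Return the IPI if it is at least as favoured as the scanned source
 * and not masked by the current processor priority. */
static unsigned int pick_source(unsigned int prio, unsigned int hirq,
				unsigned int mfrr, unsigned int cppr)
{
	if (prio >= mfrr && mfrr < cppr)
		return XICS_IPI;
	return hirq;
}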
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8885377ec3e0..3393b166817a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -570,6 +570,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PPC_GET_CPU_CHAR:
r = 1;
break;
+#ifdef CONFIG_KVM_XIVE
+ case KVM_CAP_PPC_IRQ_XIVE:
+ /*
+		 * We need XIVE to be enabled on the platform (which
+		 * implies a POWER9 processor) and to be running on the
+		 * PowerNV platform, as nested virtualization is not
+		 * yet supported.
+ */
+ r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
+ break;
+#endif
case KVM_CAP_PPC_ALLOC_HTAB:
r = hv_enabled;
@@ -644,9 +654,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else
r = num_online_cpus();
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
@@ -753,6 +760,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
else
kvmppc_xics_free_icp(vcpu);
break;
+ case KVMPPC_IRQ_XIVE:
+ kvmppc_xive_native_cleanup_vcpu(vcpu);
+ break;
}
kvmppc_core_vcpu_free(vcpu);
@@ -1941,6 +1951,30 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
}
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+ case KVM_CAP_PPC_IRQ_XIVE: {
+ struct fd f;
+ struct kvm_device *dev;
+
+ r = -EBADF;
+ f = fdget(cap->args[0]);
+ if (!f.file)
+ break;
+
+ r = -ENXIO;
+ if (!xive_enabled())
+ break;
+
+ r = -EPERM;
+ dev = kvm_device_from_filp(f.file);
+ if (dev)
+ r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
+ cap->args[1]);
+
+ fdput(f);
+ break;
+ }
+#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_FWNMI:
r = -EINVAL;
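
For reference, the new capability is enabled per vcpu. A minimal userspace sketch matching the kvm_vcpu_ioctl_enable_cap() hunk above, where args[0] is the XIVE device fd and args[1] the vcpu's server number (error handling elided):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_connect_xive(int vcpu_fd, int xive_fd, uint32_t server)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_IRQ_XIVE;
	cap.args[0] = xive_fd;
	cap.args[1] = server;

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}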
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index e27792d0b744..8366c2abeafc 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -539,7 +539,8 @@ _GLOBAL(flush_hash_pages)
#ifdef CONFIG_SMP
lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
- lwz r8,TASK_CPU(r2)
+ tophys (r8, r2)
+ lwz r8, TASK_CPU(r8)
oris r8,r8,9
10: lwarx r0,0,r9
cmpi 0,r0,0
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index c5c9ff2d7afc..b5d92dc32844 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -556,7 +556,7 @@ static int __init add_huge_page_size(unsigned long long size)
if (size <= PAGE_SIZE || !is_power_of_2(size))
return -EINVAL;
- mmu_psize = check_and_get_huge_psize(size);
+ mmu_psize = check_and_get_huge_psize(shift);
if (mmu_psize < 0)
return -EINVAL;
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 0c037e933e55..7782201e5fe8 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -521,6 +521,9 @@ u32 xive_native_default_eq_shift(void)
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
+unsigned long xive_tima_os;
+EXPORT_SYMBOL_GPL(xive_tima_os);
+
bool __init xive_native_init(void)
{
struct device_node *np;
@@ -573,6 +576,14 @@ bool __init xive_native_init(void)
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
+ /* Resource 2 is OS window */
+ if (of_address_to_resource(np, 2, &r)) {
+ pr_err("Failed to get thread mgmnt area resource\n");
+ return false;
+ }
+
+ xive_tima_os = r.start;
+
/* Grab size of provisionning pages */
xive_parse_provisioning(np);
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e66745decea1..ee32c66e1af3 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -27,7 +27,7 @@ config RISCV
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+ select GENERIC_ATOMIC64 if !64BIT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
@@ -35,7 +35,6 @@ config RISCV
select HAVE_PERF_EVENTS
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
- select RISCV_ISA_A if SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select HAVE_ARCH_TRACEHOOK
@@ -195,9 +194,6 @@ config RISCV_ISA_C
If you don't know what to do here, say Y.
-config RISCV_ISA_A
- def_bool y
-
menu "supported PMU type"
depends on PERF_EVENTS
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index c6342e638ef7..6b0741c9f348 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -39,9 +39,8 @@ endif
KBUILD_CFLAGS += -Wall
# ISA string setting
-riscv-march-$(CONFIG_ARCH_RV32I) := rv32im
-riscv-march-$(CONFIG_ARCH_RV64I) := rv64im
-riscv-march-$(CONFIG_RISCV_ISA_A) := $(riscv-march-y)a
+riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
+riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index cccd12cf27d4..5a7a19d9aa7f 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += compat.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
+generic-y += extable.h
generic-y += dma.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
index bfc7f099ab1f..52a1fbdeab3b 100644
--- a/arch/riscv/include/asm/bug.h
+++ b/arch/riscv/include/asm/bug.h
@@ -21,7 +21,12 @@
#include <asm/asm.h>
#ifdef CONFIG_GENERIC_BUG
-#define __BUG_INSN _AC(0x00100073, UL) /* ebreak */
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_32 _UL(0x3)
+#define __COMPRESSED_INSN_MASK _UL(0xffff)
+
+#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */
#ifndef __ASSEMBLY__
typedef u32 bug_insn_t;
@@ -38,38 +43,46 @@ typedef u32 bug_insn_t;
#define __BUG_ENTRY \
__BUG_ENTRY_ADDR "\n\t" \
__BUG_ENTRY_FILE "\n\t" \
- RISCV_SHORT " %1"
+ RISCV_SHORT " %1\n\t" \
+ RISCV_SHORT " %2"
#else
#define __BUG_ENTRY \
- __BUG_ENTRY_ADDR
+ __BUG_ENTRY_ADDR "\n\t" \
+ RISCV_SHORT " %2"
#endif
-#define BUG() \
+#define __BUG_FLAGS(flags) \
do { \
__asm__ __volatile__ ( \
"1:\n\t" \
"ebreak\n" \
- ".pushsection __bug_table,\"a\"\n\t" \
+ ".pushsection __bug_table,\"aw\"\n\t" \
"2:\n\t" \
__BUG_ENTRY "\n\t" \
- ".org 2b + %2\n\t" \
+ ".org 2b + %3\n\t" \
".popsection" \
: \
: "i" (__FILE__), "i" (__LINE__), \
- "i" (sizeof(struct bug_entry))); \
- unreachable(); \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
} while (0)
+
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_GENERIC_BUG */
#ifndef __ASSEMBLY__
-#define BUG() \
-do { \
+#define __BUG_FLAGS(flags) do { \
__asm__ __volatile__ ("ebreak\n"); \
- unreachable(); \
} while (0)
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_GENERIC_BUG */
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
#else /* CONFIG_SMP */
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 28a0d1cb374c..3c3c26c3a1f1 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -14,64 +14,95 @@
#ifndef _ASM_RISCV_CSR_H
#define _ASM_RISCV_CSR_H
+#include <asm/asm.h>
#include <linux/const.h>
/* Status register flags */
-#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
-#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
-#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
-
-#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
-#define SR_FS_OFF _AC(0x00000000, UL)
-#define SR_FS_INITIAL _AC(0x00002000, UL)
-#define SR_FS_CLEAN _AC(0x00004000, UL)
-#define SR_FS_DIRTY _AC(0x00006000, UL)
-
-#define SR_XS _AC(0x00018000, UL) /* Extension Status */
-#define SR_XS_OFF _AC(0x00000000, UL)
-#define SR_XS_INITIAL _AC(0x00008000, UL)
-#define SR_XS_CLEAN _AC(0x00010000, UL)
-#define SR_XS_DIRTY _AC(0x00018000, UL)
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
#ifndef CONFIG_64BIT
-#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
+#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
#else
-#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
#endif
/* SATP flags */
-#if __riscv_xlen == 32
-#define SATP_PPN _AC(0x003FFFFF, UL)
-#define SATP_MODE_32 _AC(0x80000000, UL)
-#define SATP_MODE SATP_MODE_32
+#ifndef CONFIG_64BIT
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE SATP_MODE_32
#else
-#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
-#define SATP_MODE_39 _AC(0x8000000000000000, UL)
-#define SATP_MODE SATP_MODE_39
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE SATP_MODE_39
#endif
-/* Interrupt Enable and Interrupt Pending flags */
-#define SIE_SSIE _AC(0x00000002, UL) /* Software Interrupt Enable */
-#define SIE_STIE _AC(0x00000020, UL) /* Timer Interrupt Enable */
-#define SIE_SEIE _AC(0x00000200, UL) /* External Interrupt Enable */
-
-#define EXC_INST_MISALIGNED 0
-#define EXC_INST_ACCESS 1
-#define EXC_BREAKPOINT 3
-#define EXC_LOAD_ACCESS 5
-#define EXC_STORE_ACCESS 7
-#define EXC_SYSCALL 8
-#define EXC_INST_PAGE_FAULT 12
-#define EXC_LOAD_PAGE_FAULT 13
-#define EXC_STORE_PAGE_FAULT 15
+/* SCAUSE */
+#define SCAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+#define IRQ_U_SOFT 0
+#define IRQ_S_SOFT 1
+#define IRQ_M_SOFT 3
+#define IRQ_U_TIMER 4
+#define IRQ_S_TIMER 5
+#define IRQ_M_TIMER 7
+#define IRQ_U_EXT 8
+#define IRQ_S_EXT 9
+#define IRQ_M_EXT 11
+
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+
+/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
+#define SIE_SSIE (_AC(0x1, UL) << IRQ_S_SOFT)
+#define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER)
+#define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT)
+
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
#ifndef __ASSEMBLY__
#define csr_swap(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrrw %0, " #csr ", %1" \
+ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
@@ -80,7 +111,7 @@
#define csr_read(csr) \
({ \
register unsigned long __v; \
- __asm__ __volatile__ ("csrr %0, " #csr \
+ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
: "=r" (__v) : \
: "memory"); \
__v; \
@@ -89,7 +120,7 @@
#define csr_write(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrw " #csr ", %0" \
+ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
@@ -97,7 +128,7 @@
#define csr_read_set(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrrs %0, " #csr ", %1" \
+ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
@@ -106,7 +137,7 @@
#define csr_set(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrs " #csr ", %0" \
+ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
@@ -114,7 +145,7 @@
#define csr_read_clear(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrrc %0, " #csr ", %1" \
+ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
@@ -123,7 +154,7 @@
#define csr_clear(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrc " #csr ", %0" \
+ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
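
Switching the csr_* macros from `#csr` stringification to __ASM_STR() lets callers pass either a symbolic CSR name or one of the numeric CSR_* addresses defined above. A small caller sketch (the function is illustrative only):

static inline unsigned long sstatus_mask_irqs(void)
{
	/* Both helpers accept the CSR_* constants defined above. */
	unsigned long status = csr_read(CSR_SSTATUS);

	csr_clear(CSR_SSTATUS, SR_SIE);
	return status;
}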
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 697fc23b0d5a..ce0cd7d77eb0 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -27,13 +27,7 @@
#define ELF_CLASS ELFCLASS32
#endif
-#if defined(__LITTLE_ENDIAN)
#define ELF_DATA ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
-#define ELF_DATA ELFDATA2MSB
-#else
-#error "Unknown endianness"
-#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 66641624d8a5..4ad6409c4647 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -7,18 +7,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifndef CONFIG_RISCV_ISA_A
-/*
- * Use the generic interrupt disabling versions if the A extension
- * is not supported.
- */
-#ifdef CONFIG_SMP
-#error "Can't support generic futex calls without A extension on SMP"
-#endif
-#include <asm-generic/futex.h>
-
-#else /* CONFIG_RISCV_ISA_A */
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
@@ -124,5 +112,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return ret;
}
-#endif /* CONFIG_RISCV_ISA_A */
#endif /* _ASM_FUTEX_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 07a3c6d5706f..1a69b3bcd371 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -21,25 +21,25 @@
/* read interrupt enabled status */
static inline unsigned long arch_local_save_flags(void)
{
- return csr_read(sstatus);
+ return csr_read(CSR_SSTATUS);
}
/* unconditionally enable interrupts */
static inline void arch_local_irq_enable(void)
{
- csr_set(sstatus, SR_SIE);
+ csr_set(CSR_SSTATUS, SR_SIE);
}
/* unconditionally disable interrupts */
static inline void arch_local_irq_disable(void)
{
- csr_clear(sstatus, SR_SIE);
+ csr_clear(CSR_SSTATUS, SR_SIE);
}
/* get status and disable interrupts */
static inline unsigned long arch_local_irq_save(void)
{
- return csr_read_clear(sstatus, SR_SIE);
+ return csr_read_clear(CSR_SSTATUS, SR_SIE);
}
/* test flags */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- csr_set(sstatus, flags & SR_SIE);
+ csr_set(CSR_SSTATUS, flags & SR_SIE);
}
#endif /* _ASM_RISCV_IRQFLAGS_H */
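
With the helpers now taking CSR numbers, the usual critical-section pattern is unchanged. A sketch of a typical caller (hypothetical function, for illustration):

static void touch_percpu_state(void)
{
	unsigned long flags;

	/* csrrc: returns the old sstatus and clears SR_SIE atomically */
	flags = arch_local_irq_save();

	/* ... code that must not race with a local interrupt ... */

	/* re-enables interrupts only if SR_SIE was set in flags */
	arch_local_irq_restore(flags);
}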
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 336d60ec5698..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *task)
@@ -39,61 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
{
}
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU. RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches. To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart. This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
- unsigned int cpu = smp_processor_id();
- cpumask_t *mask = &mm->context.icache_stale_mask;
-
- if (cpumask_test_cpu(cpu, mask)) {
- cpumask_clear_cpu(cpu, mask);
- /*
- * Ensure the remote hart's writes are visible to this hart.
- * This pairs with a barrier in flush_icache_mm.
- */
- smp_mb();
- local_flush_icache_all();
- }
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next, struct task_struct *task)
-{
- if (likely(prev != next)) {
- /*
- * Mark the current MM context as inactive, and the next as
- * active. This is at least used by the icache flushing
- * routines in order to determine who should
- */
- unsigned int cpu = smp_processor_id();
-
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
-
- /*
- * Use the old spbtr name instead of using the current satp
- * name to support binutils 2.29 which doesn't know about the
- * privileged ISA 1.10 yet.
- */
- csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
- local_flush_tlb_all();
-
- flush_icache_deferred(next);
- }
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index d35ec2f41381..9c867a4bac83 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -70,47 +70,38 @@ struct pt_regs {
/* Helpers for working with the instruction pointer */
-#define GET_IP(regs) ((regs)->sepc)
-#define SET_IP(regs, val) (GET_IP(regs) = (val))
-
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
- return GET_IP(regs);
+ return regs->sepc;
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- SET_IP(regs, val);
+ regs->sepc = val;
}
#define profile_pc(regs) instruction_pointer(regs)
/* Helpers for working with the user stack pointer */
-#define GET_USP(regs) ((regs)->sp)
-#define SET_USP(regs, val) (GET_USP(regs) = (val))
-
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
- return GET_USP(regs);
+ return regs->sp;
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- SET_USP(regs, val);
+ regs->sp = val;
}
/* Helpers for working with the frame pointer */
-#define GET_FP(regs) ((regs)->s0)
-#define SET_FP(regs, val) (GET_FP(regs) = (val))
-
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
- return GET_FP(regs);
+ return regs->s0;
}
static inline void frame_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- SET_FP(regs, val);
+ regs->s0 = val;
}
static inline unsigned long regs_return_value(struct pt_regs *regs)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index b6bb10b92fe2..19f231615510 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -26,22 +26,27 @@
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8
-#define SBI_CALL(which, arg0, arg1, arg2) ({ \
+#define SBI_CALL(which, arg0, arg1, arg2, arg3) ({ \
register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); \
register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); \
register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); \
+ register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); \
register uintptr_t a7 asm ("a7") = (uintptr_t)(which); \
asm volatile ("ecall" \
: "+r" (a0) \
- : "r" (a1), "r" (a2), "r" (a7) \
+ : "r" (a1), "r" (a2), "r" (a3), "r" (a7) \
: "memory"); \
a0; \
})
/* Lazy implementations until SBI is finalized */
-#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0)
-#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0)
-#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0)
+#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0, 0)
+#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0, 0)
+#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0, 0)
+#define SBI_CALL_3(which, arg0, arg1, arg2) \
+ SBI_CALL(which, arg0, arg1, arg2, 0)
+#define SBI_CALL_4(which, arg0, arg1, arg2, arg3) \
+ SBI_CALL(which, arg0, arg1, arg2, arg3)
static inline void sbi_console_putchar(int ch)
{
@@ -86,7 +91,7 @@ static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
unsigned long start,
unsigned long size)
{
- SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask);
+ SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size);
}
static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
@@ -94,7 +99,7 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
unsigned long size,
unsigned long asid)
{
- SBI_CALL_1(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask);
+ SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
}
#endif
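
Before this change, SBI_CALL_1() in the two sfence wrappers silently dropped start and size, so the range never reached firmware. With SBI_CALL_3/4 the arguments are passed through in a1-a3. A caller sketch (function name is illustrative):

/* Flush a VA range on the harts named in hart_mask; start and size
 * now actually reach the SBI implementation. */
static void flush_remote_range(const unsigned long *hart_mask,
			       unsigned long start, unsigned long size)
{
	sbi_remote_sfence_vma(hart_mask, start, size);
}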
diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/arch/riscv/include/asm/sifive_l2_cache.h
new file mode 100644
index 000000000000..04f6748fc50b
--- /dev/null
+++ b/arch/riscv/include/asm/sifive_l2_cache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ */
+
+#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
+#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
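
A consumer would hook the chain roughly as follows (hypothetical driver code; the notifier data pointer carries the short message string passed by the L2 driver added later in this patch):

#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/sifive_l2_cache.h>

static int example_l2_err_notify(struct notifier_block *nb,
				 unsigned long event, void *msg)
{
	if (event == SIFIVE_L2_ERR_TYPE_UE)
		pr_crit("uncorrectable L2 error: %s\n", (char *)msg);
	return NOTIFY_OK;
}

static struct notifier_block example_l2_err_nb = {
	.notifier_call = example_l2_err_notify,
};

/* in driver init: register_sifive_l2_error_notifier(&example_l2_err_nb); */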
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 1c9cc8389928..9c039870019b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -28,7 +28,9 @@
#include <asm/processor.h>
#include <asm/csr.h>
-typedef unsigned long mm_segment_t;
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
/*
* low level task data that entry.S needs immediate access to
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index fb53a8089e76..b26f407be5c8 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -23,6 +23,7 @@
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
+#include <asm/extable.h>
#include <asm/asm.h>
#define __enable_user_access() \
@@ -38,8 +39,10 @@
* For historical reasons, these macros are grossly misnamed.
*/
-#define KERNEL_DS (~0UL)
-#define USER_DS (TASK_SIZE)
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
#define get_fs() (current_thread_info()->addr_limit)
@@ -48,9 +51,9 @@ static inline void set_fs(mm_segment_t fs)
current_thread_info()->addr_limit = fs;
}
-#define segment_eq(a, b) ((a) == (b))
+#define segment_eq(a, b) ((a).seg == (b).seg)
-#define user_addr_max() (get_fs())
+#define user_addr_max() (get_fs().seg)
/**
@@ -82,7 +85,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
{
const mm_segment_t fs = get_fs();
- return (size <= fs) && (addr <= (fs - size));
+ return size <= fs.seg && addr <= fs.seg - size;
}
/*
@@ -98,21 +101,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
* on our cache or tlb entries.
*/
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *state);
-
-#if defined(__LITTLE_ENDIAN)
-#define __MSW 1
#define __LSW 0
-#elif defined(__BIG_ENDIAN)
-#define __MSW 0
-#define __LSW 1
-#else
-#error "Unknown endianness"
-#endif
+#define __MSW 1
/*
* The "__xxx" versions of the user access functions do not verify the address
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index dac98348c6a3..578bb5efc085 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -312,9 +312,6 @@ void asm_offsets(void)
- offsetof(struct task_struct, thread.fstate.f[0])
);
- /* The assembler needs access to THREAD_SIZE as well. */
- DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
-
/*
* We allocate a pt_regs on the stack when entering the kernel. This
* ensures the alignment is sane.
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index cf2fca12414a..c8d2a3223099 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -136,8 +136,7 @@ static void c_stop(struct seq_file *m, void *v)
static int c_show(struct seq_file *m, void *v)
{
unsigned long cpu_id = (unsigned long)v - 1;
- struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
- NULL);
+ struct device_node *node = of_get_cpu_node(cpu_id, NULL);
const char *compat, *isa, *mmu;
seq_printf(m, "processor\t: %lu\n", cpu_id);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fd9b57c8b4ce..1c1ecc238cfa 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -37,11 +37,11 @@
* the kernel thread pointer. If we came from the kernel, sscratch
* will contain 0, and we should continue on the current TP.
*/
- csrrw tp, sscratch, tp
+ csrrw tp, CSR_SSCRATCH, tp
bnez tp, _save_context
_restore_kernel_tpsp:
- csrr tp, sscratch
+ csrr tp, CSR_SSCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
REG_S sp, TASK_TI_USER_SP(tp)
@@ -87,11 +87,11 @@ _save_context:
li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp)
- csrrc s1, sstatus, t0
- csrr s2, sepc
- csrr s3, sbadaddr
- csrr s4, scause
- csrr s5, sscratch
+ csrrc s1, CSR_SSTATUS, t0
+ csrr s2, CSR_SEPC
+ csrr s3, CSR_STVAL
+ csrr s4, CSR_SCAUSE
+ csrr s5, CSR_SSCRATCH
REG_S s0, PT_SP(sp)
REG_S s1, PT_SSTATUS(sp)
REG_S s2, PT_SEPC(sp)
@@ -107,8 +107,8 @@ _save_context:
.macro RESTORE_ALL
REG_L a0, PT_SSTATUS(sp)
REG_L a2, PT_SEPC(sp)
- csrw sstatus, a0
- csrw sepc, a2
+ csrw CSR_SSTATUS, a0
+ csrw CSR_SEPC, a2
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
@@ -155,7 +155,7 @@ ENTRY(handle_exception)
* Set sscratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel
*/
- csrw sscratch, x0
+ csrw CSR_SSCRATCH, x0
/* Load the global pointer */
.option push
@@ -248,7 +248,7 @@ resume_userspace:
* Save TP into sscratch, so we can find the kernel data structures
* again.
*/
- csrw sscratch, tp
+ csrw CSR_SSCRATCH, tp
restore_all:
RESTORE_ALL
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index fe884cd69abd..370c66ce187a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -23,7 +23,8 @@
__INIT
ENTRY(_start)
/* Mask all interrupts */
- csrw sie, zero
+ csrw CSR_SIE, zero
+ csrw CSR_SIP, zero
/* Load the global pointer */
.option push
@@ -68,14 +69,10 @@ clear_bss_done:
/* Restore C environment */
la tp, init_task
sw zero, TASK_TI_CPU(tp)
-
- la sp, init_thread_union
- li a0, ASM_THREAD_SIZE
- add sp, sp, a0
+ la sp, init_thread_union + THREAD_SIZE
/* Start the kernel */
- mv a0, s0
- mv a1, s1
+ mv a0, s1
call parse_dtb
tail start_kernel
@@ -89,7 +86,7 @@ relocate:
	/* Point stvec to virtual address of instruction after satp write */
la a0, 1f
add a0, a0, a1
- csrw stvec, a0
+ csrw CSR_STVEC, a0
/* Compute satp for kernel page tables, but don't load it yet */
la a2, swapper_pg_dir
@@ -99,18 +96,20 @@ relocate:
/*
* Load trampoline page directory, which will cause us to trap to
- * stvec if VA != PA, or simply fall through if VA == PA
+ * stvec if VA != PA, or simply fall through if VA == PA. We need a
+ * full fence here because setup_vm() just wrote these PTEs and we need
+ * to ensure the new translations are in use.
*/
la a0, trampoline_pg_dir
srl a0, a0, PAGE_SHIFT
or a0, a0, a1
sfence.vma
- csrw sptbr, a0
+ csrw CSR_SATP, a0
.align 2
1:
/* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park
- csrw stvec, a0
+ csrw CSR_STVEC, a0
/* Reload the global pointer */
.option push
@@ -118,8 +117,14 @@ relocate:
la gp, __global_pointer$
.option pop
- /* Switch to kernel page tables */
- csrw sptbr, a2
+ /*
+ * Switch to kernel page tables. A full fence is necessary in order to
+ * avoid using the trampoline translations, which are only correct for
+	 * the first superpage. Fetching the fence is guaranteed to work
+ * because that first superpage is translated the same way.
+ */
+ csrw CSR_SATP, a2
+ sfence.vma
ret
@@ -130,7 +135,7 @@ relocate:
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park
- csrw stvec, a3
+ csrw CSR_STVEC, a3
slli a3, a0, LGREG
la a1, __cpu_up_stack_pointer
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 48e6b7db83a1..6d8659388c49 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -14,17 +14,9 @@
/*
* Possible interrupt causes:
*/
-#define INTERRUPT_CAUSE_SOFTWARE 1
-#define INTERRUPT_CAUSE_TIMER 5
-#define INTERRUPT_CAUSE_EXTERNAL 9
-
-/*
- * The high order bit of the trap cause register is always set for
- * interrupts, which allows us to differentiate them from exceptions
- * quickly. The INTERRUPT_CAUSE_* macros don't contain that bit, so we
- * need to mask it off.
- */
-#define INTERRUPT_CAUSE_FLAG (1UL << (__riscv_xlen - 1))
+#define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
+#define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
+#define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
int arch_show_interrupts(struct seq_file *p, int prec)
{
@@ -37,7 +29,7 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
- switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) {
+ switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
case INTERRUPT_CAUSE_TIMER:
riscv_timer_interrupt();
break;
@@ -54,7 +46,8 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
handle_arch_irq(regs);
break;
default:
- panic("unexpected interrupt cause");
+ pr_alert("unexpected interrupt cause 0x%lx", regs->scause);
+ BUG();
}
irq_exit();
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index 667ee70defea..91626d9ae5f2 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
switch (idx) {
case RISCV_PMU_CYCLE:
- val = csr_read(cycle);
+ val = csr_read(CSR_CYCLE);
break;
case RISCV_PMU_INSTRET:
- val = csr_read(instret);
+ val = csr_read(CSR_INSTRET);
break;
default:
WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
index 2a53d26ffdd6..ed637aee514b 100644
--- a/arch/riscv/kernel/reset.c
+++ b/arch/riscv/kernel/reset.c
@@ -12,11 +12,15 @@
*/
#include <linux/reboot.h>
-#include <linux/export.h>
#include <asm/sbi.h>
-void (*pm_power_off)(void) = machine_power_off;
-EXPORT_SYMBOL(pm_power_off);
+static void default_power_off(void)
+{
+ sbi_shutdown();
+ while (1);
+}
+
+void (*pm_power_off)(void) = default_power_off;
void machine_restart(char *cmd)
{
@@ -26,11 +30,10 @@ void machine_restart(char *cmd)
void machine_halt(void)
{
- machine_power_off();
+ pm_power_off();
}
void machine_power_off(void)
{
- sbi_shutdown();
- while (1);
+ pm_power_off();
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 540a331d1376..d93bcce004e3 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -52,9 +52,11 @@ struct screen_info screen_info = {
atomic_t hart_lottery;
unsigned long boot_cpu_hartid;
-void __init parse_dtb(unsigned int hartid, void *dtb)
+void __init parse_dtb(phys_addr_t dtb_phys)
{
- if (early_init_dt_scan(__va(dtb)))
+ void *dtb = __va(dtb_phys);
+
+ if (early_init_dt_scan(dtb))
return;
pr_err("No DTB passed to the kernel\n");
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 837e1646091a..804d6ee4f3c5 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -234,6 +234,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/* Are we from a system call? */
if (regs->scause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->scause = -1UL;
+
/* If so, check system call restarting.. */
switch (regs->a0) {
case -ERESTART_RESTARTBLOCK:
@@ -272,6 +275,9 @@ static void do_signal(struct pt_regs *regs)
/* Did we come from a system call? */
if (regs->scause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->scause = -1UL;
+
/* Restart the system call - no handlers present */
switch (regs->a0) {
case -ERESTARTNOHAND:
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0c41d07ec281..b2537ffa855c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -42,7 +42,7 @@ unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
void __init smp_setup_processor_id(void)
{
- cpuid_to_hartid_map(0) = boot_cpu_hartid;
+ cpuid_to_hartid_map(0) = boot_cpu_hartid;
}
/* A collection of single bit ipi messages. */
@@ -53,7 +53,7 @@ static struct {
int riscv_hartid_to_cpuid(int hartid)
{
- int i = -1;
+ int i;
for (i = 0; i < NR_CPUS; i++)
if (cpuid_to_hartid_map(i) == hartid)
@@ -70,6 +70,12 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
for_each_cpu(cpu, in)
cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpuid_to_hartid_map(cpu);
+}
+
/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
@@ -89,7 +95,7 @@ void riscv_software_interrupt(void)
unsigned long *stats = ipi_data[smp_processor_id()].stats;
/* Clear pending IPI */
- csr_clear(sip, SIE_SSIE);
+ csr_clear(CSR_SIP, SIE_SSIE);
while (true) {
unsigned long ops;
@@ -199,52 +205,3 @@ void smp_send_reschedule(int cpu)
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
-/*
- * Performs an icache flush for the given MM context. RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
- unsigned int cpu;
- cpumask_t others, hmask, *mask;
-
- preempt_disable();
-
- /* Mark every hart's icache as needing a flush for this MM. */
- mask = &mm->context.icache_stale_mask;
- cpumask_setall(mask);
- /* Flush this hart's I$ now, and mark it as flushed. */
- cpu = smp_processor_id();
- cpumask_clear_cpu(cpu, mask);
- local_flush_icache_all();
-
- /*
- * Flush the I$ of other harts concurrently executing, and mark them as
- * flushed.
- */
- cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
- local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local) {
- cpumask_clear(&hmask);
- riscv_cpuid_to_hartid_mask(&others, &hmask);
- sbi_remote_fence_i(hmask.bits);
- } else {
- /*
- * It's assumed that at least one strongly ordered operation is
- * performed on this hart between setting a hart's cpumask bit
- * and scheduling this MM context on that hart. Sending an SBI
- * remote message will do this, but in the case where no
- * messages are sent we still need to order this hart's writes
- * with flush_icache_deferred().
- */
- smp_mb();
- }
-
- preempt_enable();
-}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index eb533b5c2c8c..7a0b62252524 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -47,6 +47,17 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpuid;
+
+	/* This covers the non-SMP use case mandated by the "nosmp" option */
+ if (max_cpus == 0)
+ return;
+
+ for_each_possible_cpu(cpuid) {
+ if (cpuid == smp_processor_id())
+ continue;
+ set_cpu_present(cpuid, true);
+ }
}
void __init setup_smp(void)
@@ -73,12 +84,19 @@ void __init setup_smp(void)
}
cpuid_to_hartid_map(cpuid) = hart;
- set_cpu_possible(cpuid, true);
- set_cpu_present(cpuid, true);
cpuid++;
}
BUG_ON(!found_boot_cpu);
+
+ if (cpuid > nr_cpu_ids)
+ pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+ cpuid, nr_cpu_ids);
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+ set_cpu_possible(cpuid, true);
+ }
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 4d403274c2e8..e80a5e8da119 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -33,9 +33,9 @@ static void notrace walk_stackframe(struct task_struct *task,
unsigned long fp, sp, pc;
if (regs) {
- fp = GET_FP(regs);
- sp = GET_USP(regs);
- pc = GET_IP(regs);
+ fp = frame_pointer(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
const register unsigned long current_sp __asm__ ("sp");
fp = (unsigned long)__builtin_frame_address(0);
@@ -64,12 +64,8 @@ static void notrace walk_stackframe(struct task_struct *task,
frame = (struct stackframe *)fp - 1;
sp = fp;
fp = frame->fp;
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
(unsigned long *)(fp - 8));
-#else
- pc = frame->ra - 0x4;
-#endif
}
}
@@ -82,8 +78,8 @@ static void notrace walk_stackframe(struct task_struct *task,
unsigned long *ksp;
if (regs) {
- sp = GET_USP(regs);
- pc = GET_IP(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
const register unsigned long current_sp __asm__ ("sp");
sp = current_sp;
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 24a9333dda2c..3d1a651dc54c 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -70,7 +70,7 @@ void do_trap(struct pt_regs *regs, int signo, int code,
&& printk_ratelimit()) {
pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
tsk->comm, task_pid_nr(tsk), signo, code, addr);
- print_vma_addr(KERN_CONT " in ", GET_IP(regs));
+ print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
show_regs(regs);
}
@@ -118,6 +118,17 @@ DO_ERROR_INFO(do_trap_ecall_s,
DO_ERROR_INFO(do_trap_ecall_m,
SIGILL, ILL_ILLTRP, "environment call from M-mode");
+#ifdef CONFIG_GENERIC_BUG
+static inline unsigned long get_break_insn_length(unsigned long pc)
+{
+ bug_insn_t insn;
+
+ if (probe_kernel_address((bug_insn_t *)pc, insn))
+ return 0;
+ return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
+}
+#endif /* CONFIG_GENERIC_BUG */
+
asmlinkage void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_GENERIC_BUG
@@ -129,8 +140,8 @@ asmlinkage void do_trap_break(struct pt_regs *regs)
case BUG_TRAP_TYPE_NONE:
break;
case BUG_TRAP_TYPE_WARN:
- regs->sepc += sizeof(bug_insn_t);
- return;
+ regs->sepc += get_break_insn_length(regs->sepc);
+ break;
case BUG_TRAP_TYPE_BUG:
die(regs, "Kernel BUG");
}
@@ -145,11 +156,14 @@ int is_valid_bugaddr(unsigned long pc)
{
bug_insn_t insn;
- if (pc < PAGE_OFFSET)
+ if (pc < VMALLOC_START)
return 0;
if (probe_kernel_address((bug_insn_t *)pc, insn))
return 0;
- return (insn == __BUG_INSN);
+ if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+ return (insn == __BUG_INSN_32);
+ else
+ return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */
@@ -159,9 +173,9 @@ void __init trap_init(void)
* Set sup0 scratch register to 0, indicating to exception vector
* that we are presently executing in the kernel
*/
- csr_write(sscratch, 0);
+ csr_write(CSR_SSCRATCH, 0);
/* Set the exception vector address */
- csr_write(stvec, &handle_exception);
+ csr_write(CSR_STVEC, &handle_exception);
/* Enable all interrupts */
- csr_write(sie, -1);
+ csr_write(CSR_SIE, -1);
}
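
The decode rule used by get_break_insn_length() and is_valid_bugaddr() relies on the RISC-V encoding: the two low bits are 0b11 for 32-bit instructions and anything else for 16-bit compressed ones, which is why a c.ebreak only advances sepc by 2. A distilled sketch:

/* Returns nonzero if insn is an ebreak in either encoding. */
static int insn_is_ebreak(bug_insn_t insn)
{
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return insn == __BUG_INSN_32;	/* 32-bit ebreak */
	return (insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16; /* c.ebreak */
}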
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index fec62b24df89..b07b765f312a 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -36,7 +36,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
# these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b68aac701803..8db569141485 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -9,3 +9,5 @@ obj-y += fault.o
obj-y += extable.o
obj-y += ioremap.o
obj-y += cacheflush.o
+obj-y += context.o
+obj-y += sifive_l2_cache.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+ sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+ unsigned int cpu;
+ cpumask_t others, hmask, *mask;
+
+ preempt_disable();
+
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_flush_icache_all();
+
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+ local |= cpumask_empty(&others);
+ if (mm != current->active_mm || !local) {
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(&others, &hmask);
+ sbi_remote_fence_i(hmask.bits);
+ } else {
+ /*
+ * It's assumed that at least one strongly ordered operation is
+ * performed on this hart between setting a hart's cpumask bit
+ * and scheduling this MM context on that hart. Sending an SBI
+ * remote message will do this, but in the case where no
+ * messages are sent we still need to order this hart's writes
+ * with flush_icache_deferred().
+ */
+ smp_mb();
+ }
+
+ preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..89ceb3cbe218
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU. RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches. To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart. This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ unsigned int cpu = smp_processor_id();
+ cpumask_t *mask = &mm->context.icache_stale_mask;
+
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ /*
+ * Ensure the remote hart's writes are visible to this hart.
+ * This pairs with a barrier in flush_icache_mm.
+ */
+ smp_mb();
+ local_flush_icache_all();
+ }
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task)
+{
+ unsigned int cpu;
+
+ if (unlikely(prev == next))
+ return;
+
+ /*
+ * Mark the current MM context as inactive, and the next as
+ * active. This is at least used by the icache flushing
+ * routines in order to determine who should be flushed.
+ */
+ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /*
+	 * Use the old sptbr name instead of using the current satp
+ * name to support binutils 2.29 which doesn't know about the
+ * privileged ISA 1.10 yet.
+ */
+ csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+ local_flush_tlb_all();
+
+ flush_icache_deferred(next);
+}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 88401d5125bc..cec8be9e2d6a 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -229,8 +229,9 @@ vmalloc_fault:
pte_t *pte_k;
int index;
+ /* User mode accesses just cause a SIGSEGV */
if (user_mode(regs))
- goto bad_area;
+ return do_trap(regs, SIGSEGV, code, addr, tsk);
/*
* Synchronize this task's top level page-table
@@ -239,13 +240,9 @@ vmalloc_fault:
* Do _not_ use "tsk->active_mm->pgd" here.
* We might be inside an interrupt in the middle
* of a task switch.
- *
- * Note: Use the old spbtr name instead of using the current
- * satp name to support binutils 2.29 which doesn't know about
- * the privileged ISA 1.10 yet.
*/
index = pgd_index(addr);
- pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+ pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
new file mode 100644
index 000000000000..4eb64619b3f4
--- /dev/null
+++ b/arch/riscv/mm/sifive_l2_cache.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 Cache Controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations l2_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+ u32 regval, val;
+
+ regval = readl(l2_base + SIFIVE_L2_CONFIG);
+ val = regval & 0xFF;
+ pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+ val = (regval & 0xFF00) >> 8;
+ pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+ val = (regval & 0xFF0000) >> 16;
+ pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+ val = (regval & 0xFF000000) >> 24;
+ pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+ regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+ pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+ unsigned int regval, add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+ pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+ regval = readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+ pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+ regval = readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+ pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ regval = readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+int __init sifive_l2_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc;
+
+ np = of_find_matching_node(NULL, sifive_l2_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2_base = ioremap(res.start, resource_size(&res));
+ if (!l2_base)
+ return -ENOMEM;
+
+ for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+ if (rc) {
+ pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+ return rc;
+ }
+ }
+
+ l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+}
+device_initcall(sifive_l2_init);
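
Other drivers can subscribe to the ECC events raised by l2_int_handler() through the exported notifier chain. A hedged consumer sketch follows; the callback and its registration site are hypothetical, while the registration API and event types come from this file:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/sifive_l2_cache.h>

static int my_l2_err_event(struct notifier_block *nb,
                           unsigned long event, void *msg)
{
        /* msg is the short string passed to atomic_notifier_call_chain() */
        if (event == SIFIVE_L2_ERR_TYPE_UE)
                pr_crit("L2 uncorrectable error: %s\n", (char *)msg);
        return NOTIFY_OK;
}

static struct notifier_block my_l2_err_nb = {
        .notifier_call = my_l2_err_event,
};

/* e.g. from a driver's init path: */
register_sifive_l2_error_notifier(&my_l2_err_nb);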
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index df1d6a150f30..de8521fc9de5 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -10,6 +10,8 @@
# Copyright (C) 1994 by Linus Torvalds
#
+KBUILD_DEFCONFIG := defconfig
+
LD_BFD := elf64-s390
KBUILD_LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index c51496bbac19..7cba96e7587b 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -58,7 +58,6 @@ define cmd_section_cmp
touch $@
endef
-OBJCOPYFLAGS_bzImage := --pad-to $$(readelf -s $(obj)/compressed/vmlinux | awk '/\<_end\>/ {print or(strtonum("0x"$$2),4095)+1}')
$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
$(call if_changed,objcopy)
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 112b8d9f1e4c..635217eb3d91 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -77,6 +77,8 @@ SECTIONS
_compressed_start = .;
*(.vmlinux.bin.compressed)
_compressed_end = .;
+ FILL(0xff);
+ . = ALIGN(4096);
}
. = ALIGN(256);
.bss : {
diff --git a/arch/s390/defconfig b/arch/s390/configs/defconfig
index c59b922cb6c5..c59b922cb6c5 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/configs/defconfig
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index f316de40e51b..27696755daa9 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -28,6 +28,7 @@
#define CPACF_KMCTR 0xb92d /* MSA4 */
#define CPACF_PRNO 0xb93c /* MSA5 */
#define CPACF_KMA 0xb929 /* MSA8 */
+#define CPACF_KDSA 0xb93a /* MSA9 */
/*
* En/decryption modifier bits
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index c47e22bba87f..bdbc81b5bc91 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -278,6 +278,7 @@ struct kvm_s390_sie_block {
#define ECD_HOSTREGMGMT 0x20000000
#define ECD_MEF 0x08000000
#define ECD_ETOKENF 0x02000000
+#define ECD_ECC 0x00200000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
@@ -312,6 +313,7 @@ struct kvm_vcpu_stat {
u64 halt_successful_poll;
u64 halt_attempted_poll;
u64 halt_poll_invalid;
+ u64 halt_no_poll_steal;
u64 halt_wakeup;
u64 instruction_lctl;
u64 instruction_lctlg;
diff --git a/arch/s390/include/asm/segment.h b/arch/s390/include/asm/segment.h
deleted file mode 100644
index 97a0582b8d0f..000000000000
--- a/arch/s390/include/asm/segment.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-#endif
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 16511d97e8dc..47104e5b47fd 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -152,7 +152,10 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 pcc[16]; /* with MSA4 */
__u8 ppno[16]; /* with MSA5 */
__u8 kma[16]; /* with MSA8 */
- __u8 reserved[1808];
+ __u8 kdsa[16]; /* with MSA9 */
+ __u8 sortl[32]; /* with STFLE.150 */
+ __u8 dfltcc[32]; /* with STFLE.151 */
+ __u8 reserved[1728];
};
/* kvm attributes for crypto */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index cd3df5514552..ad71132374f0 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -24,7 +24,6 @@
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 061418f787c3..e822b2964a83 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -430,3 +430,9 @@
425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree sys_open_tree
+429 common move_mount sys_move_mount sys_move_mount
+430 common fsopen sys_fsopen sys_fsopen
+431 common fsconfig sys_fsconfig sys_fsconfig
+432 common fsmount sys_fsmount sys_fsmount
+433 common fspick sys_fspick sys_fspick
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 1816ee48eadd..d3db3d7ed077 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -30,6 +30,7 @@ config KVM
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_INVALID_WAKEUPS
+ select HAVE_KVM_NO_POLL
select SRCU
select KVM_VFIO
---help---
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 1fd706f6206c..9dde4d7d8704 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -14,6 +14,7 @@
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
+#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
@@ -2307,6 +2308,7 @@ static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
if (id >= MAX_S390_IO_ADAPTERS)
return NULL;
+ id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
return kvm->arch.adapters[id];
}
@@ -2320,8 +2322,13 @@ static int register_io_adapter(struct kvm_device *dev,
(void __user *)attr->addr, sizeof(adapter_info)))
return -EFAULT;
- if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
- (dev->kvm->arch.adapters[adapter_info.id] != NULL))
+ if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
+ return -EINVAL;
+
+ adapter_info.id = array_index_nospec(adapter_info.id,
+ MAX_S390_IO_ADAPTERS);
+
+ if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
return -EINVAL;
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
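
Both hunks follow the standard Spectre v1 hardening pattern: bounds-check first, then clamp the index with array_index_nospec() so a mispredicted branch cannot speculatively index out of bounds. The same pattern in self-contained form, with a hypothetical table:

#include <linux/kernel.h>
#include <linux/nospec.h>

static int table[16];

static int read_entry(unsigned int idx)
{
        if (idx >= ARRAY_SIZE(table))
                return -EINVAL;
        /* forces idx into 0..15 without a branch, even under speculation */
        idx = array_index_nospec(idx, ARRAY_SIZE(table));
        return table[idx];
}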
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4638303ba6a8..8d6d75db8de6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -75,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
+ { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -177,6 +178,11 @@ static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");
+/* maximum percentage of steal time for polling. >100 is treated like 100 */
+static u8 halt_poll_max_steal = 10;
+module_param(halt_poll_max_steal, byte, 0644);
+MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+
/*
* For now we handle at most 16 double words as this is what the s390 base
* kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -321,6 +327,22 @@ static inline int plo_test_bit(unsigned char nr)
return cc == 0;
}
+static inline void __insn32_query(unsigned int opcode, u8 query[32])
+{
+ register unsigned long r0 asm("0") = 0; /* query function */
+ register unsigned long r1 asm("1") = (unsigned long) query;
+
+ asm volatile(
+ /* Parameter regs are ignored */
+ " .insn rrf,%[opc] << 16,2,4,6,0\n"
+ : "=m" (*query)
+ : "d" (r0), "a" (r1), [opc] "i" (opcode)
+ : "cc");
+}
+
+#define INSN_SORTL 0xb938
+#define INSN_DFLTCC 0xb939
+
static void kvm_s390_cpu_feat_init(void)
{
int i;
@@ -368,6 +390,16 @@ static void kvm_s390_cpu_feat_init(void)
__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
kvm_s390_available_subfunc.kma);
+ if (test_facility(155)) /* MSA9 */
+ __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kdsa);
+
+ if (test_facility(150)) /* SORTL */
+ __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+
+ if (test_facility(151)) /* DFLTCC */
+ __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
/*
@@ -513,9 +545,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
@@ -657,6 +686,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
set_kvm_facility(kvm->arch.model.fac_mask, 135);
set_kvm_facility(kvm->arch.model.fac_list, 135);
}
+ if (test_facility(148)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 148);
+ set_kvm_facility(kvm->arch.model.fac_list, 148);
+ }
+ if (test_facility(152)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 152);
+ set_kvm_facility(kvm->arch.model.fac_list, 152);
+ }
r = 0;
} else
r = -EINVAL;
@@ -1323,6 +1360,19 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+ VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+ VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
return 0;
}
@@ -1491,6 +1541,19 @@ static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+ VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+ VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
return 0;
}
@@ -1546,6 +1609,19 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+ VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
+ VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
+ VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
return 0;
}
@@ -2817,6 +2893,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
+static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
+{
+ if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
+ test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
+ return true;
+ return false;
+}
+
+static bool kvm_has_pckmo_ecc(struct kvm *kvm)
+{
+ /* At least one ECC subfunction must be present */
+ return kvm_has_pckmo_subfunc(kvm, 32) ||
+ kvm_has_pckmo_subfunc(kvm, 33) ||
+ kvm_has_pckmo_subfunc(kvm, 34) ||
+ kvm_has_pckmo_subfunc(kvm, 40) ||
+ kvm_has_pckmo_subfunc(kvm, 41);
+}
+
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
/*
@@ -2829,13 +2924,19 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
vcpu->arch.sie_block->eca &= ~ECA_APIE;
+ vcpu->arch.sie_block->ecd &= ~ECD_ECC;
if (vcpu->kvm->arch.crypto.apie)
vcpu->arch.sie_block->eca |= ECA_APIE;
/* Set up protected key support */
- if (vcpu->kvm->arch.crypto.aes_kw)
+ if (vcpu->kvm->arch.crypto.aes_kw) {
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+ /* ECC is also wrapped with the AES key */
+ if (kvm_has_pckmo_ecc(vcpu->kvm))
+ vcpu->arch.sie_block->ecd |= ECD_ECC;
+ }
+
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
@@ -3068,6 +3169,17 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
}
}
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ /* do not poll with more than halt_poll_max_steal percent of steal time */
+ if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+ halt_poll_max_steal) {
+ vcpu->stat.halt_no_poll_steal++;
+ return true;
+ }
+ return false;
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
/* kvm common code refers to this, but never calls it */
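
kvm_arch_no_poll() backs the new HAVE_KVM_NO_POLL Kconfig symbol selected above. A hedged sketch of how the common halt path is expected to consume the hook; the function shape is assumed, not copied from virt/kvm:

/* assumed consumer in common KVM code */
static void kvm_vcpu_block_sketch(struct kvm_vcpu *vcpu)
{
        if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
                /* busy-poll for a wakeup before scheduling out */
        }
        /* otherwise block immediately */
}

With the default halt_poll_max_steal of 10, polling is abandoned once the vCPU's average steal time reaches 10%, so an overcommitted host stops burning CPU on halt polling.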
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d62fa148558b..076090f9e666 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -288,7 +288,9 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
+ u32 ecd_flags;
int apie_h;
+ int apie_s;
int key_msk = test_kvm_facility(vcpu->kvm, 76);
int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
@@ -297,7 +299,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->crycbd = 0;
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
- if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
+ apie_s = apie_h & scb_o->eca;
+ if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
return 0;
if (!crycb_addr)
@@ -308,7 +311,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
((crycb_addr + 128) & PAGE_MASK))
return set_validity_icpt(scb_s, 0x003CU);
- if (apie_h && (scb_o->eca & ECA_APIE)) {
+ if (apie_s) {
ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
vcpu->kvm->arch.crypto.crycb,
fmt_o, fmt_h);
@@ -320,7 +323,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
- if (!ecb3_flags)
+ ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
+ if (!ecb3_flags && !ecd_flags)
goto end;
/* copy only the wrapping keys */
@@ -329,6 +333,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
+ scb_s->ecd |= ecd_flags;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -339,7 +344,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
end:
switch (ret) {
case -EINVAL:
- return set_validity_icpt(scb_s, 0x0020U);
+ return set_validity_icpt(scb_s, 0x0022U);
case -EFAULT:
return set_validity_icpt(scb_s, 0x0035U);
case -EACCES:
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 01892dcf4029..0c1f257be422 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -28,7 +28,7 @@ static void __init kasan_early_panic(const char *reason)
{
sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
sclp_early_printk(reason);
- disabled_wait(0);
+ disabled_wait();
}
static void * __init kasan_early_alloc_segment(void)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fd788e0f2e5b..cead9e0dcffb 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -93,6 +93,9 @@ static struct facility_def facility_defs[] = {
131, /* enhanced-SOP 2 and side-effect */
139, /* multiple epoch facility */
146, /* msa extension 8 */
+ 150, /* enhanced sort */
+ 151, /* deflate conversion */
+ 155, /* msa extension 9 */
-1 /* END */
}
},
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 480b057556ee..016a727d4357 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -430,3 +430,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index a1dd24307b00..e047480b1605 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -473,3 +473,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/unicore32/configs/unicore32_defconfig b/arch/unicore32/configs/unicore32_defconfig
index aebd01fc28e5..360cc9abcdb0 100644
--- a/arch/unicore32/configs/unicore32_defconfig
+++ b/arch/unicore32/configs/unicore32_defconfig
@@ -119,7 +119,7 @@ CONFIG_I2C_PUV3=y
# Hardware Monitoring support
#CONFIG_SENSORS_LM75=m
# Generic Thermal sysfs driver
-#CONFIG_THERMAL=m
+#CONFIG_THERMAL=y
#CONFIG_THERMAL_HWMON=y
# Multimedia support
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index c93dc6478cb2..5fe2426bb7a5 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -28,7 +28,6 @@ generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += syscalls.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 21e9f2fac04b..2bbbd4d1ba31 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -270,9 +270,6 @@ config GENERIC_BUG
config GENERIC_BUG_RELATIVE_POINTERS
bool
-config GENERIC_HWEIGHT
- def_bool y
-
config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 4cd5f982b1e5..ad968b7bac72 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -398,12 +398,6 @@
384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl
385 i386 io_pgetevents sys_io_pgetevents_time32 __ia32_compat_sys_io_pgetevents
386 i386 rseq sys_rseq __ia32_sys_rseq
-387 i386 open_tree sys_open_tree __ia32_sys_open_tree
-388 i386 move_mount sys_move_mount __ia32_sys_move_mount
-389 i386 fsopen sys_fsopen __ia32_sys_fsopen
-390 i386 fsconfig sys_fsconfig __ia32_sys_fsconfig
-391 i386 fsmount sys_fsmount __ia32_sys_fsmount
-392 i386 fspick sys_fspick __ia32_sys_fspick
393 i386 semget sys_semget __ia32_sys_semget
394 i386 semctl sys_semctl __ia32_compat_sys_semctl
395 i386 shmget sys_shmget __ia32_sys_shmget
@@ -438,3 +432,9 @@
425 i386 io_uring_setup sys_io_uring_setup __ia32_sys_io_uring_setup
426 i386 io_uring_enter sys_io_uring_enter __ia32_sys_io_uring_enter
427 i386 io_uring_register sys_io_uring_register __ia32_sys_io_uring_register
+428 i386 open_tree sys_open_tree __ia32_sys_open_tree
+429 i386 move_mount sys_move_mount __ia32_sys_move_mount
+430 i386 fsopen sys_fsopen __ia32_sys_fsopen
+431 i386 fsconfig sys_fsconfig __ia32_sys_fsconfig
+432 i386 fsmount sys_fsmount __ia32_sys_fsmount
+433 i386 fspick sys_fspick __ia32_sys_fspick
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 64ca0d06259a..b4e6f9e6204a 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -343,18 +343,18 @@
332 common statx __x64_sys_statx
333 common io_pgetevents __x64_sys_io_pgetevents
334 common rseq __x64_sys_rseq
-335 common open_tree __x64_sys_open_tree
-336 common move_mount __x64_sys_move_mount
-337 common fsopen __x64_sys_fsopen
-338 common fsconfig __x64_sys_fsconfig
-339 common fsmount __x64_sys_fsmount
-340 common fspick __x64_sys_fspick
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal __x64_sys_pidfd_send_signal
425 common io_uring_setup __x64_sys_io_uring_setup
426 common io_uring_enter __x64_sys_io_uring_enter
427 common io_uring_register __x64_sys_io_uring_register
+428 common open_tree __x64_sys_open_tree
+429 common move_mount __x64_sys_move_mount
+430 common fsopen __x64_sys_fsopen
+431 common fsconfig __x64_sys_fsconfig
+432 common fsmount __x64_sys_fsmount
+433 common fspick __x64_sys_fspick
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 8e470b018512..3a4d8d4d39f8 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -73,14 +73,12 @@ const char *outfilename;
enum {
sym_vvar_start,
sym_vvar_page,
- sym_hpet_page,
sym_pvclock_page,
sym_hvclock_page,
};
const int special_pages[] = {
sym_vvar_page,
- sym_hpet_page,
sym_pvclock_page,
sym_hvclock_page,
};
@@ -93,7 +91,6 @@ struct vdso_sym {
struct vdso_sym required_syms[] = {
[sym_vvar_start] = {"vvar_start", true},
[sym_vvar_page] = {"vvar_page", true},
- [sym_hpet_page] = {"hpet_page", true},
[sym_pvclock_page] = {"pvclock_page", true},
[sym_hvclock_page] = {"hvclock_page", true},
{"VDSO32_NOTE_MASK", true},
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 7635c23f7d82..58a6993d7eb3 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -393,7 +393,7 @@ static __init int _init_events_attrs(void)
return 0;
}
-const struct attribute_group *amd_iommu_attr_groups[] = {
+static const struct attribute_group *amd_iommu_attr_groups[] = {
&amd_iommu_format_group,
&amd_iommu_cpumask_group,
&amd_iommu_events_group,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ef763f535e3a..546d13e436aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2384,7 +2384,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (__test_and_clear_bit(55, (unsigned long *)&status)) {
handled++;
- intel_pt_interrupt();
+ if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
+ perf_guest_cbs->handle_intel_pt_intr))
+ perf_guest_cbs->handle_intel_pt_intr();
+ else
+ intel_pt_interrupt();
}
/*
@@ -3265,7 +3269,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
- if (!(event->attr.freq || event->attr.wakeup_events)) {
+ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event)))
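
This lets a hypervisor claim Intel PT PMIs that belong to a guest. A hedged sketch of the registration side; the callback bodies are hypothetical, while the handle_intel_pt_intr member is the one this series adds to perf_guest_info_callbacks:

#include <linux/perf_event.h>

static int my_is_in_guest(void)             { return 0; }
static int my_is_user_mode(void)            { return 0; }
static unsigned long my_get_guest_ip(void)  { return 0; }
static void my_handle_pt_intr(void)         { /* reflect the PMI into the guest */ }

static struct perf_guest_info_callbacks my_guest_cbs = {
        .is_in_guest          = my_is_in_guest,
        .is_user_mode         = my_is_user_mode,
        .get_guest_ip         = my_get_guest_ip,
        .handle_intel_pt_intr = my_handle_pt_intr,
};

/* perf_register_guest_info_callbacks(&my_guest_cbs); */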
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 07fc84bb85c1..a6ac2f4f76fc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -394,10 +394,10 @@ struct cpu_hw_events {
/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+ EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \
- EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+ EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index fc0693569f7a..ba88edd0d58b 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -12,8 +12,6 @@
#define REG_OUT "a"
#endif
-#define __HAVE_ARCH_SW_HWEIGHT
-
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index 62be73b23d5c..e8f58ddd06d9 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -10,6 +10,7 @@ extern struct e820_table *e820_table_firmware;
extern unsigned long pci_mem_start;
+extern bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type);
extern bool e820__mapped_any(u64 start, u64 end, enum e820_type type);
extern bool e820__mapped_all(u64 start, u64 end, enum e820_type type);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c79abe7ca093..450d69a1e6fa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -470,6 +470,7 @@ struct kvm_pmu {
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
+ u64 global_ovf_ctrl_mask;
u64 reserved_bits;
u8 version;
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
@@ -781,6 +782,9 @@ struct kvm_vcpu_arch {
/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
bool l1tf_flush_l1d;
+
+ /* AMD MSRC001_0015 Hardware Configuration */
+ u64 msr_hwcr;
};
struct kvm_lpage_info {
@@ -1168,7 +1172,8 @@ struct kvm_x86_ops {
uint32_t guest_irq, bool set);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
- int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired);
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 88dd202c8b00..979ef971cc78 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -789,6 +789,14 @@
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+/* PERF_GLOBAL_OVF_CTL bits */
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT 62
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT)
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT 63
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT)
+
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
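
These bit definitions pair with the global_ovf_ctrl_mask field added to struct kvm_pmu earlier in this section. A hedged sketch of how a PMU implementation might compose the mask, with reserved bits set and architecturally writable bits cleared; pt_exposed_to_guest is a hypothetical flag:

u64 global_ovf_ctrl_mask = global_ctrl_mask
        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);

/* the ToPA PMI bit is writable only if Intel PT is exposed to the guest */
if (pt_exposed_to_guest)
        global_ovf_ctrl_mask &= ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;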
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 27566e57e87d..230474e2ddb5 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -19,7 +19,6 @@ struct vdso_image {
long sym_vvar_start; /* Negative offset to the vvar area */
long sym_vvar_page;
- long sym_hpet_page;
long sym_pvclock_page;
long sym_hvclock_page;
long sym_VDSO32_NOTE_MASK;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 2879e234e193..76dd605ee2a3 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -73,12 +73,13 @@ EXPORT_SYMBOL(pci_mem_start);
* This function checks if any part of the range <start,end> is mapped
* with type.
*/
-bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
+static bool _e820__mapped_any(struct e820_table *table,
+ u64 start, u64 end, enum e820_type type)
{
int i;
- for (i = 0; i < e820_table->nr_entries; i++) {
- struct e820_entry *entry = &e820_table->entries[i];
+ for (i = 0; i < table->nr_entries; i++) {
+ struct e820_entry *entry = &table->entries[i];
if (type && entry->type != type)
continue;
@@ -88,6 +89,17 @@ bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
}
return 0;
}
+
+bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type)
+{
+ return _e820__mapped_any(e820_table_firmware, start, end, type);
+}
+EXPORT_SYMBOL_GPL(e820__mapped_raw_any);
+
+bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
+{
+ return _e820__mapped_any(e820_table, start, end, type);
+}
EXPORT_SYMBOL_GPL(e820__mapped_any);
/*
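
e820__mapped_any() consults e820_table, which the kernel may have modified, while the new e820__mapped_raw_any() consults e820_table_firmware, the map as originally handed over by firmware. A short example for an arbitrary page-aligned physical address pa:

/* was this page RAM per firmware, versus per the kernel's current map? */
bool fw_ram = e820__mapped_raw_any(pa, pa + PAGE_SIZE - 1, E820_TYPE_RAM);
bool ram    = e820__mapped_any(pa, pa + PAGE_SIZE - 1, E820_TYPE_RAM);

The KVM MMU change later in this section uses the raw variant so that RAM the kernel carved out of its own map is not misclassified as MMIO.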
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index cf52ee0d8711..9e4fa2484d10 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -768,7 +768,7 @@ static struct kprobe kretprobe_kprobe = {
/*
* Called from kretprobe_trampoline
*/
-static __used void *trampoline_handler(struct pt_regs *regs)
+__used __visible void *trampoline_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb;
struct kretprobe_instance *ri = NULL;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 7de466eb960b..8b6d03e55d2f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -58,7 +58,6 @@
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
-#include <asm/nospec-branch.h>
#include <asm/mpx.h>
#include <asm/vm86.h>
#include <asm/umip.h>
@@ -368,13 +367,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
regs->ip = (unsigned long)general_protection;
regs->sp = (unsigned long)&gpregs->orig_ax;
- /*
- * This situation can be triggered by userspace via
- * modify_ldt(2) and the return does not take the regular
- * user space exit, so a CPU buffer clear is required when
- * MDS mitigation is enabled.
- */
- mds_user_clear_cpu_buffers();
return;
}
#endif
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bbbe611f0c49..80a642a0143d 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -963,13 +963,13 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
return 1;
- eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ eax = kvm_rax_read(vcpu);
+ ecx = kvm_rcx_read(vcpu);
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
- kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
- kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
- kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
+ kvm_rax_write(vcpu, eax);
+ kvm_rbx_write(vcpu, ebx);
+ kvm_rcx_write(vcpu, ecx);
+ kvm_rdx_write(vcpu, edx);
return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index cc24b3a32c44..8ca4b39918e0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1535,10 +1535,10 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
longmode = is_64_bit_mode(vcpu);
if (longmode)
- kvm_register_write(vcpu, VCPU_REGS_RAX, result);
+ kvm_rax_write(vcpu, result);
else {
- kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
- kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
+ kvm_rdx_write(vcpu, result >> 32);
+ kvm_rax_write(vcpu, result & 0xffffffff);
}
}
@@ -1611,18 +1611,18 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
longmode = is_64_bit_mode(vcpu);
if (!longmode) {
- param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
- ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
- outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+ param = ((u64)kvm_rdx_read(vcpu) << 32) |
+ (kvm_rax_read(vcpu) & 0xffffffff);
+ ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
+ (kvm_rcx_read(vcpu) & 0xffffffff);
+ outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
+ (kvm_rsi_read(vcpu) & 0xffffffff);
}
#ifdef CONFIG_X86_64
else {
- param = kvm_register_read(vcpu, VCPU_REGS_RCX);
- ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
- outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+ param = kvm_rcx_read(vcpu);
+ ingpa = kvm_rdx_read(vcpu);
+ outgpa = kvm_r8_read(vcpu);
}
#endif
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index f8f56a93358b..1cc6c47dc77e 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -9,6 +9,34 @@
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
+static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+{ \
+ return vcpu->arch.regs[VCPU_REGS_##uname]; \
+} \
+static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
+ unsigned long val) \
+{ \
+ vcpu->arch.regs[VCPU_REGS_##uname] = val; \
+}
+BUILD_KVM_GPR_ACCESSORS(rax, RAX)
+BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
+BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
+BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
+BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
+BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
+BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
+#ifdef CONFIG_X86_64
+BUILD_KVM_GPR_ACCESSORS(r8, R8)
+BUILD_KVM_GPR_ACCESSORS(r9, R9)
+BUILD_KVM_GPR_ACCESSORS(r10, R10)
+BUILD_KVM_GPR_ACCESSORS(r11, R11)
+BUILD_KVM_GPR_ACCESSORS(r12, R12)
+BUILD_KVM_GPR_ACCESSORS(r13, R13)
+BUILD_KVM_GPR_ACCESSORS(r14, R14)
+BUILD_KVM_GPR_ACCESSORS(r15, R15)
+#endif
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -37,6 +65,16 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
+static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
+{
+ return kvm_register_read(vcpu, VCPU_REGS_RSP);
+}
+
+static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+}
+
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
might_sleep(); /* on svm */
@@ -83,8 +121,8 @@ static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
- return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
- | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+ return (kvm_rax_read(vcpu) & -1u)
+ | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
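
For reference, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to exactly:

static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.regs[VCPU_REGS_RAX];
}
static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
                                          unsigned long val)
{
        vcpu->arch.regs[VCPU_REGS_RAX] = val;
}

which is what lets the call sites below replace kvm_register_read(vcpu, VCPU_REGS_RAX) with the shorter kvm_rax_read(vcpu).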
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd13fdddbdc4..4924f83ed4f3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1454,7 +1454,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
if (swait_active(q))
swake_up_one(q);
- if (apic_lvtt_tscdeadline(apic))
+ if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
ktimer->expired_tscdeadline = ktimer->tscdeadline;
}
@@ -1696,37 +1696,42 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
static bool start_hv_timer(struct kvm_lapic *apic)
{
struct kvm_timer *ktimer = &apic->lapic_timer;
- int r;
+ struct kvm_vcpu *vcpu = apic->vcpu;
+ bool expired;
WARN_ON(preemptible());
if (!kvm_x86_ops->set_hv_timer)
return false;
- if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
- return false;
-
if (!ktimer->tscdeadline)
return false;
- r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
- if (r < 0)
+ if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
return false;
ktimer->hv_timer_in_use = true;
hrtimer_cancel(&ktimer->timer);
/*
- * Also recheck ktimer->pending, in case the sw timer triggered in
- * the window. For periodic timer, leave the hv timer running for
- * simplicity, and the deadline will be recomputed on the next vmexit.
+ * To simplify handling the periodic timer, leave the hv timer running
+ * even if the deadline timer has expired, i.e. rely on the resulting
+ * VM-Exit to recompute the periodic timer's target expiration.
*/
- if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
- if (r)
+ if (!apic_lvtt_period(apic)) {
+ /*
+ * Cancel the hv timer if the sw timer fired while the hv timer
+ * was being programmed, or if the hv timer itself expired.
+ */
+ if (atomic_read(&ktimer->pending)) {
+ cancel_hv_timer(apic);
+ } else if (expired) {
apic_timer_expired(apic);
- return false;
+ cancel_hv_timer(apic);
+ }
}
- trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
+ trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
+
return true;
}
@@ -1750,8 +1755,13 @@ static void start_sw_timer(struct kvm_lapic *apic)
static void restart_apic_timer(struct kvm_lapic *apic)
{
preempt_disable();
+
+ if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
+ goto out;
+
if (!start_hv_timer(apic))
start_sw_timer(apic);
+out:
preempt_enable();
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7b45d231f..1e9ba81accba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -44,6 +44,7 @@
#include <asm/page.h>
#include <asm/pat.h>
#include <asm/cmpxchg.h>
+#include <asm/e820/api.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
@@ -487,16 +488,24 @@ static void kvm_mmu_reset_all_pte_masks(void)
* If the CPU has 46 or less physical address bits, then set an
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
+ *
+ * Some Intel CPUs address the L1 cache using more PA bits than are
+ * reported by CPUID. Use the PA width of the L1 cache when possible
+ * to achieve more effective mitigation, e.g. if system RAM overlaps
+ * the most significant bits of legal physical address space.
*/
- low_phys_bits = boot_cpu_data.x86_phys_bits;
- if (boot_cpu_data.x86_phys_bits <
+ shadow_nonpresent_or_rsvd_mask = 0;
+ low_phys_bits = boot_cpu_data.x86_cache_bits;
+ if (boot_cpu_data.x86_cache_bits <
52 - shadow_nonpresent_or_rsvd_mask_len) {
shadow_nonpresent_or_rsvd_mask =
- rsvd_bits(boot_cpu_data.x86_phys_bits -
+ rsvd_bits(boot_cpu_data.x86_cache_bits -
shadow_nonpresent_or_rsvd_mask_len,
- boot_cpu_data.x86_phys_bits - 1);
+ boot_cpu_data.x86_cache_bits - 1);
low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
- }
+ } else
+ WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
@@ -2892,7 +2901,9 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
*/
(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
- return true;
+ return !e820__mapped_raw_any(pfn_to_hpa(pfn),
+ pfn_to_hpa(pfn + 1) - 1,
+ E820_TYPE_RAM);
}
/* Bits which may be returned by set_spte() */
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e9ea2d45ae66..9f72cc427158 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
return false;
}
-static bool valid_pat_type(unsigned t)
-{
- return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
-}
-
static bool valid_mtrr_type(unsigned t)
{
return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return false;
if (msr == MSR_IA32_CR_PAT) {
- for (i = 0; i < 8; i++)
- if (!valid_pat_type((data >> (i * 8)) & 0xff))
- return false;
- return true;
+ return kvm_pat_valid(data);
} else if (msr == MSR_MTRRdefType) {
if (data & ~0xcff)
return false;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 08715034e315..367a47df4ba0 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -141,15 +141,35 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
struct page *page;
npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
- /* Check if the user is doing something meaningless. */
- if (unlikely(npages != 1))
- return -EFAULT;
-
- table = kmap_atomic(page);
- ret = CMPXCHG(&table[index], orig_pte, new_pte);
- kunmap_atomic(table);
-
- kvm_release_page_dirty(page);
+ if (likely(npages == 1)) {
+ table = kmap_atomic(page);
+ ret = CMPXCHG(&table[index], orig_pte, new_pte);
+ kunmap_atomic(table);
+
+ kvm_release_page_dirty(page);
+ } else {
+ struct vm_area_struct *vma;
+ unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
+ unsigned long pfn;
+ unsigned long paddr;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
+ if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ paddr = pfn << PAGE_SHIFT;
+ table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
+ if (!table) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ ret = CMPXCHG(&table[index], orig_pte, new_pte);
+ memunmap(table);
+ up_read(&current->mm->mmap_sem);
+ }
return (ret != orig_pte);
}
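
The new fallback handles guest page tables that live in a VM_PFNMAP region (for example device or reserved memory remapped into the user address space), where get_user_pages_fast() cannot pin a struct page. The translation is the standard linear rule for such VMAs; a worked example under assumed values:

/* vma->vm_start = 0x7f0000000000, vma->vm_pgoff = 0x100000 (both assumed)
 * vaddr = 0x7f0000003000
 * pfn   = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
 *       = (0x3000 >> 12) + 0x100000 = 0x100003
 * paddr = pfn << PAGE_SHIFT = 0x100003000, then memremap()ed for the CMPXCHG
 */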
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6b92eaf4a3b1..a849dcb7fbc5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2091,7 +2091,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
init_vmcb(svm);
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
- kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
+ kvm_rdx_write(vcpu, eax);
if (kvm_vcpu_apicv_active(vcpu) && !init_event)
avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
@@ -3071,32 +3071,6 @@ static inline bool nested_svm_nmi(struct vcpu_svm *svm)
return false;
}
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
-{
- struct page *page;
-
- might_sleep();
-
- page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page))
- goto error;
-
- *_page = page;
-
- return kmap(page);
-
-error:
- kvm_inject_gp(&svm->vcpu, 0);
-
- return NULL;
-}
-
-static void nested_svm_unmap(struct page *page)
-{
- kunmap(page);
- kvm_release_page_dirty(page);
-}
-
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
unsigned port, size, iopm_len;
@@ -3299,10 +3273,11 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
+ int rc;
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
- struct page *page;
+ struct kvm_host_map map;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
@@ -3311,9 +3286,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb->control.exit_int_info_err,
KVM_ISA_SVM);
- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
- if (!nested_vmcb)
+ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return 1;
+ }
+
+ nested_vmcb = map.hva;
/* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu);
@@ -3408,16 +3388,16 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
} else {
(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
}
- kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
+ kvm_rax_write(&svm->vcpu, hsave->save.rax);
+ kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
+ kvm_rip_write(&svm->vcpu, hsave->save.rip);
svm->vmcb->save.dr7 = 0;
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
mark_all_dirty(svm->vmcb);
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
nested_svm_uninit_mmu_context(&svm->vcpu);
kvm_mmu_reset_context(&svm->vcpu);
@@ -3483,7 +3463,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
}
static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
- struct vmcb *nested_vmcb, struct page *page)
+ struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -3516,9 +3496,9 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
kvm_mmu_reset_context(&svm->vcpu);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
- kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+ kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
+ kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
+ kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
@@ -3567,7 +3547,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vmcb->control.pause_filter_thresh =
nested_vmcb->control.pause_filter_thresh;
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, map, true);
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
@@ -3587,17 +3567,23 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
+ int rc;
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
- struct page *page;
+ struct kvm_host_map map;
u64 vmcb_gpa;
vmcb_gpa = svm->vmcb->save.rax;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
- if (!nested_vmcb)
+ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return false;
+ }
+
+ nested_vmcb = map.hva;
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
@@ -3605,7 +3591,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
nested_vmcb->control.exit_info_1 = 0;
nested_vmcb->control.exit_info_2 = 0;
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
return false;
}
@@ -3649,7 +3635,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
copy_vmcb_control_area(hsave, vmcb);
- enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
+ enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
return true;
}
@@ -3673,21 +3659,26 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
- struct page *page;
+ struct kvm_host_map map;
int ret;
if (nested_svm_check_permissions(svm))
return 1;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
- if (!nested_vmcb)
+ ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (ret) {
+ if (ret == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return 1;
+ }
+
+ nested_vmcb = map.hva;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
}
@@ -3695,21 +3686,26 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
- struct page *page;
+ struct kvm_host_map map;
int ret;
if (nested_svm_check_permissions(svm))
return 1;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
- if (!nested_vmcb)
+ ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (ret) {
+ if (ret == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return 1;
+ }
+
+ nested_vmcb = map.hva;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
}
@@ -3791,11 +3787,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
- kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
+ trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
+ kvm_rax_read(&svm->vcpu));
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
- kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
+ kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3803,7 +3799,7 @@ static int invlpga_interception(struct vcpu_svm *svm)
static int skinit_interception(struct vcpu_svm *svm)
{
- trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
+ trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
@@ -3817,7 +3813,7 @@ static int wbinvd_interception(struct vcpu_svm *svm)
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
- u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u32 index = kvm_rcx_read(&svm->vcpu);
if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
@@ -4213,7 +4209,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int rdmsr_interception(struct vcpu_svm *svm)
{
- u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u32 ecx = kvm_rcx_read(&svm->vcpu);
struct msr_data msr_info;
msr_info.index = ecx;
@@ -4225,10 +4221,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
} else {
trace_kvm_msr_read(ecx, msr_info.data);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
- msr_info.data & 0xffffffff);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
- msr_info.data >> 32);
+ kvm_rax_write(&svm->vcpu, msr_info.data & 0xffffffff);
+ kvm_rdx_write(&svm->vcpu, msr_info.data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
return kvm_skip_emulated_instruction(&svm->vcpu);
}
@@ -4422,7 +4416,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
- u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u32 ecx = kvm_rcx_read(&svm->vcpu);
u64 data = kvm_read_edx_eax(&svm->vcpu);
msr.data = data;
@@ -6236,7 +6230,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb;
- struct page *page;
+ struct kvm_host_map map;
u64 guest;
u64 vmcb;
@@ -6244,10 +6238,10 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
if (guest) {
- nested_vmcb = nested_svm_map(svm, vmcb, &page);
- if (!nested_vmcb)
+ if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
return 1;
- enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
+ nested_vmcb = map.hva;
+ enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
}
return 0;
}
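
All of the conversions in this file share one shape: kvm_vcpu_map() replaces the old kmap()-based nested_svm_map(), and kvm_vcpu_unmap() releases the mapping, with the final argument marking the page dirty. The pattern in isolation, with error handling simplified; gpa is a guest physical address taken from vCPU state:

struct kvm_host_map map;
struct vmcb *nested_vmcb;

if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map)) {
        kvm_inject_gp(vcpu, 0);   /* unmappable guest address */
        return 1;
}
nested_vmcb = map.hva;            /* host virtual alias of the guest page */
/* ... read or modify the VMCB image ... */
kvm_vcpu_unmap(vcpu, &map, true); /* true: mark the guest page dirty */

Unlike kmap(), this also works for guest memory that is not backed by a struct page.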
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 854e144131c6..d6664ee3d127 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -2,6 +2,8 @@
#ifndef __KVM_X86_VMX_CAPS_H
#define __KVM_X86_VMX_CAPS_H
+#include <asm/vmx.h>
+
#include "lapic.h"
extern bool __read_mostly enable_vpid;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0c601d079cd2..f1a69117ac0f 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -193,10 +193,8 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
if (!vmx->nested.hv_evmcs)
return;
- kunmap(vmx->nested.hv_evmcs_page);
- kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
vmx->nested.hv_evmcs_vmptr = -1ull;
- vmx->nested.hv_evmcs_page = NULL;
vmx->nested.hv_evmcs = NULL;
}
@@ -229,16 +227,9 @@ static void free_nested(struct kvm_vcpu *vcpu)
kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
- if (vmx->nested.virtual_apic_page) {
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- if (vmx->nested.pi_desc_page) {
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- }
+ kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
+ vmx->nested.pi_desc = NULL;
kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
@@ -519,39 +510,19 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
int msr;
- struct page *page;
unsigned long *msr_bitmap_l1;
unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
- /*
- * pred_cmd & spec_ctrl are trying to verify two things:
- *
- * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
- * ensures that we do not accidentally generate an L02 MSR bitmap
- * from the L12 MSR bitmap that is too permissive.
- * 2. That L1 or L2s have actually used the MSR. This avoids
- * unnecessarily merging of the bitmap if the MSR is unused. This
- * works properly because we only update the L01 MSR bitmap lazily.
- * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
- * updated to reflect this when L1 (or its L2s) actually write to
- * the MSR.
- */
- bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
- bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+ struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
/* Nothing to do if the MSR bitmap is not in use. */
if (!cpu_has_vmx_msr_bitmap() ||
!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return false;
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
- !pred_cmd && !spec_ctrl)
- return false;
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
- if (is_error_page(page))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
return false;
- msr_bitmap_l1 = (unsigned long *)kmap(page);
+ msr_bitmap_l1 = (unsigned long *)map->hva;
/*
* To keep the control flow simple, pay eight 8-byte writes (sixteen
@@ -592,20 +563,42 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
}
}
- if (spec_ctrl)
+ /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+ nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ MSR_FS_BASE, MSR_TYPE_RW);
+
+ nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ MSR_GS_BASE, MSR_TYPE_RW);
+
+ nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+
+ /*
+ * Checking the L0->L1 bitmap verifies two things:
+ *
+ * 1. L0 gave permission to L1 to actually pass the MSR through. This
+ *    ensures that we do not accidentally generate an L02 MSR bitmap
+ *    from the L12 MSR bitmap that is too permissive.
+ * 2. That L1 or L2s have actually used the MSR. This avoids
+ *    unnecessary merging of the bitmaps if the MSR is unused. This
+ *    works properly because we only update the L01 MSR bitmap lazily.
+ *    So even if L0 wants to pass these MSRs through to L1, the L01
+ *    bitmap is only updated to reflect this when L1 (or its L2s)
+ *    actually write to the MSR.
+ */
+ if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_SPEC_CTRL,
MSR_TYPE_R | MSR_TYPE_W);
- if (pred_cmd)
+ if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_PRED_CMD,
MSR_TYPE_W);
- kunmap(page);
- kvm_release_page_clean(page);
+ kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
return true;
}
@@ -613,20 +606,20 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
+ struct kvm_host_map map;
struct vmcs12 *shadow;
- struct page *page;
if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
vmcs12->vmcs_link_pointer == -1ull)
return;
shadow = get_shadow_vmcs12(vcpu);
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
- memcpy(shadow, kmap(page), VMCS12_SIZE);
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
+ return;
- kunmap(page);
- kvm_release_page_clean(page);
+ memcpy(shadow, map.hva, VMCS12_SIZE);
+ kvm_vcpu_unmap(vcpu, &map, false);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
@@ -930,7 +923,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
if (!nested_cr3_valid(vcpu, cr3)) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
+ return -EINVAL;
}
/*
@@ -941,7 +934,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
!nested_ept) {
if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
*entry_failure_code = ENTRY_FAIL_PDPTE;
- return 1;
+ return -EINVAL;
}
}
}
@@ -1794,13 +1787,11 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
nested_release_evmcs(vcpu);
- vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
- vcpu, assist_page.current_nested_vmcs);
-
- if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(assist_page.current_nested_vmcs),
+ &vmx->nested.hv_evmcs_map))
return 0;
- vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+ vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
/*
* Currently, KVM only supports eVMCS version 1
@@ -2373,19 +2364,19 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
*/
if (vmx->emulation_required) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
+ return -EINVAL;
}
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
- return 1;
+ return -EINVAL;
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
- kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
- kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+ kvm_rsp_write(vcpu, vmcs12->guest_rsp);
+ kvm_rip_write(vcpu, vmcs12->guest_rip);
return 0;
}
@@ -2589,11 +2580,19 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * Checks related to Host Control Registers and MSRs
- */
-static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
+ nested_check_vm_exit_controls(vcpu, vmcs12) ||
+ nested_check_vm_entry_controls(vcpu, vmcs12))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
{
bool ia32e;
@@ -2606,6 +2605,10 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
return -EINVAL;
+ if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
+ !kvm_pat_valid(vmcs12->host_ia32_pat))
+ return -EINVAL;
+
/*
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
* IA32_EFER MSR must be 0 in the field for that register. In addition,
@@ -2624,41 +2627,12 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * Checks related to Guest Non-register State
- */
-static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
-{
- if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
- vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
- nested_check_vm_exit_controls(vcpu, vmcs12) ||
- nested_check_vm_entry_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_check_host_control_regs(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
-
- if (nested_check_guest_non_reg_state(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- return 0;
-}
-
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- int r;
- struct page *page;
+ int r = 0;
struct vmcs12 *shadow;
+ struct kvm_host_map map;
if (vmcs12->vmcs_link_pointer == -1ull)
return 0;
@@ -2666,23 +2640,34 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
return -EINVAL;
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
- if (is_error_page(page))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
return -EINVAL;
- r = 0;
- shadow = kmap(page);
+ shadow = map.hva;
+
if (shadow->hdr.revision_id != VMCS12_REVISION ||
shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
r = -EINVAL;
- kunmap(page);
- kvm_release_page_clean(page);
+
+ kvm_vcpu_unmap(vcpu, &map, false);
return r;
}
-static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12,
- u32 *exit_qual)
+/*
+ * Checks related to Guest Non-register State
+ */
+static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
+{
+ if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+ vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 *exit_qual)
{
bool ia32e;
@@ -2690,11 +2675,15 @@ static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
- return 1;
+ return -EINVAL;
+
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
+ !kvm_pat_valid(vmcs12->guest_ia32_pat))
+ return -EINVAL;
if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
- return 1;
+ return -EINVAL;
}
/*
@@ -2713,13 +2702,16 @@ static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
((vmcs12->guest_cr0 & X86_CR0_PG) &&
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
- return 1;
+ return -EINVAL;
}
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
- (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
- (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
- return 1;
+ (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+ (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+ return -EINVAL;
+
+ if (nested_check_guest_non_reg_state(vmcs12))
+ return -EINVAL;
return 0;
}
@@ -2832,6 +2824,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_host_map *map;
struct page *page;
u64 hpa;
@@ -2864,20 +2857,14 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
+ map = &vmx->nested.virtual_apic_map;
/*
* If translation failed, VM entry will fail because
* prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
*/
- if (!is_error_page(page)) {
- vmx->nested.virtual_apic_page = page;
- hpa = page_to_phys(vmx->nested.virtual_apic_page);
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+ if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
!nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
@@ -2898,26 +2885,15 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
if (nested_cpu_has_posted_intr(vmcs12)) {
- if (vmx->nested.pi_desc_page) { /* shouldn't happen */
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
+ map = &vmx->nested.pi_desc_map;
+
+ if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
+ vmx->nested.pi_desc =
+ (struct pi_desc *)(((void *)map->hva) +
+ offset_in_page(vmcs12->posted_intr_desc_addr));
+ vmcs_write64(POSTED_INTR_DESC_ADDR,
+ pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
}
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
- if (is_error_page(page))
- return;
- vmx->nested.pi_desc_page = page;
- vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc =
- (struct pi_desc *)((void *)vmx->nested.pi_desc +
- (unsigned long)(vmcs12->posted_intr_desc_addr &
- (PAGE_SIZE - 1)));
- vmcs_write64(POSTED_INTR_DESC_ADDR,
- page_to_phys(vmx->nested.pi_desc_page) +
- (unsigned long)(vmcs12->posted_intr_desc_addr &
- (PAGE_SIZE - 1)));
}
if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
@@ -3000,7 +2976,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
return -1;
}
- if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
goto vmentry_fail_vmexit;
}
@@ -3145,9 +3121,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
- ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
- if (ret)
- return nested_vmx_failValid(vcpu, ret);
+ if (nested_vmx_check_controls(vcpu, vmcs12))
+ return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+ if (nested_vmx_check_host_state(vcpu, vmcs12))
+ return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
/*
* We're finally done with prerequisite checking, and can start with
@@ -3310,11 +3288,12 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
- vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vapic_page = vmx->nested.virtual_apic_map.hva;
+ if (!vapic_page)
+ return;
+
__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
vapic_page, &max_irr);
- kunmap(vmx->nested.virtual_apic_page);
-
status = vmcs_read16(GUEST_INTR_STATUS);
if ((u8)max_irr > ((u8)status & 0xff)) {
status &= ~0xff;
@@ -3425,8 +3404,8 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
- vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
+ vmcs12->guest_rsp = kvm_rsp_read(vcpu);
+ vmcs12->guest_rip = kvm_rip_read(vcpu);
vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
@@ -3609,8 +3588,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
vmx_set_efer(vcpu, vcpu->arch.efer);
- kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
- kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+ kvm_rsp_write(vcpu, vmcs12->host_rsp);
+ kvm_rip_write(vcpu, vmcs12->host_rip);
vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
vmx_set_interrupt_shadow(vcpu, 0);
@@ -3955,16 +3934,9 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
- if (vmx->nested.virtual_apic_page) {
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- if (vmx->nested.pi_desc_page) {
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- }
+ kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
+ vmx->nested.pi_desc = NULL;
/*
* We are now running in L2, mmu_notifier will force to reload the
@@ -4260,7 +4232,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
gpa_t vmptr;
- struct page *page;
+ uint32_t revision;
struct vcpu_vmx *vmx = to_vmx(vcpu);
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -4306,20 +4278,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
* which replaces physical address width with 32
*/
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
- return nested_vmx_failInvalid(vcpu);
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page))
+ if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failInvalid(vcpu);
- if (*(u32 *)kmap(page) != VMCS12_REVISION) {
- kunmap(page);
- kvm_release_page_clean(page);
+ if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
+ revision != VMCS12_REVISION)
return nested_vmx_failInvalid(vcpu);
- }
- kunmap(page);
- kvm_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
ret = enter_vmx_operation(vcpu);
@@ -4377,7 +4341,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
@@ -4385,7 +4349,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
return nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- if (vmx->nested.hv_evmcs_page) {
+ if (vmx->nested.hv_evmcs_map.hva) {
if (vmptr == vmx->nested.hv_evmcs_vmptr)
nested_release_evmcs(vcpu);
} else {
@@ -4584,7 +4548,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
@@ -4597,11 +4561,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return 1;
if (vmx->nested.current_vmptr != vmptr) {
+ struct kvm_host_map map;
struct vmcs12 *new_vmcs12;
- struct page *page;
- page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page)) {
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
/*
* Reads from an unbacked page return all 1s,
* which means that the 32 bits located at the
@@ -4611,12 +4574,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
- new_vmcs12 = kmap(page);
+
+ new_vmcs12 = map.hva;
+
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
(new_vmcs12->hdr.shadow_vmcs &&
!nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
- kunmap(page);
- kvm_release_page_clean(page);
+ kvm_vcpu_unmap(vcpu, &map, false);
return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
@@ -4628,8 +4592,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
* cached.
*/
memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
- kunmap(page);
- kvm_release_page_clean(page);
+ kvm_vcpu_unmap(vcpu, &map, false);
set_current_vmptr(vmx, vmptr);
}
@@ -4804,7 +4767,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
+ u32 index = kvm_rcx_read(vcpu);
u64 address;
bool accessed_dirty;
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -4850,7 +4813,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12;
- u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
+ u32 function = kvm_rax_read(vcpu);
/*
* VMFUNC is only supported for nested guests, but we always enable the
@@ -4936,7 +4899,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, u32 exit_reason)
{
- u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+ u32 msr_index = kvm_rcx_read(vcpu);
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
@@ -5373,9 +5336,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->format != 0)
return -EINVAL;
- if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
- nested_enable_evmcs(vcpu, NULL);
-
if (!nested_vmx_allowed(vcpu))
return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
@@ -5417,6 +5377,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->vmx.vmxon_pa == -1ull)
return 0;
+ if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
+ nested_enable_evmcs(vcpu, NULL);
+
vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
ret = enter_vmx_operation(vcpu);
if (ret)
@@ -5460,9 +5423,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
return 0;
- vmx->nested.nested_run_pending =
- !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
-
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) {
struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
@@ -5480,14 +5440,20 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
}
- if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
- nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ if (nested_vmx_check_controls(vcpu, vmcs12) ||
+ nested_vmx_check_host_state(vcpu, vmcs12) ||
+ nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
return -EINVAL;
vmx->nested.dirty_vmcs12 = true;
+ vmx->nested.nested_run_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
ret = nested_vmx_enter_non_root_mode(vcpu, false);
- if (ret)
+ if (ret) {
+ vmx->nested.nested_run_pending = 0;
return -EINVAL;
+ }
return 0;
}
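
The nested.c rework above splits the old nested_vmx_check_vmentry_prereqs()/_postreqs() pair into three checkers that each return 0 or -EINVAL, leaving the architectural error code to the caller. Condensed from the nested_vmx_run() and nested_vmx_enter_non_root_mode() hunks, the resulting flow is:

	if (nested_vmx_check_controls(vcpu, vmcs12))
		return nested_vmx_failValid(vcpu,
				VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	if (nested_vmx_check_host_state(vcpu, vmcs12))
		return nested_vmx_failValid(vcpu,
				VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);

	/*
	 * Guest-state checks run later, from nested_vmx_enter_non_root_mode(),
	 * and fail with a VM-entry-failure VM-exit plus exit qualification
	 * rather than with VMfailValid.
	 */
	if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
		goto vmentry_fail_vmexit;
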
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 5ab4a364348e..f8502c376b37 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -227,7 +227,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+ if (!(data & pmu->global_ovf_ctrl_mask)) {
if (!msr_info->host_initiated)
pmu->global_status &= ~data;
pmu->global_ovf_ctrl = data;
@@ -297,6 +297,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
+ pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
+ & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
+ MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
+ if (kvm_x86_ops->pt_supported())
+ pmu->global_ovf_ctrl_mask &=
+ ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
if (entry &&
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e1fa935a545f..1ac167614032 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1692,6 +1692,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_SYSENTER_ESP:
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
+ case MSR_IA32_POWER_CTL:
+ msr_info->data = vmx->msr_ia32_power_ctl;
+ break;
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
@@ -1822,6 +1825,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_SYSENTER_ESP:
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
+ case MSR_IA32_POWER_CTL:
+ vmx->msr_ia32_power_ctl = data;
+ break;
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
@@ -1891,7 +1897,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ if (!kvm_pat_valid(data))
return 1;
vmcs_write64(GUEST_IA32_PAT, data);
vcpu->arch.pat = data;
@@ -2288,7 +2294,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_EXIT_SAVE_IA32_PAT |
VM_EXIT_LOAD_IA32_PAT |
VM_EXIT_LOAD_IA32_EFER |
VM_EXIT_CLEAR_BNDCFGS |
@@ -3619,14 +3624,13 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
- WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+ WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
return false;
rvi = vmx_get_rvi();
- vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vapic_page = vmx->nested.virtual_apic_map.hva;
vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
- kunmap(vmx->nested.virtual_apic_page);
return ((rvi & 0xf0) > (vppr & 0xf0));
}
@@ -4827,7 +4831,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
- u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+ u32 ecx = kvm_rcx_read(vcpu);
struct msr_data msr_info;
msr_info.index = ecx;
@@ -4840,18 +4844,16 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
trace_kvm_msr_read(ecx, msr_info.data);
- /* FIXME: handling of bits 32:63 of rax, rdx */
- vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
- vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
+ kvm_rax_write(vcpu, msr_info.data & -1u);
+ kvm_rdx_write(vcpu, (msr_info.data >> 32) & -1u);
return kvm_skip_emulated_instruction(vcpu);
}
static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
- u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
- | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ u32 ecx = kvm_rcx_read(vcpu);
+ u64 data = kvm_read_edx_eax(vcpu);
msr.data = data;
msr.index = ecx;
@@ -4922,7 +4924,7 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
u64 new_bv = kvm_read_edx_eax(vcpu);
- u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ u32 index = kvm_rcx_read(vcpu);
if (kvm_set_xcr(vcpu, index, new_bv) == 0)
return kvm_skip_emulated_instruction(vcpu);
@@ -5723,8 +5725,16 @@ void dump_vmcs(void)
if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
pr_err("TSC Multiplier = 0x%016llx\n",
vmcs_read64(TSC_MULTIPLIER));
- if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
- pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
+ if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
+ if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
+ u16 status = vmcs_read16(GUEST_INTR_STATUS);
+ pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
+ }
+ pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
+ if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
+ pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
+ pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
+ }
if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
@@ -6856,30 +6866,6 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
}
}
-static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *entry;
- union cpuid10_eax eax;
-
- entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
- if (!entry)
- return false;
-
- eax.full = entry->eax;
- return (eax.split.version_id > 0);
-}
-
-static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
-
- if (pmu_enabled)
- vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
- else
- vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
-}
-
static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6968,7 +6954,6 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (nested_vmx_allowed(vcpu)) {
nested_vmx_cr_fixed1_bits_update(vcpu);
nested_vmx_entry_exit_ctls_update(vcpu);
- nested_vmx_procbased_ctls_update(vcpu);
}
if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7028,7 +7013,8 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
return 0;
}
-static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
+static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired)
{
struct vcpu_vmx *vmx;
u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
@@ -7051,10 +7037,9 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
/* Convert to host delta tsc if tsc scaling is enabled */
if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
- u64_shl_div_u64(delta_tsc,
+ delta_tsc && u64_shl_div_u64(delta_tsc,
kvm_tsc_scaling_ratio_frac_bits,
- vcpu->arch.tsc_scaling_ratio,
- &delta_tsc))
+ vcpu->arch.tsc_scaling_ratio, &delta_tsc))
return -ERANGE;
/*
@@ -7067,7 +7052,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
return -ERANGE;
vmx->hv_deadline_tsc = tscl + delta_tsc;
- return delta_tsc == 0;
+ *expired = !delta_tsc;
+ return 0;
}
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
@@ -7104,9 +7090,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa;
- struct page *page = NULL;
- u64 *pml_address;
+ gpa_t gpa, dst;
if (is_guest_mode(vcpu)) {
WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7126,15 +7110,13 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
}
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+ dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
- if (is_error_page(page))
+ if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+ offset_in_page(dst), sizeof(gpa)))
return 0;
- pml_address = kmap(page);
- pml_address[vmcs12->guest_pml_index--] = gpa;
- kunmap(page);
- kvm_release_page_clean(page);
+ vmcs12->guest_pml_index--;
}
return 0;
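
vmx_set_hv_timer() used to overload its return value (negative errno on failure, 1 when the deadline had already passed); the hunk above moves the "already expired" indication into a bool out-parameter so the return value is a plain error code. A hedged sketch of a caller under the new contract (the helper names here are illustrative, not from this patch):

	static bool start_hw_timer(struct kvm_vcpu *vcpu, u64 deadline_tsc)
	{
		bool expired;

		/* Non-zero return (e.g. -ERANGE) means fall back to a hrtimer. */
		if (kvm_x86_ops->set_hv_timer(vcpu, deadline_tsc, &expired))
			return false;

		if (expired)
			timer_fire_now(vcpu);	/* hypothetical: deadline already in the past */

		return true;
	}
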
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f879529906b4..63d37ccce3dc 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -142,8 +142,11 @@ struct nested_vmx {
* pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
- struct page *virtual_apic_page;
- struct page *pi_desc_page;
+ struct kvm_host_map virtual_apic_map;
+ struct kvm_host_map pi_desc_map;
+
+ struct kvm_host_map msr_bitmap_map;
+
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
@@ -169,7 +172,7 @@ struct nested_vmx {
} smm;
gpa_t hv_evmcs_vmptr;
- struct page *hv_evmcs_page;
+ struct kvm_host_map hv_evmcs_map;
struct hv_enlightened_vmcs *hv_evmcs;
};
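
The struct kvm_host_map that replaces these struct page pointers bundles everything needed to access and later undo a guest-page mapping. Judging from how the hunks use it (map.hva, map->pfn, map.gfn, and the dirty flag on unmap), its shape is roughly the following; the exact field list and order are an assumption here:

	struct kvm_host_map {
		struct page *page;	/* non-NULL when backed by a refcounted page */
		void *hva;		/* host virtual address of the mapping */
		kvm_pfn_t pfn;
		kvm_pfn_t gfn;
	};
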
@@ -257,6 +260,8 @@ struct vcpu_vmx {
unsigned long host_debugctlmsr;
+ u64 msr_ia32_power_ctl;
+
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9591abde62a..536b78c4af6e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1100,15 +1100,15 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
- u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ u32 ecx = kvm_rcx_read(vcpu);
u64 data;
int err;
err = kvm_pmu_rdpmc(vcpu, ecx, &data);
if (err)
return err;
- kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
- kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+ kvm_rax_write(vcpu, (u32)data);
+ kvm_rdx_write(vcpu, data >> 32);
return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
@@ -1174,6 +1174,9 @@ static u32 emulated_msrs[] = {
MSR_PLATFORM_INFO,
MSR_MISC_FEATURES_ENABLES,
MSR_AMD64_VIRT_SPEC_CTRL,
+ MSR_IA32_POWER_CTL,
+
+ MSR_K7_HWCR,
};
static unsigned num_emulated_msrs;
@@ -1262,31 +1265,49 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return 0;
}
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & efer_reserved_bits)
- return false;
-
if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
- return false;
+ return false;
if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
- return false;
+ return false;
+
+ if (efer & (EFER_LME | EFER_LMA) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ return false;
+
+ if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
+ return false;
return true;
+}
+
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+ if (efer & efer_reserved_bits)
+ return false;
+
+ return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 old_efer = vcpu->arch.efer;
+ u64 efer = msr_info->data;
- if (!kvm_valid_efer(vcpu, efer))
- return 1;
+ if (efer & efer_reserved_bits)
+ return 1;
- if (is_paging(vcpu)
- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
- return 1;
+ if (!msr_info->host_initiated) {
+ if (!__kvm_valid_efer(vcpu, efer))
+ return 1;
+
+ if (is_paging(vcpu) &&
+ (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+ return 1;
+ }
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
@@ -2279,6 +2300,18 @@ static void kvmclock_sync_fn(struct work_struct *work)
KVMCLOCK_SYNC_PERIOD);
}
+/*
+ * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
+ */
+static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+{
+ /* McStatusWrEn enabled? */
+ if (guest_cpuid_is_amd(vcpu))
+ return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+
+ return false;
+}
+
static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2310,9 +2343,14 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((offset & 0x3) == 0 &&
data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
+
+ /* MCi_STATUS */
if (!msr_info->host_initiated &&
- (offset & 0x3) == 1 && data != 0)
- return -1;
+ (offset & 0x3) == 1 && data != 0) {
+ if (!can_set_mci_status(vcpu))
+ return -1;
+ }
+
vcpu->arch.mce_banks[offset] = data;
break;
}
@@ -2456,13 +2494,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.arch_capabilities = data;
break;
case MSR_EFER:
- return set_efer(vcpu, data);
+ return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
- data &= ~(u64)0x40000; /* ignore Mc status write enable */
- if (data != 0) {
+
+ /* Handle McStatusWrEn */
+ if (data == BIT_ULL(18)) {
+ vcpu->arch.msr_hwcr = data;
+ } else if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
@@ -2736,7 +2777,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K8_SYSCFG:
case MSR_K8_TSEG_ADDR:
case MSR_K8_TSEG_MASK:
- case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K8_INT_PENDING_MSG:
case MSR_AMD64_NB_CFG:
@@ -2900,6 +2940,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_MISC_FEATURES_ENABLES:
msr_info->data = vcpu->arch.msr_misc_features_enables;
break;
+ case MSR_K7_HWCR:
+ msr_info->data = vcpu->arch.msr_hwcr;
+ break;
default:
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -3079,9 +3122,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
@@ -5521,9 +5561,9 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int bytes,
struct x86_exception *exception)
{
+ struct kvm_host_map map;
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
gpa_t gpa;
- struct page *page;
char *kaddr;
bool exchanged;
@@ -5540,12 +5580,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
goto emul_write;
- page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
goto emul_write;
- kaddr = kmap_atomic(page);
- kaddr += offset_in_page(gpa);
+ kaddr = map.hva + offset_in_page(gpa);
+
switch (bytes) {
case 1:
exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
@@ -5562,13 +5601,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
default:
BUG();
}
- kunmap_atomic(kaddr);
- kvm_release_page_dirty(page);
+
+ kvm_vcpu_unmap(vcpu, &map, true);
if (!exchanged)
return X86EMUL_CMPXCHG_FAILED;
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
kvm_page_track_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
@@ -6558,7 +6596,7 @@ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{
- unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ unsigned long val = kvm_rax_read(vcpu);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
if (ret)
@@ -6593,8 +6631,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
}
/* For size less than 4 we merge, else we zero extend */
- val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
- : 0;
+ val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
/*
* Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
@@ -6602,7 +6639,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
*/
emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
vcpu->arch.pio.port, &val, 1);
- kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ kvm_rax_write(vcpu, val);
return kvm_skip_emulated_instruction(vcpu);
}
@@ -6614,12 +6651,12 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
int ret;
/* For size less than 4 we merge, else we zero extend */
- val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+ val = (size < 4) ? kvm_rax_read(vcpu) : 0;
ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
&val, 1);
if (ret) {
- kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ kvm_rax_write(vcpu, val);
return ret;
}
@@ -6854,10 +6891,20 @@ static unsigned long kvm_get_guest_ip(void)
return ip;
}
+static void kvm_handle_intel_pt_intr(void)
+{
+ struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+
+ kvm_make_request(KVM_REQ_PMI, vcpu);
+ __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
+ (unsigned long *)&vcpu->arch.pmu.global_status);
+}
+
static struct perf_guest_info_callbacks kvm_guest_cbs = {
.is_in_guest = kvm_is_in_guest,
.is_user_mode = kvm_is_user_mode,
.get_guest_ip = kvm_get_guest_ip,
+ .handle_intel_pt_intr = kvm_handle_intel_pt_intr,
};
static void kvm_set_mmio_spte_mask(void)
@@ -7133,11 +7180,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
- nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
- a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
- a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
- a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
- a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ nr = kvm_rax_read(vcpu);
+ a0 = kvm_rbx_read(vcpu);
+ a1 = kvm_rcx_read(vcpu);
+ a2 = kvm_rdx_read(vcpu);
+ a3 = kvm_rsi_read(vcpu);
trace_kvm_hypercall(nr, a0, a1, a2, a3);
@@ -7178,7 +7225,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
out:
if (!op_64_bit)
ret = (u32)ret;
- kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+ kvm_rax_write(vcpu, ret);
++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
@@ -8280,23 +8327,23 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
- regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
- regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
- regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
- regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
- regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
- regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+ regs->rax = kvm_rax_read(vcpu);
+ regs->rbx = kvm_rbx_read(vcpu);
+ regs->rcx = kvm_rcx_read(vcpu);
+ regs->rdx = kvm_rdx_read(vcpu);
+ regs->rsi = kvm_rsi_read(vcpu);
+ regs->rdi = kvm_rdi_read(vcpu);
+ regs->rsp = kvm_rsp_read(vcpu);
+ regs->rbp = kvm_rbp_read(vcpu);
#ifdef CONFIG_X86_64
- regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
- regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
- regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
- regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
- regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
- regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
- regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
- regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
+ regs->r8 = kvm_r8_read(vcpu);
+ regs->r9 = kvm_r9_read(vcpu);
+ regs->r10 = kvm_r10_read(vcpu);
+ regs->r11 = kvm_r11_read(vcpu);
+ regs->r12 = kvm_r12_read(vcpu);
+ regs->r13 = kvm_r13_read(vcpu);
+ regs->r14 = kvm_r14_read(vcpu);
+ regs->r15 = kvm_r15_read(vcpu);
#endif
regs->rip = kvm_rip_read(vcpu);
@@ -8316,23 +8363,23 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
- kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
- kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
- kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
- kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
- kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
- kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
+ kvm_rax_write(vcpu, regs->rax);
+ kvm_rbx_write(vcpu, regs->rbx);
+ kvm_rcx_write(vcpu, regs->rcx);
+ kvm_rdx_write(vcpu, regs->rdx);
+ kvm_rsi_write(vcpu, regs->rsi);
+ kvm_rdi_write(vcpu, regs->rdi);
+ kvm_rsp_write(vcpu, regs->rsp);
+ kvm_rbp_write(vcpu, regs->rbp);
#ifdef CONFIG_X86_64
- kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
- kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
- kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
- kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
- kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
- kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
- kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
- kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+ kvm_r8_write(vcpu, regs->r8);
+ kvm_r9_write(vcpu, regs->r9);
+ kvm_r10_write(vcpu, regs->r10);
+ kvm_r11_write(vcpu, regs->r11);
+ kvm_r12_write(vcpu, regs->r12);
+ kvm_r13_write(vcpu, regs->r13);
+ kvm_r14_write(vcpu, regs->r14);
+ kvm_r15_write(vcpu, regs->r15);
#endif
kvm_rip_write(vcpu, regs->rip);
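
The register conversions above swap kvm_register_read(vcpu, VCPU_REGS_RAX) and friends for per-register accessors such as kvm_rax_read()/kvm_rax_write(). One plausible way to generate them without repeating the boilerplate is a macro per GPR; the macro shape below is an assumption for illustration, as the definitions are not shown in this diff:

	#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				\
	static inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)	\
	{									\
		return kvm_register_read(vcpu, VCPU_REGS_##uname);		\
	}									\
	static inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,		\
					       unsigned long val)		\
	{									\
		kvm_register_write(vcpu, VCPU_REGS_##uname, val);		\
	}

	BUILD_KVM_GPR_ACCESSORS(rax, RAX)
	BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
	/* ...one instantiation per general-purpose register... */
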
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 534d3f28bb01..a470ff0868c5 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -345,6 +345,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
__this_cpu_write(current_vcpu, NULL);
}
+
+static inline bool kvm_pat_valid(u64 data)
+{
+ if (data & 0xF8F8F8F8F8F8F8F8ull)
+ return false;
+ /* 0, 1, 4, 5, 6, 7 are valid values. */
+ return (data | ((data & 0x0202020202020202ull) << 1)) == data;
+}
+
void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+
#endif
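
kvm_pat_valid() packs "each of the eight PAT bytes must be 0, 1, 4, 5, 6 or 7" into two masks: 0xF8 per byte rejects any value above 7, and the OR-of-shifted-bit-1 trick rejects 2 and 3, the only remaining values whose bit 1 is set while bit 2 is clear. A small stand-alone check of that property (a user-space test for illustration, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	static int pat_valid(uint64_t data)
	{
		if (data & 0xF8F8F8F8F8F8F8F8ull)
			return 0;
		/* Valid iff no byte has bit 1 set with bit 2 clear (i.e. no 2 or 3). */
		return (data | ((data & 0x0202020202020202ull) << 1)) == data;
	}

	int main(void)
	{
		for (uint64_t v = 0; v < 8; v++)	/* expect 2 and 3 to be invalid */
			printf("PAT type %llu: %s\n", (unsigned long long)v,
			       pat_valid(v) ? "valid" : "invalid");
		return 0;
	}
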
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 20d14254b686..62fc457f3849 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -58,6 +58,37 @@
#include "ident_map.c"
+#define DEFINE_POPULATE(fname, type1, type2, init) \
+static inline void fname##_init(struct mm_struct *mm, \
+ type1##_t *arg1, type2##_t *arg2, bool init) \
+{ \
+ if (init) \
+ fname##_safe(mm, arg1, arg2); \
+ else \
+ fname(mm, arg1, arg2); \
+}
+
+DEFINE_POPULATE(p4d_populate, p4d, pud, init)
+DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
+DEFINE_POPULATE(pud_populate, pud, pmd, init)
+DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
+
+#define DEFINE_ENTRY(type1, type2, init) \
+static inline void set_##type1##_init(type1##_t *arg1, \
+ type2##_t arg2, bool init) \
+{ \
+ if (init) \
+ set_##type1##_safe(arg1, arg2); \
+ else \
+ set_##type1(arg1, arg2); \
+}
+
+DEFINE_ENTRY(p4d, p4d, init)
+DEFINE_ENTRY(pud, pud, init)
+DEFINE_ENTRY(pmd, pmd, init)
+DEFINE_ENTRY(pte, pte, init)
+
/*
* NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
* physical space so we can cache the place of the first one and move
@@ -414,7 +445,7 @@ void __init cleanup_highmap(void)
*/
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
- pgprot_t prot)
+ pgprot_t prot, bool init)
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
@@ -432,7 +463,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pte_safe(pte, __pte(0));
+ set_pte_init(pte, __pte(0), init);
continue;
}
@@ -452,7 +483,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
pages++;
- set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+ set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
}
@@ -468,7 +499,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
*/
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
- unsigned long page_size_mask, pgprot_t prot)
+ unsigned long page_size_mask, pgprot_t prot, bool init)
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
@@ -487,7 +518,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pmd_safe(pmd, __pmd(0));
+ set_pmd_init(pmd, __pmd(0), init);
continue;
}
@@ -496,7 +527,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
spin_lock(&init_mm.page_table_lock);
pte = (pte_t *)pmd_page_vaddr(*pmd);
paddr_last = phys_pte_init(pte, paddr,
- paddr_end, prot);
+ paddr_end, prot,
+ init);
spin_unlock(&init_mm.page_table_lock);
continue;
}
@@ -524,19 +556,20 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
if (page_size_mask & (1<<PG_LEVEL_2M)) {
pages++;
spin_lock(&init_mm.page_table_lock);
- set_pte_safe((pte_t *)pmd,
- pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
- __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+ set_pte_init((pte_t *)pmd,
+ pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)),
+ init);
spin_unlock(&init_mm.page_table_lock);
paddr_last = paddr_next;
continue;
}
pte = alloc_low_page();
- paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
+ paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
spin_lock(&init_mm.page_table_lock);
- pmd_populate_kernel_safe(&init_mm, pmd, pte);
+ pmd_populate_kernel_init(&init_mm, pmd, pte, init);
spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
@@ -551,7 +584,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
*/
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
- unsigned long page_size_mask)
+ unsigned long page_size_mask, bool init)
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
@@ -573,7 +606,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pud_safe(pud, __pud(0));
+ set_pud_init(pud, __pud(0), init);
continue;
}
@@ -583,7 +616,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
paddr_last = phys_pmd_init(pmd, paddr,
paddr_end,
page_size_mask,
- prot);
+ prot, init);
continue;
}
/*
@@ -610,9 +643,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
if (page_size_mask & (1<<PG_LEVEL_1G)) {
pages++;
spin_lock(&init_mm.page_table_lock);
- set_pte_safe((pte_t *)pud,
- pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
- PAGE_KERNEL_LARGE));
+ set_pte_init((pte_t *)pud,
+ pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
+ PAGE_KERNEL_LARGE),
+ init);
spin_unlock(&init_mm.page_table_lock);
paddr_last = paddr_next;
continue;
@@ -620,10 +654,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
pmd = alloc_low_page();
paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
- page_size_mask, prot);
+ page_size_mask, prot, init);
spin_lock(&init_mm.page_table_lock);
- pud_populate_safe(&init_mm, pud, pmd);
+ pud_populate_init(&init_mm, pud, pmd, init);
spin_unlock(&init_mm.page_table_lock);
}
@@ -634,14 +668,15 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
- unsigned long page_size_mask)
+ unsigned long page_size_mask, bool init)
{
unsigned long paddr_next, paddr_last = paddr_end;
unsigned long vaddr = (unsigned long)__va(paddr);
int i = p4d_index(vaddr);
if (!pgtable_l5_enabled())
- return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+ return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
+ page_size_mask, init);
for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
p4d_t *p4d;
@@ -657,39 +692,34 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_p4d_safe(p4d, __p4d(0));
+ set_p4d_init(p4d, __p4d(0), init);
continue;
}
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, 0);
- paddr_last = phys_pud_init(pud, paddr,
- paddr_end,
- page_size_mask);
+ paddr_last = phys_pud_init(pud, paddr, paddr_end,
+ page_size_mask, init);
continue;
}
pud = alloc_low_page();
paddr_last = phys_pud_init(pud, paddr, paddr_end,
- page_size_mask);
+ page_size_mask, init);
spin_lock(&init_mm.page_table_lock);
- p4d_populate_safe(&init_mm, p4d, pud);
+ p4d_populate_init(&init_mm, p4d, pud, init);
spin_unlock(&init_mm.page_table_lock);
}
return paddr_last;
}
-/*
- * Create page table mapping for the physical memory for specific physical
- * addresses. The virtual and physical addresses have to be aligned on PMD level
- * down. It returns the last physical address mapped.
- */
-unsigned long __meminit
-kernel_physical_mapping_init(unsigned long paddr_start,
- unsigned long paddr_end,
- unsigned long page_size_mask)
+static unsigned long __meminit
+__kernel_physical_mapping_init(unsigned long paddr_start,
+ unsigned long paddr_end,
+ unsigned long page_size_mask,
+ bool init)
{
bool pgd_changed = false;
unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
@@ -709,19 +739,22 @@ kernel_physical_mapping_init(unsigned long paddr_start,
p4d = (p4d_t *)pgd_page_vaddr(*pgd);
paddr_last = phys_p4d_init(p4d, __pa(vaddr),
__pa(vaddr_end),
- page_size_mask);
+ page_size_mask,
+ init);
continue;
}
p4d = alloc_low_page();
paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
- page_size_mask);
+ page_size_mask, init);
spin_lock(&init_mm.page_table_lock);
if (pgtable_l5_enabled())
- pgd_populate_safe(&init_mm, pgd, p4d);
+ pgd_populate_init(&init_mm, pgd, p4d, init);
else
- p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
+ p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
+ (pud_t *) p4d, init);
+
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
@@ -732,6 +765,37 @@ kernel_physical_mapping_init(unsigned long paddr_start,
return paddr_last;
}
+
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. Note that it can only be used to populate non-present entries.
+ * The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
+unsigned long __meminit
+kernel_physical_mapping_init(unsigned long paddr_start,
+ unsigned long paddr_end,
+ unsigned long page_size_mask)
+{
+ return __kernel_physical_mapping_init(paddr_start, paddr_end,
+ page_size_mask, true);
+}
+
+/*
+ * This function is similar to kernel_physical_mapping_init() above with the
+ * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
+ * variants when updating the mapping. The caller is responsible for flushing
+ * the TLBs after the function returns.
+ */
+unsigned long __meminit
+kernel_physical_mapping_change(unsigned long paddr_start,
+ unsigned long paddr_end,
+ unsigned long page_size_mask)
+{
+ return __kernel_physical_mapping_init(paddr_start, paddr_end,
+ page_size_mask, false);
+}
+
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
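
The DEFINE_POPULATE/DEFINE_ENTRY macros introduced above generate thin wrappers that choose at run time between the _safe() helpers (which WARN if an entry is already present) and the plain setters. As a concrete instance, DEFINE_ENTRY(pte, pte, init) expands to roughly:

	static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
	{
		if (init)
			set_pte_safe(arg1, arg2);	/* first-time population: entry must be empty */
		else
			set_pte(arg1, arg2);		/* changing an existing mapping is allowed */
	}
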
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 385afa2b9e17..51f50a7a07ef 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -301,9 +301,13 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
else
split_page_size_mask = 1 << PG_LEVEL_2M;
- kernel_physical_mapping_init(__pa(vaddr & pmask),
- __pa((vaddr_end & pmask) + psize),
- split_page_size_mask);
+ /*
+ * kernel_physical_mapping_change() does not flush the TLBs, so
+ * a TLB flush is required after we exit from the for loop.
+ */
+ kernel_physical_mapping_change(__pa(vaddr & pmask),
+ __pa((vaddr_end & pmask) + psize),
+ split_page_size_mask);
}
ret = 0;
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 319bde386d5f..eeae142062ed 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -13,6 +13,9 @@ void early_ioremap_page_table_range_init(void);
unsigned long kernel_physical_mapping_init(unsigned long start,
unsigned long end,
unsigned long page_size_mask);
+unsigned long kernel_physical_mapping_change(unsigned long start,
+ unsigned long end,
+ unsigned long page_size_mask);
void zone_sizes_init(void);
extern int after_bootmem;
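
As the mem_encrypt.c hunk above notes, kernel_physical_mapping_change() may rewrite live entries and deliberately leaves TLB invalidation to its caller. The intended calling pattern is therefore along these lines (a sketch; the address arguments are illustrative):

	/* Rewrite attributes on an already-mapped physical range... */
	kernel_physical_mapping_change(pa_start, pa_end, split_page_size_mask);

	/* ...then flush stale translations ourselves. */
	__flush_tlb_all();
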
diff --git a/arch/xtensa/include/asm/segment.h b/arch/xtensa/include/asm/segment.h
deleted file mode 100644
index 98964ad15ca2..000000000000
--- a/arch/xtensa/include/asm/segment.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/segment.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SEGMENT_H
-#define _XTENSA_SEGMENT_H
-
-#include <linux/uaccess.h>
-
-#endif /* _XTENSA_SEGEMENT_H */
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 30084eaf8422..5fa0ee1c8e00 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -398,3 +398,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 820e8738af11..b1506376d502 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -18,6 +18,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/kdev_t.h>
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 42536674020a..4db620849515 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -43,8 +43,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
unsigned inline_vecs;
if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
- bip = kmalloc(sizeof(struct bio_integrity_payload) +
- sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+ bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
inline_vecs = nr_vecs;
} else {
bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
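
struct_size() computes the allocation size for a structure with a trailing variable-length array and, unlike the open-coded sizeof arithmetic it replaces above, saturates to SIZE_MAX on multiplication overflow so that kmalloc() fails instead of under-allocating. A sketch of the general pattern (the struct here is illustrative, not the real bio_integrity_payload layout):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct payload {
		unsigned int	nr_vecs;
		struct bio_vec	vecs[];		/* trailing flexible array */
	};

	static struct payload *payload_alloc(unsigned int nr_vecs, gfp_t gfp_mask)
	{
		/* sizeof(*p) + nr_vecs * sizeof(p->vecs[0]), overflow-checked */
		struct payload *p = kmalloc(struct_size(p, vecs, nr_vecs), gfp_mask);

		if (p)
			p->nr_vecs = nr_vecs;
		return p;
	}
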
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ddf598ae8b6b..c16f9460c4a2 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -17,6 +17,7 @@
#include <linux/clkdev.h>
#include <linux/acpi.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/pm.h>
#include "internal.h"
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 59b2317acea9..3495e1733a8e 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -909,7 +909,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
host = ata_host_alloc(dev, 1);
if (!host) {
- dev_err(dev, "ata_host_alloc failed\n");
ret = -ENOMEM;
goto err_pm_put;
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 17defbf4f332..2da615b45b31 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -153,6 +153,12 @@ static void brd_free_pages(struct brd_device *brd)
pos++;
/*
+ * It takes 3.4 seconds to remove an 80 GiB ramdisk,
+ * so call cond_resched() here to avoid stalling the CPU.
+ */
+ cond_resched();
+
+ /*
* This assumes radix_tree_gang_lookup always returns as
* many pages as possible. If the radix-tree code changes,
* so will this have to.
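The brd hunk leans on cond_resched(), which offers the scheduler a voluntary preemption point inside a long kernel loop. A rough userspace analogue of the pattern, yielding periodically during a long teardown loop (the work item and the yield interval are illustrative):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	long i;

	for (i = 0; i < 10000000L; i++) {
		/* ... free one unit of work, e.g. one page ... */
		if ((i & 0xffff) == 0)
			sched_yield();	/* analogue of cond_resched() */
	}
	puts("done");
	return 0;
}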
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2210c1b9491b..e5009a34f9c2 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -934,7 +934,7 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
struct rbd_client *rbdc;
int ret;
- mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock(&client_mutex);
rbdc = rbd_client_find(ceph_opts);
if (rbdc) {
ceph_destroy_options(ceph_opts);
@@ -1326,7 +1326,7 @@ static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
zero_bvecs(&obj_req->bvec_pos, off, bytes);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -1581,7 +1581,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
kfree(obj_request->bvec_pos.bvecs);
break;
default:
- rbd_assert(0);
+ BUG();
}
kfree(obj_request->img_extents);
@@ -1781,7 +1781,7 @@ static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2036,7 +2036,7 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req)
ret = rbd_obj_setup_zeroout(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
if (ret < 0)
return ret;
@@ -2383,7 +2383,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
} else {
ret = rbd_img_fill_from_bvecs(child_img_req,
@@ -2515,7 +2515,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
num_osd_ops += count_zeroout_ops(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
@@ -2542,7 +2542,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
__rbd_obj_setup_zeroout(obj_req, which);
break;
default:
- rbd_assert(0);
+ BUG();
}
ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
@@ -3842,8 +3842,12 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- rbd_assert(op_type == OBJ_OP_READ ||
- rbd_dev->spec->snap_id == CEPH_NOSNAP);
+ if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
+ rbd_warn(rbd_dev, "%s on read-only snapshot",
+ obj_op_name(op_type));
+ result = -EIO;
+ goto err;
+ }
/*
* Quit early if the mapped snapshot no longer exists. It's
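The rbd_queue_workfn hunk is a good example of downgrading an internal assertion to a recoverable error: a write aimed at a mapped read-only snapshot is a state userspace can legitimately reach, so the request is now failed with -EIO rather than crashing the kernel. A compact sketch of the pattern (the submit() helper and its arguments are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum op { OP_READ, OP_WRITE };

/* Fail writes against a read-only snapshot instead of asserting. */
static int submit(enum op op, bool mapped_snapshot)
{
	if (op != OP_READ && mapped_snapshot) {
		fprintf(stderr, "write on read-only snapshot\n");
		return -EIO;
	}
	/* ... issue the I/O ... */
	return 0;
}

int main(void)
{
	printf("%d\n", submit(OP_WRITE, true));	/* -5 (-EIO) */
	printf("%d\n", submit(OP_READ, true));	/* 0 */
	return 0;
}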
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 02d3bcd6216c..71c2e9519ca8 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index c68dada97316..aba787b2e771 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 2a2c7569336a..b6d07ca0164f 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/bcm2835-aux.h>
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 9fcae932e082..770bb01f523e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index eee64b9e5d10..cc3b1e1bc087 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -15,8 +15,9 @@
#include "clk-kona.h"
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
index 4d0be66aa6a8..eb14a5bc0507 100644
--- a/drivers/clk/berlin/berlin2-div.c
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 0b4b44a2579e..bccdfa00fd37 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 9b9db743df25..e9518d35f262 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index d1a97d971183..51f26619b6a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -10,8 +10,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index d81f1d2e9129..b1e556f20911 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index a47c2b600f20..97d1e8c35b71 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index 94470b4eadf4..e507aa958da9 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
index 0f7198191ea2..bf120bec59ae 100644
--- a/drivers/clk/davinci/pll-da850.c
+++ b/drivers/clk/davinci/pll-da850.c
@@ -11,6 +11,7 @@
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index d413ade95c99..376be03bb546 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index c7ae653c8a16..67c495b67c18 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -6,8 +6,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index e8b2c43b1bb8..89934bee7c9e 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 574fac1a169f..388bdb94f841 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -3,9 +3,10 @@
* Copyright 2018 NXP
*/
+#include <linux/clk-provider.h>
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/clk-provider.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 76b9eb15604e..fece503e3610 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
index e63188eb08ac..6e93284c397b 100644
--- a/drivers/clk/imx/clk-imx21.c
+++ b/drivers/clk/imx/clk-imx21.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx21-clock.h>
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index 0a0ab95d16fe..a3753067fc12 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -3,6 +3,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx27-clock.h>
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index fb567dcc2118..a03bbed662c6 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index d7e62c3620d3..8155b12cf0e1 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index 991bbe63f156..5d65f65c512e 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 510b685212d3..b80af61dc1f3 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -20,6 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index b86edd328249..25f7df028e67 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4740-cgu.h>
#include <asm/mach-jz4740/clock.h>
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index bf46a0df2004..dfce740c25a8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/clock/jz4770-cgu.h>
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 6427be117ff1..d03b7fcfd82b 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4780-cgu.h>
#include "cgu.h"
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
index 3466f7320b40..22a165ef65cf 100644
--- a/drivers/clk/loongson1/clk-loongson1c.c
+++ b/drivers/clk/loongson1/clk-loongson1c.c
@@ -9,6 +9,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <loongson1.h>
#include "clk.h"
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index c3b301463425..4680064f1951 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/mach-pic32/pic32.h>
#include <asm/traps.h>
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 9f734779be92..e6c05df2d47f 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 1f1cff428d78..5fc6d486a381 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index ee272d4d8c24..585a02e0b330 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 1fc84b0e72ee..818b175391fa 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 5969f620607a..f2e171a01fb4 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index f5bc8bd192b7..8b686da5577b 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 7524d19fe60b..7f67c1036ff9 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 42627bf8a09e..5326f77eb35a 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/pxa-clock.h>
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 2719c248c67b..cfed11c659d9 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 5967656c13cc..d8190f007a81 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 2913b4148157..da9fe3f032eb 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 3cda53a97f4e..fbc34beafc78 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index dc8ffc7c727a..5f25a70bc61c 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 97c72477cd54..7d042183aa37 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index b241f9ca3d71..cc90b11a9c25 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 30df0dc853f0..0201809bbd37 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 784b81e1ea7c..ba9f00dc9740 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -3,8 +3,9 @@
* Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
#include "clk.h"
#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index 601a77f1af78..68d23be18cbc 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index c3001980dbdc..3bf919b6c6e3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 5970a50671b9..8278a54db343 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 5ecf28854876..378420b8835a 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/rk3188-cru-common.h>
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 7af48184b022..7176003b5c7c 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 24baeb56a1b3..85907f31c63f 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index f12142d9cea2..9b03c1abf19c 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 7c4d242f19c1..d239bbc2fc77 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 5a628148f3f0..2322d712ba88 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c
index 089cb17925e5..6c051aa04e59 100644
--- a/drivers/clk/rockchip/clk-rv1108.c
+++ b/drivers/clk/rockchip/clk-rv1108.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 0ea8e8080d1a..d5fac5a8a3d7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index a5fddebbe530..3f80bcd46074 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -33,6 +33,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 9c95390d2d77..ce41f36a0e29 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 0e9a41a4cac8..facaad3c56a1 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 54066e6508d3..d2a68a792a21 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 8ae44b5db4c2..91db7894125d 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -4,6 +4,7 @@
// Author: Marek Szyprowski <m.szyprowski@samsung.com>
// Common Clock Framework support for Exynos5 power-domain dependent clocks
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f14139bcb0c1..c8265c4cbc4f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -12,6 +12,7 @@
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 1c4c7a3039f1..0c6782ceac48 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -13,7 +13,8 @@
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 82f8ae22fd34..0117e40c1d0a 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index dd1159050a5a..ce21b89d1eb1 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index f38f0e24e3b6..b2ea4dfb5b8c 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 1f6e47cd327d..9ad546a5f74c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -15,6 +15,7 @@
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index 0ec8bf7b4b28..6282ee2f361c 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -30,6 +30,7 @@
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_clk.h>
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index eee2d48ab656..54a464fa63e0 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017, Intel Corporation
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b58ddf..5c50e723ecae 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index c4d0b6f6abf2..4705eb544f01 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index c514d39760cb..23497f07ad89 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 129ebd2588fd..2bbfb3343311 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index be0deee70182..d3fc1f5bf396 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 3c32d7798f27..9d3f98962779 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index fa2c2dd77102..813e9bf73cbf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 609970c0b666..b494c4fe0b2c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 4b5f8f4e4ab8..a9c0c5406b85 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index c7bf814dfd2b..25bcf3fd2dfc 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 5f714b4d8ee4..be5920e8a9ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e71e2451c2e3..0f3df565c6c1 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index a22d11aa38ba..f9625f7b9ec2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index eada0e291859..ec64eb692ecf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 8936ef87652c..0e23583e4f58 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index dc9f0a365664..e748b8a6f3c5 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 302a18efd39f..6d407a8a61ee 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_div.h"
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index d1d168d4c4f0..1842603f8f11 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_frac.h"
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index cd069d5da215..9c81644e9dfe 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
index f9869f7353c0..b23410682088 100644
--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/sunxi-ng.h>
+#include <linux/io.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 0357349eb767..e17fb4c9fcfe 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 12e0783caee6..c2a672797a74 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mult.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 312664155a54..f9b409c3a89c 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mux.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 2485bda87a9a..50c7e6b1ba13 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nk.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 841840e35e61..aa5beaabc292 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkm.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index cbcdf664f336..53ec4fb59880 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkmp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 424d8635b053..e15413174aa7 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_frac.h"
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index 400c58ad72fd..0a4a6fd13f5b 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_phase.h"
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 3b3dc9bdf2b0..e510467ea24c 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_sdm.h"
diff --git a/drivers/clk/sunxi/clk-a10-mod1.c b/drivers/clk/sunxi/clk-a10-mod1.c
index e2819fa09637..9e6796a7b4c4 100644
--- a/drivers/clk/sunxi/clk-a10-mod1.c
+++ b/drivers/clk/sunxi/clk-a10-mod1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index d8eab90ae661..a709b6a551af 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index d9ea22ec4e25..d119b453dccd 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 3437f734c9bf..e6d639d9ea70 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index fc0278a1acc7..915954507d0a 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index a085c3bc127c..8130467d647a 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
index 9780fac6d029..bb2dc83fc697 100644
--- a/drivers/clk/sunxi/clk-sun4i-display.c
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-pll3.c b/drivers/clk/sunxi/clk-sun4i-pll3.c
index f66267e77d9c..c879d7e25ca0 100644
--- a/drivers/clk/sunxi/clk-sun4i-pll3.c
+++ b/drivers/clk/sunxi/clk-sun4i-pll3.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
index b6d29d1bedca..af8ca5019639 100644
--- a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index d5c31804ee54..5a7d4dd09e85 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index bee305bdddbe..bfbcd71b225d 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 56db89b6979f..0e924c9cbd5c 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-cpus.c b/drivers/clk/sunxi/clk-sun9i-cpus.c
index 4d5e14142e15..01255d827fc9 100644
--- a/drivers/clk/sunxi/clk-sun9i-cpus.c
+++ b/drivers/clk/sunxi/clk-sun9i-cpus.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index f00d8758ba24..da264d0f7f4b 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 892c29030b7b..f5b1c0067365 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 917fc27a33dd..7d15e0432ed4 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 93ecb538e59b..b7f763f0ecd8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/tegra/clk-periph-fixed.c b/drivers/clk/tegra/clk-periph-fixed.c
index c57dfb037b10..956f2215c733 100644
--- a/drivers/clk/tegra/clk-periph-fixed.c
+++ b/drivers/clk/tegra/clk-periph-fixed.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index 473d418533cb..a5cd3e31dbae 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/types.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index ffaf17f71860..6f2862eddad7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 0c210984765a..fdfb90058504 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -14,6 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ba17cc5bd04b..e0b8ed3a1e80 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index ed24f20f63c7..95e36ba64acc 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index c2b6bb814742..4fa0cd951d2e 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 19174835693b..5970edb6d334 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index be89416e2358..21c9ce8526c0 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 47eb4d13ed5f..5e2e0348d460 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,8 +263,8 @@ config EDAC_PND2
micro-server but may appear on others in the future.
config EDAC_MPC85XX
- tristate "Freescale MPC83xx / MPC85xx"
- depends on FSL_SOC
+ bool "Freescale MPC83xx / MPC85xx"
+ depends on FSL_SOC && EDAC=y
help
Support for error detection and correction on the Freescale
MPC8349, MPC8560, MPC8540, MPC8548, and T4240 SoCs.
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 13594ffadcb3..64922c8fa7e3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -679,22 +679,18 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
struct mem_ctl_info *edac_mc_find(int idx)
{
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
struct list_head *item;
mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx) {
- goto unlock;
- }
- break;
- }
+ if (mci->mc_idx == idx)
+ goto unlock;
}
+ mci = NULL;
unlock:
mutex_unlock(&mem_ctls_mutex);
return mci;
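The edac_mc_find() rewrite is the classic locked list lookup: take the mutex, scan, jump to the unlock label on a hit, and only set the result to NULL once the whole list has been walked. A userspace sketch of the same shape with pthreads (the list and lock are illustrative stand-ins for mc_devices and mem_ctls_mutex):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct mci { int idx; struct mci *next; };

static pthread_mutex_t mem_ctls_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mci *mc_list;

static struct mci *mc_find(int idx)
{
	struct mci *mci;

	pthread_mutex_lock(&mem_ctls_lock);
	for (mci = mc_list; mci; mci = mci->next)
		if (mci->idx == idx)
			goto unlock;	/* found: return it locked-out */
	mci = NULL;			/* walked the whole list: no match */
unlock:
	pthread_mutex_unlock(&mem_ctls_lock);
	return mci;
}

int main(void)
{
	struct mci a = { .idx = 3, .next = NULL };

	mc_list = &a;
	printf("%s %s\n", mc_find(3) ? "hit" : "miss",
	       mc_find(7) ? "hit" : "miss");	/* hit miss */
	return 0;
}

Note the simplification also drops the old early-break on "mc_idx >= idx", which assumed the list stayed sorted by index.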
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index fb985ba1a176..2598741a00a6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -11,6 +11,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "sun4i_hdmi.h"
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 9412709067f5..2ea4e20b7b8a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -41,6 +41,7 @@
#include <linux/component.h>
#include <linux/dmaengine.h>
#include <linux/i2c.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index cd91510a5387..e694c46ff039 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -118,9 +118,7 @@ static DEFINE_IDA(hwmon_ida);
* The complex conditional is necessary to avoid a cyclic dependency
* between hwmon and thermal_sys modules.
*/
-#if IS_REACHABLE(CONFIG_THERMAL) && defined(CONFIG_THERMAL_OF) && \
- (!defined(CONFIG_THERMAL_HWMON) || \
- !(defined(MODULE) && IS_MODULE(CONFIG_THERMAL)))
+#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 5f82036fe322..0df7454832ef 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,6 +45,8 @@ struct nvm_dev_map {
int num_ch;
};
+static void nvm_free(struct kref *ref);
+
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -325,6 +327,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
struct nvm_target *t;
struct nvm_tgt_dev *tgt_dev;
void *targetdata;
+ unsigned int mdts;
int ret;
switch (create->conf.type) {
@@ -412,8 +415,12 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->private_data = targetdata;
tqueue->queuedata = targetdata;
- blk_queue_max_hw_sectors(tqueue,
- (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+ mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
+ if (dev->geo.mdts) {
+ mdts = min_t(u32, dev->geo.mdts,
+ (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+ }
+ blk_queue_max_hw_sectors(tqueue, mdts);
set_capacity(tdisk, tt->capacity(targetdata));
add_disk(tdisk);
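The nvm_create_tgt() hunk caps the queue's max_hw_sectors by the device's maximum data transfer size (mdts) when one is advertised, with 0 taken to mean no limit. The clamp in isolation (the helper name and the MIN stand-in for min_t() are illustrative):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))	/* stand-in for min_t(u32, ...) */

/* mdts == 0 means the device reports no transfer-size limit. */
static unsigned int max_hw_sectors(unsigned int csecs, unsigned int max_vlba,
				   unsigned int mdts)
{
	unsigned int cap = (csecs >> 9) * max_vlba;	/* geometry-derived cap */

	return mdts ? MIN(mdts, cap) : cap;
}

int main(void)
{
	printf("%u\n", max_hw_sectors(4096, 64, 0));	/* 512 */
	printf("%u\n", max_hw_sectors(4096, 64, 256));	/* 256 */
	return 0;
}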
@@ -476,7 +483,6 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
/**
* nvm_remove_tgt - Removes a target from the media manager
- * @dev: device
* @remove: ioctl structure with target name to remove.
*
* Returns:
@@ -484,18 +490,28 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
* 1: on not found
* <0: on error
*/
-static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
struct nvm_target *t;
+ struct nvm_dev *dev;
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, remove->tgtname);
- if (!t) {
+ down_read(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ t = nvm_find_target(dev, remove->tgtname);
+ if (t) {
+ mutex_unlock(&dev->mlock);
+ break;
+ }
mutex_unlock(&dev->mlock);
- return 1;
}
+ up_read(&nvm_lock);
+
+ if (!t)
+ return 1;
+
__nvm_remove_target(t, true);
- mutex_unlock(&dev->mlock);
+ kref_put(&dev->ref, nvm_free);
return 0;
}
@@ -1089,15 +1105,16 @@ err_fmtype:
return ret;
}
-static void nvm_free(struct nvm_dev *dev)
+static void nvm_free(struct kref *ref)
{
- if (!dev)
- return;
+ struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
if (dev->dma_pool)
dev->ops->destroy_dma_pool(dev->dma_pool);
- nvm_unregister_map(dev);
+ if (dev->rmap)
+ nvm_unregister_map(dev);
+
kfree(dev->lun_map);
kfree(dev);
}
@@ -1134,7 +1151,13 @@ err:
struct nvm_dev *nvm_alloc_dev(int node)
{
- return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+ struct nvm_dev *dev;
+
+ dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+ if (dev)
+ kref_init(&dev->ref);
+
+ return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);
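The core of this lightnvm rework is converting nvm_dev teardown to reference counting: nvm_free() becomes a kref release callback, nvm_alloc_dev() initialises the count, and every owner (each created target, the registration error paths, unregister) drops its reference through kref_put() so the device is freed exactly once, whenever the last user goes away. A userspace analogue with C11 atomics (the struct and helper names are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	atomic_int ref;
	/* ... device state ... */
};

static void dev_release(struct dev *d)	/* plays the role of nvm_free() */
{
	printf("freeing device\n");
	free(d);
}

static void dev_get(struct dev *d)	/* kref_get(&dev->ref) */
{
	atomic_fetch_add(&d->ref, 1);
}

static void dev_put(struct dev *d)	/* kref_put(&dev->ref, nvm_free) */
{
	if (atomic_fetch_sub(&d->ref, 1) == 1)	/* dropped the last ref */
		dev_release(d);
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	atomic_init(&d->ref, 1);	/* kref_init(): creator holds one ref */
	dev_get(d);			/* a target takes a reference */
	dev_put(d);			/* target removed */
	dev_put(d);			/* unregister: count hits zero, frees */
	return 0;
}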
@@ -1142,12 +1165,16 @@ int nvm_register(struct nvm_dev *dev)
{
int ret, exp_pool_size;
- if (!dev->q || !dev->ops)
+ if (!dev->q || !dev->ops) {
+ kref_put(&dev->ref, nvm_free);
return -EINVAL;
+ }
ret = nvm_init(dev);
- if (ret)
+ if (ret) {
+ kref_put(&dev->ref, nvm_free);
return ret;
+ }
exp_pool_size = max_t(int, PAGE_SIZE,
(NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
@@ -1157,7 +1184,7 @@ int nvm_register(struct nvm_dev *dev)
exp_pool_size);
if (!dev->dma_pool) {
pr_err("nvm: could not create dma pool\n");
- nvm_free(dev);
+ kref_put(&dev->ref, nvm_free);
return -ENOMEM;
}
@@ -1179,6 +1206,7 @@ void nvm_unregister(struct nvm_dev *dev)
if (t->dev->parent != dev)
continue;
__nvm_remove_target(t, false);
+ kref_put(&dev->ref, nvm_free);
}
mutex_unlock(&dev->mlock);
@@ -1186,13 +1214,14 @@ void nvm_unregister(struct nvm_dev *dev)
list_del(&dev->devices);
up_write(&nvm_lock);
- nvm_free(dev);
+ kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
+ int ret;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1203,7 +1232,12 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- return nvm_create_tgt(dev, create);
+ kref_get(&dev->ref);
+ ret = nvm_create_tgt(dev, create);
+ if (ret)
+ kref_put(&dev->ref, nvm_free);
+
+ return ret;
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -1322,8 +1356,6 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
struct nvm_ioctl_remove remove;
- struct nvm_dev *dev;
- int ret = 0;
if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
return -EFAULT;
@@ -1335,13 +1367,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return -EINVAL;
}
- list_for_each_entry(dev, &nvm_devices, devices) {
- ret = nvm_remove_tgt(dev, &remove);
- if (!ret)
- break;
- }
-
- return ret;
+ return nvm_remove_tgt(&remove);
}
/* kept for compatibility reasons */
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index c9fa26f95659..5c1034c22197 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -18,7 +18,8 @@
#include "pblk.h"
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+ unsigned long flags)
{
struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
@@ -43,6 +44,7 @@ retry:
goto retry;
case NVM_IO_ERR:
pblk_pipeline_stop(pblk);
+ bio_io_error(bio);
goto out;
}
@@ -79,7 +81,9 @@ retry:
out:
generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
- return ret;
+
+ if (ret == NVM_IO_DONE)
+ bio_endio(bio);
}
/*
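The pblk-cache hunk moves bio completion into pblk_write_to_cache() itself: failures call bio_io_error() and the NVM_IO_DONE path calls bio_endio(), so the caller no longer has to translate a return code into a completion. A small sketch of pushing completion into the callee (this struct bio is a stand-in, not the kernel's):

#include <stdio.h>

struct bio {
	void (*end_io)(struct bio *bio, int error);
};

static void my_end_io(struct bio *bio, int error)
{
	printf("bio completed, error=%d\n", error);
}

/* Completes the bio itself instead of returning a status code. */
static void write_to_cache(struct bio *bio, int copy_ok)
{
	if (!copy_ok) {
		bio->end_io(bio, -5);	/* analogue of bio_io_error(bio) */
		return;
	}
	/* ... stage the data for the write thread ... */
	bio->end_io(bio, 0);		/* analogue of bio_endio(bio) */
}

int main(void)
{
	struct bio bio = { .end_io = my_end_io };

	write_to_cache(&bio, 1);
	write_to_cache(&bio, 0);
	return 0;
}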
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 6ca868868fee..773537804319 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -562,11 +562,9 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int ret;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io_sync(pblk, rqd);
pblk_up_chunk(pblk, ppa_list[0]);
@@ -725,6 +723,7 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = pblk_line_smeta_start(pblk, line);
int i, ret;
@@ -748,9 +747,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
@@ -761,8 +761,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
atomic_dec(&pblk->inflight_io);
- if (rqd.error)
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
pblk_log_read_err(pblk, &rqd);
+ ret = -EIO;
+ }
clear_rqd:
pblk_free_rqd_meta(pblk, &rqd);
@@ -775,6 +777,7 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
@@ -799,12 +802,13 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta = pblk_get_meta(pblk,
rqd.meta_list, i);
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
meta->lba = lba_list[paddr] = addr_empty;
}
@@ -834,8 +838,9 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
+ void *ppa_list_buf, *meta_list;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = line->emeta_ssec;
dma_addr_t dma_ppa_list, dma_meta_list;
@@ -851,7 +856,7 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
if (!meta_list)
return -ENOMEM;
- ppa_list = meta_list + pblk_dma_meta_size(pblk);
+ ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
next_rq:
@@ -872,11 +877,12 @@ next_rq:
rqd.bio = bio;
rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
+ rqd.ppa_list = ppa_list_buf;
rqd.dma_meta_list = dma_meta_list;
rqd.dma_ppa_list = dma_ppa_list;
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = rq_ppas;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
@@ -904,7 +910,7 @@ next_rq:
}
for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
}
ret = pblk_submit_io_sync(pblk, &rqd);
@@ -916,8 +922,11 @@ next_rq:
atomic_dec(&pblk->inflight_io);
- if (rqd.error)
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
pblk_log_read_err(pblk, &rqd);
+ ret = -EIO;
+ goto free_rqd_dma;
+ }
emeta_buf += rq_len;
left_ppas -= rq_ppas;
@@ -1162,7 +1171,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
off = bit * geo->ws_opt;
bitmap_set(line->map_bitmap, off, lm->smeta_sec);
line->sec_in_line -= lm->smeta_sec;
- line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
if (init && pblk_line_smeta_write(pblk, line, off)) {
@@ -1521,11 +1529,9 @@ void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int i;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
for (i = 0; i < rqd->nr_ppas; i++)
pblk_ppa_to_line_put(pblk, ppa_list[i]);
}
@@ -1699,6 +1705,14 @@ static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
+ if (line->w_err_gc->has_gc_err) {
+ spin_unlock(&line->lock);
+ pblk_err(pblk, "line %d had errors during GC\n", line->id);
+ pblk_put_line_back(pblk, line);
+ line->w_err_gc->has_gc_err = 0;
+ return;
+ }
+
line->state = PBLK_LINESTATE_FREE;
trace_pblk_line_state(pblk_disk_name(pblk), line->id,
line->state);
@@ -2023,7 +2037,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
struct ppa_addr ppa_l2p;
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return;
}
@@ -2063,7 +2077,7 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
#endif
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return 0;
}
@@ -2109,7 +2123,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
}
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return;
}
@@ -2135,8 +2149,8 @@ out:
spin_unlock(&pblk->trans_lock);
}
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
- sector_t blba, int nr_secs)
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+ sector_t blba, int nr_secs, bool *from_cache)
{
int i;
@@ -2150,10 +2164,19 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
+ if (i > 0 && *from_cache)
+ break;
+ *from_cache = false;
+
kref_get(&line->ref);
+ } else {
+ if (i > 0 && !*from_cache)
+ break;
+ *from_cache = true;
}
}
spin_unlock(&pblk->trans_lock);
+ return i;
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
@@ -2167,7 +2190,7 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
lba = lba_list[i];
if (lba != ADDR_EMPTY) {
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
continue;
}
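The nvm_rq_to_ppa_list() helper used throughout these hunks replaces the open-coded ternary that the diff removes; judging from the removed lines, it is presumably defined along these lines (sketch):

    static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
    {
            return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
    }

It hides the distinction between single-PPA requests (address stored inline in ppa_addr) and multi-PPA requests (addresses in the DMA-mapped ppa_list) behind one accessor.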
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 26a52ea7ec45..63ee205b41c4 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -59,24 +59,28 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
wake_up_process(gc->gc_writer_ts);
}
-static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
+void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct list_head *move_list;
+ spin_lock(&l_mg->gc_lock);
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_CLOSED;
trace_pblk_line_state(pblk_disk_name(pblk), line->id,
line->state);
+
+ /* We need to reset gc_group in order to ensure that
+ * pblk_line_gc_list() will return the proper move_list,
+ * since at this point the line is not on any of the
+ * GC lists.
+ */
+ line->gc_group = PBLK_LINEGC_NONE;
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
-
- if (move_list) {
- spin_lock(&l_mg->gc_lock);
- list_add_tail(&line->list, move_list);
- spin_unlock(&l_mg->gc_lock);
- }
+ list_add_tail(&line->list, move_list);
+ spin_unlock(&l_mg->gc_lock);
}
static void pblk_gc_line_ws(struct work_struct *work)
@@ -84,8 +88,6 @@ static void pblk_gc_line_ws(struct work_struct *work)
struct pblk_line_ws *gc_rq_ws = container_of(work,
struct pblk_line_ws, ws);
struct pblk *pblk = gc_rq_ws->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line *line = gc_rq_ws->line;
struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
@@ -93,18 +95,10 @@ static void pblk_gc_line_ws(struct work_struct *work)
up(&gc->gc_sem);
- gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
- if (!gc_rq->data) {
- pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
- line->id, *line->vsc, gc_rq->nr_secs);
- goto out;
- }
-
/* Read from GC victim block */
ret = pblk_submit_read_gc(pblk, gc_rq);
if (ret) {
- pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
- line->id, ret);
+ line->w_err_gc->has_gc_err = 1;
goto out;
}
@@ -189,6 +183,8 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
struct pblk_line *line = line_ws->line;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line_ws *gc_rq_ws;
struct pblk_gc_rq *gc_rq;
@@ -247,9 +243,13 @@ next_rq:
gc_rq->nr_secs = nr_secs;
gc_rq->line = line;
+ gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
+ if (!gc_rq->data)
+ goto fail_free_gc_rq;
+
gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!gc_rq_ws)
- goto fail_free_gc_rq;
+ goto fail_free_gc_data;
gc_rq_ws->pblk = pblk;
gc_rq_ws->line = line;
@@ -281,6 +281,8 @@ out:
return;
+fail_free_gc_data:
+ vfree(gc_rq->data);
fail_free_gc_rq:
kfree(gc_rq);
fail_free_lba_list:
@@ -290,8 +292,11 @@ fail_free_invalid_bitmap:
fail_free_ws:
kfree(line_ws);
+ /* The line goes back to the closed state, so we cannot release the
+ * additional line reference here; that is done only on the
+ * GC-to-free line state transition.
+ */
pblk_put_line_back(pblk, line);
- kref_put(&line->ref, pblk_line_put);
atomic_dec(&gc->read_inflight_gc);
pblk_err(pblk, "failed to GC line %d\n", line->id);
@@ -355,8 +360,13 @@ static int pblk_gc_read(struct pblk *pblk)
pblk_gc_kick(pblk);
- if (pblk_gc_line(pblk, line))
+ if (pblk_gc_line(pblk, line)) {
pblk_err(pblk, "failed to GC line %d\n", line->id);
+ /* rollback */
+ spin_lock(&gc->r_lock);
+ list_add_tail(&line->list, &gc->r_list);
+ spin_unlock(&gc->r_lock);
+ }
return 0;
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 8b643d0bffae..b351c7f002de 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -47,33 +47,6 @@ static struct pblk_global_caches pblk_caches = {
struct bio_set pblk_bio_set;
-static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
- struct bio *bio)
-{
- int ret;
-
- /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
- * constraint. Writes can be of arbitrary size.
- */
- if (bio_data_dir(bio) == READ) {
- blk_queue_split(q, &bio);
- ret = pblk_submit_read(pblk, bio);
- if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
- bio_put(bio);
-
- return ret;
- }
-
- /* Prevent deadlock in the case of a modest LUN configuration and large
- * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
- * available for user I/O.
- */
- if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
- blk_queue_split(q, &bio);
-
- return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
-}
-
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
struct pblk *pblk = q->queuedata;
@@ -86,13 +59,21 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
}
}
- switch (pblk_rw_io(q, pblk, bio)) {
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
+ /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
+ * constraint. Writes can be of arbitrary size.
+ */
+ if (bio_data_dir(bio) == READ) {
+ blk_queue_split(q, &bio);
+ pblk_submit_read(pblk, bio);
+ } else {
+ /* Prevent deadlock in the case of a modest LUN configuration
+ * and large user I/Os. Unless stalled, the rate limiter
+ * leaves at least 256KB available for user I/O.
+ */
+ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
+ blk_queue_split(q, &bio);
+
+ pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
return BLK_QC_T_NONE;
@@ -105,7 +86,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk)
if (pblk->addrf_len < 32)
entry_size = 4;
- return entry_size * pblk->rl.nr_secs;
+ return entry_size * pblk->capacity;
}
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -164,13 +145,18 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
int ret = 0;
map_size = pblk_trans_map_size(pblk);
- pblk->trans_map = vmalloc(map_size);
- if (!pblk->trans_map)
+ pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
+ | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ if (!pblk->trans_map) {
+ pblk_err(pblk, "failed to allocate L2P (need %zu bytes of memory)\n",
+ map_size);
return -ENOMEM;
+ }
pblk_ppa_set_empty(&ppa);
- for (i = 0; i < pblk->rl.nr_secs; i++)
+ for (i = 0; i < pblk->capacity; i++)
pblk_trans_map_set(pblk, i, ppa);
ret = pblk_l2p_recover(pblk, factory_init);
@@ -701,7 +687,6 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_chks;
- pblk->rl.nr_secs = nr_free_chks * geo->clba;
/* Consider sectors used for metadata */
sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
@@ -1284,7 +1269,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
geo->all_luns, pblk->l_mg.nr_lines,
- (unsigned long long)pblk->rl.nr_secs,
+ (unsigned long long)pblk->capacity,
pblk->rwb.nr_entries);
wake_up_process(pblk->writer_ts);
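The L2P table switch from vmalloc() to __vmalloc() is about failure behavior for a very large allocation; roughly, the added flags mean (descriptive sketch of intent):

    GFP_KERNEL
    | __GFP_NOWARN         /* no allocation-failure stack dump; pblk logs
                            * its own message instead                     */
    | __GFP_RETRY_MAYFAIL  /* retry hard, but fail the allocation rather
                            * than invoke the OOM killer                  */
    | __GFP_HIGHMEM        /* backing pages may come from highmem         */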
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 7fbc99b60cac..5408e32b2f13 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -162,6 +162,7 @@ int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
*erase_ppa = ppa_list[i];
erase_ppa->a.blk = e_line->id;
+ erase_ppa->a.reserved = 0;
spin_unlock(&e_line->lock);
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 03c241b340ea..5abb1705b039 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -642,7 +642,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter, bool advanced_bio)
+ struct ppa_addr ppa)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -673,15 +673,6 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
ret = 0;
goto out;
}
-
- /* Only advance the bio if it hasn't been advanced already. If advanced,
- * this bio is at least a partial bio (i.e., it has partially been
- * filled with data from the cache). If part of the data resides on the
- * media, we will read later on
- */
- if (unlikely(!advanced_bio))
- bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
-
data = bio_data(bio);
memcpy(data, entry->data, rb->seg_size);
@@ -799,8 +790,8 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
}
out:
- spin_unlock(&rb->w_lock);
spin_unlock_irq(&rb->s_lock);
+ spin_unlock(&rb->w_lock);
return ret;
}
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 0b7d5fb4548d..d98ea392fe33 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,8 +26,7 @@
* issued.
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
- sector_t lba, struct ppa_addr ppa,
- int bio_iter, bool advanced_bio)
+ sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -35,73 +34,75 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
- bio_iter, advanced_bio);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}
-static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
+static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct bio *bio, sector_t blba,
- unsigned long *read_bitmap)
+ bool *from_cache)
{
void *meta_list = rqd->meta_list;
- struct ppa_addr ppas[NVM_MAX_VLBA];
- int nr_secs = rqd->nr_ppas;
- bool advanced_bio = false;
- int i, j = 0;
+ int nr_secs, i;
- pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
+retry:
+ nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
+ from_cache);
+
+ if (!*from_cache)
+ goto end;
for (i = 0; i < nr_secs; i++) {
- struct ppa_addr p = ppas[i];
struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
sector_t lba = blba + i;
-retry:
- if (pblk_ppa_empty(p)) {
+ if (pblk_ppa_empty(rqd->ppa_list[i])) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
- WARN_ON(test_and_set_bit(i, read_bitmap));
meta->lba = addr_empty;
-
- if (unlikely(!advanced_bio)) {
- bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
- advanced_bio = true;
+ } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
+ /*
+ * Try to read from the write buffer. The address is
+ * later checked against the write buffer to prevent
+ * retrieving overwritten data.
+ */
+ if (!pblk_read_from_cache(pblk, bio, lba,
+ rqd->ppa_list[i])) {
+ if (i == 0) {
+ /*
+ * We have not called bio_advance()
+ * yet, so we can simply retry.
+ */
+ goto retry;
+ } else {
+ /*
+ * We have already called bio_advance(),
+ * so we cannot retry; we need to return
+ * from this function so that the caller
+ * can split the bio at the current
+ * sector position.
+ */
+ nr_secs = i;
+ goto end;
+ }
}
-
- goto next;
- }
-
- /* Try to read from write buffer. The address is later checked
- * on the write buffer to prevent retrieving overwritten data.
- */
- if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i,
- advanced_bio)) {
- pblk_lookup_l2p_seq(pblk, &p, lba, 1);
- goto retry;
- }
- WARN_ON(test_and_set_bit(i, read_bitmap));
meta->lba = cpu_to_le64(lba);
- advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
- } else {
- /* Read from media non-cached sectors */
- rqd->ppa_list[j++] = p;
}
-
-next:
- if (advanced_bio)
- bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
+ bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
+end:
if (pblk_io_aligned(pblk, nr_secs))
rqd->is_seq = 1;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
+
+ return nr_secs;
}
@@ -175,12 +176,12 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
-static void pblk_end_user_read(struct bio *bio)
+static void pblk_end_user_read(struct bio *bio, int error)
{
-#ifdef CONFIG_NVM_PBLK_DEBUG
- WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
-#endif
- bio_endio(bio);
+ if (error && error != NVM_RSP_WARN_HIGHECC)
+ bio_io_error(bio);
+ else
+ bio_endio(bio);
}
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
@@ -197,9 +198,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
pblk_log_read_err(pblk, rqd);
pblk_read_check_seq(pblk, rqd, r_ctx->lba);
-
- if (int_bio)
- bio_put(int_bio);
+ bio_put(int_bio);
if (put_line)
pblk_rq_to_line_put(pblk, rqd);
@@ -219,188 +218,17 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = (struct bio *)r_ctx->private;
- pblk_end_user_read(bio);
+ pblk_end_user_read(bio, rqd->error);
__pblk_end_io_read(pblk, rqd, true);
}
-static void pblk_end_partial_read(struct nvm_rq *rqd)
-{
- struct pblk *pblk = rqd->private;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_pr_ctx *pr_ctx = r_ctx->private;
- struct pblk_sec_meta *meta;
- struct bio *new_bio = rqd->bio;
- struct bio *bio = pr_ctx->orig_bio;
- void *meta_list = rqd->meta_list;
- unsigned long *read_bitmap = pr_ctx->bitmap;
- struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
- struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
- int nr_secs = pr_ctx->orig_nr_secs;
- int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
- void *src_p, *dst_p;
- int bit, i;
-
- if (unlikely(nr_holes == 1)) {
- struct ppa_addr ppa;
-
- ppa = rqd->ppa_addr;
- rqd->ppa_list = pr_ctx->ppa_ptr;
- rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
- rqd->ppa_list[0] = ppa;
- }
-
- for (i = 0; i < nr_secs; i++) {
- meta = pblk_get_meta(pblk, meta_list, i);
- pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
- }
-
- /* Fill the holes in the original bio */
- i = 0;
- for (bit = 0; bit < nr_secs; bit++) {
- if (!test_bit(bit, read_bitmap)) {
- struct bio_vec dst_bv, src_bv;
- struct pblk_line *line;
-
- line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
- kref_put(&line->ref, pblk_line_put);
-
- meta = pblk_get_meta(pblk, meta_list, bit);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
-
- dst_bv = bio_iter_iovec(bio, orig_iter);
- src_bv = bio_iter_iovec(new_bio, new_iter);
-
- src_p = kmap_atomic(src_bv.bv_page);
- dst_p = kmap_atomic(dst_bv.bv_page);
-
- memcpy(dst_p + dst_bv.bv_offset,
- src_p + src_bv.bv_offset,
- PBLK_EXPOSED_PAGE_SIZE);
-
- kunmap_atomic(src_p);
- kunmap_atomic(dst_p);
-
- flush_dcache_page(dst_bv.bv_page);
- mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
-
- bio_advance_iter(new_bio, &new_iter,
- PBLK_EXPOSED_PAGE_SIZE);
- i++;
- }
- bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
- }
-
- bio_put(new_bio);
- kfree(pr_ctx);
-
- /* restore original request */
- rqd->bio = NULL;
- rqd->nr_ppas = nr_secs;
-
- bio_endio(bio);
- __pblk_end_io_read(pblk, rqd, false);
-}
-
-static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap,
- int nr_holes)
-{
- void *meta_list = rqd->meta_list;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_pr_ctx *pr_ctx;
- struct bio *new_bio, *bio = r_ctx->private;
- int nr_secs = rqd->nr_ppas;
- int i;
-
- new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
- if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
- goto fail_bio_put;
-
- if (nr_holes != new_bio->bi_vcnt) {
- WARN_ONCE(1, "pblk: malformed bio\n");
- goto fail_free_pages;
- }
-
- pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
- if (!pr_ctx)
- goto fail_free_pages;
-
- for (i = 0; i < nr_secs; i++) {
- struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-
- pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
- }
-
- new_bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
- rqd->bio = new_bio;
- rqd->nr_ppas = nr_holes;
-
- pr_ctx->orig_bio = bio;
- bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
- pr_ctx->bio_init_idx = bio_init_idx;
- pr_ctx->orig_nr_secs = nr_secs;
- r_ctx->private = pr_ctx;
-
- if (unlikely(nr_holes == 1)) {
- pr_ctx->ppa_ptr = rqd->ppa_list;
- pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
- rqd->ppa_addr = rqd->ppa_list[0];
- }
- return 0;
-
-fail_free_pages:
- pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_bio_put:
- bio_put(new_bio);
-
- return -ENOMEM;
-}
-
-static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap, int nr_secs)
-{
- int nr_holes;
- int ret;
-
- nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-
- if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
- nr_holes))
- return NVM_IO_ERR;
-
- rqd->end_io = pblk_end_partial_read;
-
- ret = pblk_submit_io(pblk, rqd);
- if (ret) {
- bio_put(rqd->bio);
- pblk_err(pblk, "partial read IO submission failed\n");
- goto err;
- }
-
- return NVM_IO_OK;
-
-err:
- pblk_err(pblk, "failed to perform partial read\n");
-
- /* Free allocated pages in new bio */
- pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
- __pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_ERR;
-}
-
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
- sector_t lba, unsigned long *read_bitmap)
+ sector_t lba, bool *from_cache)
{
struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
struct ppa_addr ppa;
- pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+ pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
@@ -410,7 +238,6 @@ retry:
if (pblk_ppa_empty(ppa)) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
- WARN_ON(test_and_set_bit(0, read_bitmap));
meta->lba = addr_empty;
return;
}
@@ -419,12 +246,11 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
- pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
+ pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
goto retry;
}
- WARN_ON(test_and_set_bit(0, read_bitmap));
meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -435,95 +261,92 @@ retry:
}
}
-int pblk_submit_read(struct pblk *pblk, struct bio *bio)
+void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
+ bool from_cache;
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
- unsigned int bio_init_idx;
- DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
- int ret = NVM_IO_ERR;
+ struct bio *int_bio, *split_bio;
generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
&pblk->disk->part0);
- bitmap_zero(read_bitmap, nr_secs);
-
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd->opcode = NVM_OP_PREAD;
rqd->nr_ppas = nr_secs;
- rqd->bio = NULL; /* cloned bio if needed */
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
r_ctx->start_time = jiffies;
r_ctx->lba = blba;
- r_ctx->private = bio; /* original bio */
- /* Save the index for this bio's start. This is needed in case
- * we need to fill a partial read.
- */
- bio_init_idx = pblk_get_bi_idx(bio);
+ if (pblk_alloc_rqd_meta(pblk, rqd)) {
+ bio_io_error(bio);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
+ return;
+ }
- if (pblk_alloc_rqd_meta(pblk, rqd))
- goto fail_rqd_free;
+ /* Clone the read bio to deal internally with:
+ * - read errors when reading from the drive
+ * - bio_advance() calls during cache reads
+ */
+ int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
if (nr_secs > 1)
- pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
+ nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
+ &from_cache);
else
- pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
+ pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
- if (bitmap_full(read_bitmap, nr_secs)) {
+split_retry:
+ r_ctx->private = bio; /* original bio */
+ rqd->bio = int_bio; /* internal bio */
+
+ if (from_cache && nr_secs == rqd->nr_ppas) {
+ /* All data was read from the cache; we can complete the IO. */
+ pblk_end_user_read(bio, 0);
atomic_inc(&pblk->inflight_io);
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_DONE;
- }
-
- /* All sectors are to be read from the device */
- if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
- struct bio *int_bio = NULL;
+ } else if (nr_secs != rqd->nr_ppas) {
+ /* The read bio request could be partially filled by the write
+ * buffer, but there are some holes that need to be read from
+ * the drive. To handle this, use the block layer mechanism to
+ * split this request into smaller ones and chain them.
+ */
+ split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
+ &pblk_bio_set);
+ bio_chain(split_bio, bio);
+ generic_make_request(bio);
+
+ /* The new bio contains the first N sectors of the previous one,
+ * so we can continue to use the existing rqd, but we need to
+ * shrink the number of PPAs in it. The new bio is also guaranteed
+ * to contain data from either the cache or the drive only, never
+ * a mix of the two.
+ */
+ bio = split_bio;
+ rqd->nr_ppas = nr_secs;
+ if (rqd->nr_ppas == 1)
+ rqd->ppa_addr = rqd->ppa_list[0];
- /* Clone read bio to deal with read errors internally */
+ /* Recreate int_bio - the existing one may already have had
+ * internal fields that we rely on modified.
+ */
+ bio_put(int_bio);
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
- if (!int_bio) {
- pblk_err(pblk, "could not clone read bio\n");
- goto fail_end_io;
- }
-
- rqd->bio = int_bio;
-
- if (pblk_submit_io(pblk, rqd)) {
- pblk_err(pblk, "read IO submission failed\n");
- ret = NVM_IO_ERR;
- goto fail_end_io;
- }
-
- return NVM_IO_OK;
+ goto split_retry;
+ } else if (pblk_submit_io(pblk, rqd)) {
+ /* Submitting the IO to the drive failed, so report an error */
+ rqd->error = -ENODEV;
+ pblk_end_io_read(rqd);
}
-
- /* The read bio request could be partially filled by the write buffer,
- * but there are some holes that need to be read from the drive.
- */
- ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
- nr_secs);
- if (ret)
- goto fail_meta_free;
-
- return NVM_IO_OK;
-
-fail_meta_free:
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-fail_rqd_free:
- pblk_free_rqd(pblk, rqd, PBLK_READ);
- return ret;
-fail_end_io:
- __pblk_end_io_read(pblk, rqd, false);
- return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
@@ -568,7 +391,7 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
goto out;
/* logic error: lba out-of-bounds */
- if (lba >= pblk->rl.nr_secs) {
+ if (lba >= pblk->capacity) {
WARN(1, "pblk: read lba out of bounds\n");
goto out;
}
@@ -642,7 +465,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (pblk_submit_io_sync(pblk, &rqd)) {
ret = -EIO;
- pblk_err(pblk, "GC read request failed\n");
goto err_free_bio;
}
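The split_retry path above leans on the block layer's split-and-chain primitives. In outline (names from the diff; NR_PHY_IN_LOG is the 512-byte-sectors-per-exposed-page factor):

    split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
                          &pblk_bio_set);  /* covers the first nr_secs lbas */
    bio_chain(split_bio, bio);             /* fold split_bio's completion
                                            * into bio, the remainder       */
    generic_make_request(bio);             /* requeue the remainder         */
    bio = split_bio;                       /* keep servicing the head       */

Each pass through split_retry then handles a prefix of the original request that is homogeneous: either entirely cache-resident or entirely on the drive.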
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index d86f580036d3..e6dda04de144 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -93,10 +93,24 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
u64 written_secs)
{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
int i;
for (i = 0; i < written_secs; i += pblk->min_write_pgs)
- pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+ __pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+
+ spin_lock(&l_mg->free_lock);
+ if (written_secs > line->left_msecs) {
+ /*
+ * We have all data sectors written
+ * and some emeta sectors written too.
+ */
+ line->left_msecs = 0;
+ } else {
+ /* We have only some data sectors written. */
+ line->left_msecs -= written_secs;
+ }
+ spin_unlock(&l_mg->free_lock);
}
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
@@ -165,6 +179,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
+ struct ppa_addr *ppa_list;
void *data;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
@@ -194,7 +209,7 @@ next_pad_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
if (rq_ppas < pblk->min_write_pgs) {
pblk_err(pblk, "corrupted pad line %d\n", line->id);
- goto fail_free_pad;
+ goto fail_complete;
}
rq_len = rq_ppas * geo->csecs;
@@ -203,7 +218,7 @@ next_pad_rq:
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_pad;
+ goto fail_complete;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -212,8 +227,11 @@ next_pad_rq:
rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
ret = pblk_alloc_rqd_meta(pblk, rqd);
- if (ret)
- goto fail_free_rqd;
+ if (ret) {
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
+ bio_put(bio);
+ goto fail_complete;
+ }
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
@@ -222,6 +240,7 @@ next_pad_rq:
rqd->end_io = pblk_end_io_recov;
rqd->private = pad_rq;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
meta_list = rqd->meta_list;
for (i = 0; i < rqd->nr_ppas; ) {
@@ -249,18 +268,21 @@ next_pad_rq:
lba_list[w_ptr] = addr_empty;
meta = pblk_get_meta(pblk, meta_list, i);
meta->lba = addr_empty;
- rqd->ppa_list[i] = dev_ppa;
+ ppa_list[i] = dev_ppa;
}
}
kref_get(&pad_rq->ref);
- pblk_down_chunk(pblk, rqd->ppa_list[0]);
+ pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- pblk_up_chunk(pblk, rqd->ppa_list[0]);
- goto fail_free_rqd;
+ pblk_up_chunk(pblk, ppa_list[0]);
+ kref_put(&pad_rq->ref, pblk_recov_complete);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
+ bio_put(bio);
+ goto fail_complete;
}
left_line_ppas -= rq_ppas;
@@ -268,13 +290,9 @@ next_pad_rq:
if (left_ppas && left_line_ppas)
goto next_pad_rq;
+fail_complete:
kref_put(&pad_rq->ref, pblk_recov_complete);
-
- if (!wait_for_completion_io_timeout(&pad_rq->wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pblk_err(pblk, "pad write timed out\n");
- ret = -ETIME;
- }
+ wait_for_completion(&pad_rq->wait);
if (!pblk_line_is_full(line))
pblk_err(pblk, "corrupted padded line: %d\n", line->id);
@@ -283,14 +301,6 @@ next_pad_rq:
free_rq:
kfree(pad_rq);
return ret;
-
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
- bio_put(bio);
-fail_free_pad:
- kfree(pad_rq);
- vfree(data);
- return ret;
}
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
@@ -412,6 +422,7 @@ retry_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
if (pblk_io_aligned(pblk, rq_ppas))
rqd->is_seq = 1;
@@ -430,7 +441,7 @@ retry_rq:
}
for (j = 0; j < pblk->min_write_pgs; j++, i++)
- rqd->ppa_list[i] =
+ ppa_list[i] =
addr_to_gen_ppa(pblk, paddr + j, line->id);
}
@@ -444,7 +455,7 @@ retry_rq:
atomic_dec(&pblk->inflight_io);
/* If a read fails, do a best effort by padding the line and retrying */
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
int pad_distance, ret;
if (padded) {
@@ -474,11 +485,11 @@ retry_rq:
lba_list[paddr++] = cpu_to_le64(lba);
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
+ if (lba == ADDR_EMPTY || lba >= pblk->capacity)
continue;
line->nr_valid_lbas++;
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
+ pblk_update_map(pblk, lba, ppa_list[i]);
}
left_ppas -= rq_ppas;
@@ -647,10 +658,12 @@ static int pblk_line_was_written(struct pblk_line *line,
bppa = pblk->luns[smeta_blk].bppa;
chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
- if (chunk->state & NVM_CHK_ST_FREE)
- return 0;
+ if (chunk->state & NVM_CHK_ST_CLOSED ||
+ (chunk->state & NVM_CHK_ST_OPEN
+ && chunk->wp >= lm->smeta_sec))
+ return 1;
- return 1;
+ return 0;
}
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
@@ -844,6 +857,7 @@ next:
spin_unlock(&l_mg->free_lock);
} else {
spin_lock(&l_mg->free_lock);
+ l_mg->data_line = data_line;
/* Allocate next line for preparation */
l_mg->data_next = pblk_line_get(pblk);
if (l_mg->data_next) {
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6593deab52da..4e63f9b5954c 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -228,6 +228,7 @@ static void pblk_submit_rec(struct work_struct *work)
mempool_free(recovery, &pblk->rec_pool);
atomic_dec(&pblk->inflight_io);
+ pblk_write_kick(pblk);
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index ac3ab778e976..a67855387f53 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -43,8 +43,6 @@
#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
-#define PBLK_COMMAND_TIMEOUT_MS 30000
-
/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)
@@ -123,18 +121,6 @@ struct pblk_g_ctx {
u64 lba;
};
-/* partial read context */
-struct pblk_pr_ctx {
- struct bio *orig_bio;
- DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
- unsigned int orig_nr_secs;
- unsigned int bio_init_idx;
- void *ppa_ptr;
- dma_addr_t dma_ppa_list;
- u64 lba_list_mem[NVM_MAX_VLBA];
- u64 lba_list_media[NVM_MAX_VLBA];
-};
-
/* Pad context */
struct pblk_pad_rq {
struct pblk *pblk;
@@ -305,7 +291,6 @@ struct pblk_rl {
struct timer_list u_timer;
- unsigned long long nr_secs;
unsigned long total_blocks;
atomic_t free_blocks; /* Total number of free blocks (+ OP) */
@@ -440,6 +425,7 @@ struct pblk_smeta {
struct pblk_w_err_gc {
int has_write_err;
+ int has_gc_err;
__le64 *lba_list;
};
@@ -465,7 +451,6 @@ struct pblk_line {
int meta_line; /* Metadata line id */
int meta_distance; /* Distance between data and metadata */
- u64 smeta_ssec; /* Sector where smeta starts */
u64 emeta_ssec; /* Sector where emeta starts */
unsigned int sec_in_line; /* Number of usable secs in line */
@@ -762,7 +747,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
unsigned int pos, unsigned int nr_entries,
unsigned int count);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter, bool advanced_bio);
+ struct ppa_addr ppa);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
@@ -862,15 +847,15 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs);
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
- sector_t blba, int nr_secs);
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+ sector_t blba, int nr_secs, bool *from_cache);
void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
/*
* pblk user I/O write path
*/
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
@@ -896,7 +881,7 @@ void pblk_write_kick(struct pblk *pblk);
* pblk read path
*/
extern struct bio_set pblk_bio_set;
-int pblk_submit_read(struct pblk *pblk, struct bio *bio);
+void pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk recovery
@@ -921,6 +906,7 @@ void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);
+void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
/*
* pblk rate limiter
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 22811784dc7d..00d5219094e5 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2557f198e175..db269a348b20 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -436,6 +436,15 @@ config DM_DELAY
If unsure, say N.
+config DM_DUST
+ tristate "Bad sector simulation target"
+ depends on BLK_DEV_DM
+ ---help---
+ A device-mapper target that simulates the presence of bad
+ sectors by failing reads at selected blocks. Useful for
+ testing.
+
+ If unsure, say N.
+
config DM_INIT
bool "DM \"dm-mod.create=\" parameter support"
depends on BLK_DEV_DM=y
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index a52b703e588e..be7a6eb92abc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
+obj-$(CONFIG_DM_DUST) += dm-dust.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6fc93834da44..151aa95775be 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
if (r)
return r;
- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ for (b = 0; ; b++) {
r = fn(context, cmd->discard_block_size, to_dblock(b),
dm_bitset_cursor_get_value(&c));
if (r)
break;
+
+ if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
+ break;
+
+ r = dm_bitset_cursor_next(&c);
+ if (r)
+ break;
}
dm_bitset_cursor_end(&c);
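Condensed, the fixed discard-bitset walk advances the cursor only while blocks remain (a sketch; fn() stands in for the load callback and nr_blocks for from_dblock(cmd->discard_nr_blocks)):

    for (b = 0; ; b++) {
            r = fn(context, ..., dm_bitset_cursor_get_value(&c));
            if (r)
                    break;
            if (b >= nr_blocks - 1)            /* last block visited   */
                    break;
            r = dm_bitset_cursor_next(&c);     /* step to the next bit */
            if (r)
                    break;
    }
    dm_bitset_cursor_end(&c);

The previous loop never called dm_bitset_cursor_next(), so every iteration re-read the same bit value.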
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7f6462f74ac8..1b16d34bb785 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -946,6 +946,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+ struct mapped_device *md = dm_table_get_md(ti->table);
/* From now we require underlying device with our integrity profile */
if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
@@ -965,7 +966,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
+ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
cc->integrity_tag_size, cc->integrity_iv_size);
if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
@@ -973,7 +974,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL;
}
} else if (cc->integrity_iv_size)
- DMINFO("Additional per-sector space %u bytes for IV.",
+ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
cc->integrity_iv_size);
if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
@@ -1031,11 +1032,11 @@ static u8 *org_iv_of_dmreq(struct crypt_config *cc,
return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}
-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
- return (uint64_t*) ptr;
+ return (__le64 *) ptr;
}
static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
@@ -1071,7 +1072,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv, *tag;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
@@ -1143,9 +1144,11 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
r = crypto_aead_decrypt(req);
}
- if (r == -EBADMSG)
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ if (r == -EBADMSG) {
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*sector));
+ }
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
@@ -1166,7 +1169,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
struct scatterlist *sg_in, *sg_out;
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
/* Reject unexpected unaligned bio. */
@@ -1788,7 +1791,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
@@ -1887,7 +1891,7 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
* algorithm implementation is used. Help people debug performance
* problems by logging the ->cra_driver_name.
*/
- DMINFO("%s using implementation \"%s\"", ciphermode,
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
return 0;
}
@@ -1907,7 +1911,7 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
return err;
}
- DMINFO("%s using implementation \"%s\"", ciphermode,
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
return 0;
}
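The uint64_t to __le64 conversion is a sparse endianness-annotation fix: the sector number is stored little-endian in the request and byte-swapped only at the point of use. In outline (a sketch; some_sector is a placeholder, and the store site is outside this hunk):

    __le64 *sector = org_sector_of_dmreq(cc, dmreq);

    *sector = cpu_to_le64(some_sector);                     /* store, LE  */
    DMERR_LIMIT("... sector %llu",
                (unsigned long long)le64_to_cpu(*sector));  /* use, CPU   */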
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index fddffe251bf6..f496213f8b67 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- destroy_workqueue(dc->kdelayd_wq);
+ if (dc->kdelayd_wq)
+ destroy_workqueue(dc->kdelayd_wq);
if (dc->read.dev)
dm_put_device(ti, dc->read.dev);
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
new file mode 100644
index 000000000000..845f376a72d9
--- /dev/null
+++ b/drivers/md/dm-dust.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This is a test "dust" device, which fails reads on specified
+ * sectors, emulating the behavior of a hard disk drive sending
+ * a "Read Medium Error" sense.
+ *
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+
+#define DM_MSG_PREFIX "dust"
+
+struct badblock {
+ struct rb_node node;
+ sector_t bb;
+};
+
+struct dust_device {
+ struct dm_dev *dev;
+ struct rb_root badblocklist;
+ unsigned long long badblock_count;
+ spinlock_t dust_lock;
+ unsigned int blksz;
+ unsigned int sect_per_block;
+ sector_t start;
+ bool fail_read_on_bb:1;
+ bool quiet_mode:1;
+};
+
+static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct badblock *bblk = rb_entry(node, struct badblock, node);
+
+ if (bblk->bb > blk)
+ node = node->rb_left;
+ else if (bblk->bb < blk)
+ node = node->rb_right;
+ else
+ return bblk;
+ }
+
+ return NULL;
+}
+
+static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
+{
+ struct badblock *bblk;
+ struct rb_node **link = &root->rb_node, *parent = NULL;
+ sector_t value = new->bb;
+
+ while (*link) {
+ parent = *link;
+ bblk = rb_entry(parent, struct badblock, node);
+
+ if (bblk->bb > value)
+ link = &(*link)->rb_left;
+ else if (bblk->bb < value)
+ link = &(*link)->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, root);
+
+ return true;
+}
+
+static int dust_remove_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+
+ if (bblock == NULL) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu not found in badblocklist",
+ __func__, block);
+ }
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ return -EINVAL;
+ }
+
+ rb_erase(&bblock->node, &dd->badblocklist);
+ dd->badblock_count--;
+ if (!dd->quiet_mode)
+ DMINFO("%s: badblock removed at block %llu", __func__, block);
+ kfree(bblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int dust_add_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
+ if (bblock == NULL) {
+ if (!dd->quiet_mode)
+ DMERR("%s: badblock allocation failed", __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock->bb = block * dd->sect_per_block;
+ if (!dust_rb_insert(&dd->badblocklist, bblock)) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu already in badblocklist",
+ __func__, block);
+ }
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ kfree(bblock);
+ return -EINVAL;
+ }
+
+ dd->badblock_count++;
+ if (!dd->quiet_mode)
+ DMINFO("%s: badblock added at block %llu", __func__, block);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int dust_query_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ if (bblock != NULL)
+ DMINFO("%s: block %llu found in badblocklist", __func__, block);
+ else
+ DMINFO("%s: block %llu not found in badblocklist", __func__, block);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
+{
+ struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+
+ if (bblk)
+ return DM_MAPIO_KILL;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int dust_map_read(struct dust_device *dd, sector_t thisblock,
+ bool fail_read_on_bb)
+{
+ unsigned long flags;
+ int ret = DM_MAPIO_REMAPPED;
+
+ if (fail_read_on_bb) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ ret = __dust_map_read(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ }
+
+ return ret;
+}
+
+static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
+{
+ struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+
+ if (bblk) {
+ rb_erase(&bblk->node, &dd->badblocklist);
+ dd->badblock_count--;
+ kfree(bblk);
+ if (!dd->quiet_mode) {
+ sector_div(thisblock, dd->sect_per_block);
+ DMINFO("block %llu removed from badblocklist by write",
+ (unsigned long long)thisblock);
+ }
+ }
+}
+
+static int dust_map_write(struct dust_device *dd, sector_t thisblock,
+ bool fail_read_on_bb)
+{
+ unsigned long flags;
+
+ if (fail_read_on_bb) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ __dust_map_write(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int dust_map(struct dm_target *ti, struct bio *bio)
+{
+ struct dust_device *dd = ti->private;
+ int ret;
+
+ bio_set_dev(bio, dd->dev->bdev);
+ bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+ if (bio_data_dir(bio) == READ)
+ ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ else
+ ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+
+ return ret;
+}
+
+static bool __dust_clear_badblocks(struct rb_root *tree,
+ unsigned long long count)
+{
+ struct rb_node *node = NULL, *nnode = NULL;
+
+ nnode = rb_first(tree);
+ if (nnode == NULL) {
+ BUG_ON(count != 0);
+ return false;
+ }
+
+ while (nnode) {
+ node = nnode;
+ nnode = rb_next(node);
+ rb_erase(node, tree);
+ count--;
+ kfree(node);
+ }
+ BUG_ON(count != 0);
+ BUG_ON(tree->rb_node != NULL);
+
+ return true;
+}
+
+static int dust_clear_badblocks(struct dust_device *dd)
+{
+ unsigned long flags;
+ struct rb_root badblocklist;
+ unsigned long long badblock_count;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ badblocklist = dd->badblocklist;
+ badblock_count = dd->badblock_count;
+ dd->badblocklist = RB_ROOT;
+ dd->badblock_count = 0;
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ if (!__dust_clear_badblocks(&badblocklist, badblock_count))
+ DMINFO("%s: no badblocks found", __func__);
+ else
+ DMINFO("%s: badblocks cleared", __func__);
+
+ return 0;
+}
+
+/*
+ * Target parameters:
+ *
+ * <device_path> <offset> <blksz>
+ *
+ * device_path: path to the block device
+ * offset: offset to data area from start of device_path
+ * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
+ */
+static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct dust_device *dd;
+ unsigned long long tmp;
+ char dummy;
+ unsigned int blksz;
+ unsigned int sect_per_block;
+ sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
+ sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);
+
+ if (argc != 3) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
+ ti->error = "Invalid block size parameter";
+ return -EINVAL;
+ }
+
+ if (blksz < 512) {
+ ti->error = "Block size must be at least 512";
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(blksz)) {
+ ti->error = "Block size must be a power of 2";
+ return -EINVAL;
+ }
+
+ if (to_sector(blksz) > max_block_sectors) {
+ ti->error = "Block size is too large";
+ return -EINVAL;
+ }
+
+ sect_per_block = (blksz >> SECTOR_SHIFT);
+
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
+ ti->error = "Invalid device offset sector";
+ return -EINVAL;
+ }
+
+ dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
+ if (dd == NULL) {
+ ti->error = "Cannot allocate context";
+ return -ENOMEM;
+ }
+
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
+ ti->error = "Device lookup failed";
+ kfree(dd);
+ return -EINVAL;
+ }
+
+ dd->sect_per_block = sect_per_block;
+ dd->blksz = blksz;
+ dd->start = tmp;
+
+ /*
+ * Whether to fail a read on a "bad" block.
+ * Defaults to false; enabled later via the "enable" message.
+ */
+ dd->fail_read_on_bb = false;
+
+ /*
+ * Initialize bad block list rbtree.
+ */
+ dd->badblocklist = RB_ROOT;
+ dd->badblock_count = 0;
+ spin_lock_init(&dd->dust_lock);
+
+ dd->quiet_mode = false;
+
+ BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);
+
+ ti->num_discard_bios = 1;
+ ti->num_flush_bios = 1;
+ ti->private = dd;
+
+ return 0;
+}
+
+static void dust_dtr(struct dm_target *ti)
+{
+ struct dust_device *dd = ti->private;
+
+ __dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
+ dm_put_device(ti, dd->dev);
+ kfree(dd);
+}
+
+static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result_buf, unsigned int maxlen)
+{
+ struct dust_device *dd = ti->private;
+ sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ bool invalid_msg = false;
+ int result = -EINVAL;
+ unsigned long long tmp, block;
+ unsigned long flags;
+ char dummy;
+
+ if (argc == 1) {
+ if (!strcasecmp(argv[0], "addbadblock") ||
+ !strcasecmp(argv[0], "removebadblock") ||
+ !strcasecmp(argv[0], "queryblock")) {
+ DMERR("%s requires an additional argument", argv[0]);
+ } else if (!strcasecmp(argv[0], "disable")) {
+ DMINFO("disabling read failures on bad sectors");
+ dd->fail_read_on_bb = false;
+ result = 0;
+ } else if (!strcasecmp(argv[0], "enable")) {
+ DMINFO("enabling read failures on bad sectors");
+ dd->fail_read_on_bb = true;
+ result = 0;
+ } else if (!strcasecmp(argv[0], "countbadblocks")) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ DMINFO("countbadblocks: %llu badblock(s) found",
+ dd->badblock_count);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ result = 0;
+ } else if (!strcasecmp(argv[0], "clearbadblocks")) {
+ result = dust_clear_badblocks(dd);
+ } else if (!strcasecmp(argv[0], "quiet")) {
+ if (!dd->quiet_mode)
+ dd->quiet_mode = true;
+ else
+ dd->quiet_mode = false;
+ result = 0;
+ } else {
+ invalid_msg = true;
+ }
+ } else if (argc == 2) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
+ return result;
+
+ block = tmp;
+ sector_div(size, dd->sect_per_block);
+ if (block > size) {
+ DMERR("selected block value out of range");
+ return result;
+ }
+
+ if (!strcasecmp(argv[0], "addbadblock"))
+ result = dust_add_block(dd, block);
+ else if (!strcasecmp(argv[0], "removebadblock"))
+ result = dust_remove_block(dd, block);
+ else if (!strcasecmp(argv[0], "queryblock"))
+ result = dust_query_block(dd, block);
+ else
+ invalid_msg = true;
+
+ } else
+ DMERR("invalid number of arguments '%d'", argc);
+
+ if (invalid_msg)
+ DMERR("unrecognized message '%s' received", argv[0]);
+
+ return result;
+}
+
+static void dust_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct dust_device *dd = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%s %s %s", dd->dev->name,
+ dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
+ dd->quiet_mode ? "quiet" : "verbose");
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %llu %u", dd->dev->name,
+ (unsigned long long)dd->start, dd->blksz);
+ break;
+ }
+}
+
+static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+ struct dust_device *dd = ti->private;
+ struct dm_dev *dev = dd->dev;
+
+ *bdev = dev->bdev;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (dd->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+
+ return 0;
+}
+
+static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct dust_device *dd = ti->private;
+
+ return fn(ti, dd->dev, dd->start, ti->len, data);
+}
+
+static struct target_type dust_target = {
+ .name = "dust",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = dust_ctr,
+ .dtr = dust_dtr,
+ .iterate_devices = dust_iterate_devices,
+ .map = dust_map,
+ .message = dust_message,
+ .status = dust_status,
+ .prepare_ioctl = dust_prepare_ioctl,
+};
+
+static int __init dm_dust_init(void)
+{
+ int result = dm_register_target(&dust_target);
+
+ if (result < 0)
+ DMERR("dm_register_target failed %d", result);
+
+ return result;
+}
+
+static void __exit dm_dust_exit(void)
+{
+ dm_unregister_target(&dust_target);
+}
+
+module_init(dm_dust_init);
+module_exit(dm_dust_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dust test target");
+MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 721efc493942..3f4139ac1f60 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -11,6 +11,7 @@
#define _LINUX_DM_EXCEPTION_STORE
#include <linux/blkdev.h>
+#include <linux/list_bl.h>
#include <linux/device-mapper.h>
/*
@@ -27,7 +28,7 @@ typedef sector_t chunk_t;
* chunk within the device.
*/
struct dm_exception {
- struct list_head hash_list;
+ struct hlist_bl_node hash_list;
chunk_t old_chunk;
chunk_t new_chunk;
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 4b76f84424c3..352e803f566e 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -160,7 +160,7 @@ static int __init dm_parse_table(struct dm_device *dev, char *str)
while (table_entry) {
DMDEBUG("parsing table \"%s\"", str);
- if (++dev->dmi.target_count >= DM_MAX_TARGETS) {
+ if (++dev->dmi.target_count > DM_MAX_TARGETS) {
DMERR("too many targets %u > %d",
dev->dmi.target_count, DM_MAX_TARGETS);
return -EINVAL;
@@ -242,9 +242,9 @@ static int __init dm_parse_devices(struct list_head *devices, char *str)
return -ENOMEM;
list_add_tail(&dev->list, devices);
- if (++ndev >= DM_MAX_DEVICES) {
- DMERR("too many targets %u > %d",
- dev->dmi.target_count, DM_MAX_TARGETS);
+ if (++ndev > DM_MAX_DEVICES) {
+ DMERR("too many devices %lu > %d",
+ ndev, DM_MAX_DEVICES);
return -EINVAL;
}
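The relational-operator fix above matters because the counter is pre-incremented before the test: with a limit of N, "++n >= N" already fires while accepting the Nth item, so only N - 1 items ever get through. A standalone illustration (hypothetical limit and items):

#include <stdio.h>

int main(void)
{
	const unsigned int limit = 2;	/* stands in for DM_MAX_DEVICES */
	const char *items[] = { "a", "b", "c" };
	unsigned int n = 0;

	for (int i = 0; i < 3; i++) {
		if (++n > limit) {	/* old test was: ++n >= limit */
			printf("rejected %s: too many (%u > %u)\n",
			       items[i], n, limit);
			break;
		}
		printf("accepted %s (%u of %u)\n", items[i], n, limit);
	}
	return 0;	/* accepts exactly 2 items; the old test stopped at 1 */
}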
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c27c32cf4a30..44e76cda087a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
+#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
@@ -24,6 +25,7 @@
#define DEFAULT_INTERLEAVE_SECTORS 32768
#define DEFAULT_JOURNAL_SIZE_FACTOR 7
+#define DEFAULT_SECTORS_PER_BITMAP_BIT 32768
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
@@ -33,6 +35,8 @@
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_WRITE_SUPER 16
+#define BITMAP_BLOCK_SIZE		4096	/* don't change it: the on-disk bitmap is read and written in these units */
+#define BITMAP_FLUSH_INTERVAL (10 * HZ)
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -48,6 +52,7 @@
#define SB_MAGIC "integrt"
#define SB_VERSION_1 1
#define SB_VERSION_2 2
+#define SB_VERSION_3 3
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -60,12 +65,14 @@ struct superblock {
__u64 provided_data_sectors; /* userspace uses this value */
__u32 flags;
__u8 log2_sectors_per_block;
- __u8 pad[3];
+ __u8 log2_blocks_per_bitmap_bit;
+ __u8 pad[2];
__u64 recalc_sector;
};
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
+#define SB_FLAG_DIRTY_BITMAP 0x4
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -151,9 +158,18 @@ struct dm_integrity_c {
struct workqueue_struct *metadata_wq;
struct superblock *sb;
unsigned journal_pages;
+ unsigned n_bitmap_blocks;
+
struct page_list *journal;
struct page_list *journal_io;
struct page_list *journal_xor;
+ struct page_list *recalc_bitmap;
+ struct page_list *may_write_bitmap;
+ struct bitmap_block_status *bbs;
+ unsigned bitmap_flush_interval;
+ int synchronous_mode;
+ struct bio_list synchronous_bios;
+ struct delayed_work bitmap_flush_work;
struct crypto_skcipher *journal_crypt;
struct scatterlist **journal_scatterlist;
@@ -180,6 +196,7 @@ struct dm_integrity_c {
__s8 log2_metadata_run;
__u8 log2_buffer_sectors;
__u8 sectors_per_block;
+ __u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
int suspending;
@@ -232,17 +249,20 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
+ bool recalculate_flag;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
struct alg_spec journal_mac_alg;
atomic64_t number_of_mismatches;
+
+ struct notifier_block reboot_notifier;
};
struct dm_integrity_range {
sector_t logical_sector;
- unsigned n_sectors;
+ sector_t n_sectors;
bool waiting;
union {
struct rb_node node;
@@ -288,6 +308,16 @@ struct journal_io {
struct journal_completion *comp;
};
+struct bitmap_block_status {
+ struct work_struct work;
+ struct dm_integrity_c *ic;
+ unsigned idx;
+ unsigned long *bitmap;
+ struct bio_list bio_queue;
+ spinlock_t bio_queue_lock;
+};
+
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL 32
@@ -423,7 +453,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
+ ic->sb->version = SB_VERSION_3;
+ else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic->sb->version = SB_VERSION_2;
else
ic->sb->version = SB_VERSION_1;
@@ -447,6 +479,137 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
return dm_io(&io_req, 1, &io_loc, NULL);
}
+#define BITMAP_OP_TEST_ALL_SET 0
+#define BITMAP_OP_TEST_ALL_CLEAR 1
+#define BITMAP_OP_SET 2
+#define BITMAP_OP_CLEAR 3
+
+static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
+ sector_t sector, sector_t n_sectors, int mode)
+{
+ unsigned long bit, end_bit, this_end_bit, page, end_page;
+ unsigned long *data;
+
+ if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
+ DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
+ (unsigned long long)sector,
+ (unsigned long long)n_sectors,
+ ic->sb->log2_sectors_per_block,
+ ic->log2_blocks_per_bitmap_bit,
+ mode);
+ BUG();
+ }
+
+ if (unlikely(!n_sectors))
+ return true;
+
+ bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ end_bit = (sector + n_sectors - 1) >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+
+ page = bit / (PAGE_SIZE * 8);
+ bit %= PAGE_SIZE * 8;
+
+ end_page = end_bit / (PAGE_SIZE * 8);
+ end_bit %= PAGE_SIZE * 8;
+
+repeat:
+ if (page < end_page) {
+ this_end_bit = PAGE_SIZE * 8 - 1;
+ } else {
+ this_end_bit = end_bit;
+ }
+
+ data = lowmem_page_address(bitmap[page].page);
+
+ if (mode == BITMAP_OP_TEST_ALL_SET) {
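+		/* when bit is long-aligned and a whole word still fits, test BITS_PER_LONG bits at once */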
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ if (data[bit / BITS_PER_LONG] != -1)
+ return false;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ if (!test_bit(bit, data))
+ return false;
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ if (data[bit / BITS_PER_LONG] != 0)
+ return false;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ if (test_bit(bit, data))
+ return false;
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_SET) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = -1;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __set_bit(bit, data);
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_CLEAR) {
+ if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
+ clear_page(data);
+ else while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = 0;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __clear_bit(bit, data);
+ bit++;
+ }
+ } else {
+ BUG();
+ }
+
+ if (unlikely(page < end_page)) {
+ bit = 0;
+ page++;
+ goto repeat;
+ }
+
+ return true;
+}
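The index arithmetic above maps a sector to a (page, bit) pair in two steps: shift away the sectors covered per bitmap bit, then split the bit number by bits per page. The same mapping as a standalone sketch (assumes 4 KiB pages; toy names, not dm-integrity code):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL	/* assumption: 4 KiB pages, as on x86-64 */

/* Mirror of the mapping above: sector -> bitmap bit -> (page, offset). */
static void sector_to_bit_pos(uint64_t sector,
			      unsigned log2_sectors_per_block,
			      unsigned log2_blocks_per_bitmap_bit,
			      unsigned long *page, unsigned long *bit_in_page)
{
	unsigned long bit = sector >> (log2_sectors_per_block +
				       log2_blocks_per_bitmap_bit);

	*page = bit / (TOY_PAGE_SIZE * 8);	/* 32768 bits per 4 KiB page */
	*bit_in_page = bit % (TOY_PAGE_SIZE * 8);
}

int main(void)
{
	unsigned long page, bit;

	/* 512-byte blocks, 2^15 sectors per bit: sector 2^30 -> bit 2^15 */
	sector_to_bit_pos(1UL << 30, 0, 15, &page, &bit);
	printf("page %lu, bit %lu\n", page, bit);	/* prints: page 1, bit 0 */
	return 0;
}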
+
+static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
+{
+ unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned i;
+
+ for (i = 0; i < n_bitmap_pages; i++) {
+ unsigned long *dst_data = lowmem_page_address(dst[i].page);
+ unsigned long *src_data = lowmem_page_address(src[i].page);
+ copy_page(dst_data, src_data);
+ }
+}
+
+static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
+{
+ unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+
+ BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
+ return &ic->bbs[bitmap_block];
+}
+
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
bool e, const char *function)
{
@@ -455,8 +618,8 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
if (unlikely(section >= ic->journal_sections) ||
unlikely(offset >= limit)) {
- printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
- function, section, offset, ic->journal_sections, limit);
+ DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
+ function, section, offset, ic->journal_sections, limit);
BUG();
}
#endif
@@ -756,12 +919,12 @@ static void complete_journal_io(unsigned long error, void *context)
complete_journal_op(comp);
}
-static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
+ unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
- unsigned sector, n_sectors, pl_index, pl_offset;
+ unsigned pl_index, pl_offset;
int r;
if (unlikely(dm_integrity_failed(ic))) {
@@ -770,9 +933,6 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
return;
}
- sector = section * ic->journal_section_sectors;
- n_sectors = n_sections * ic->journal_section_sectors;
-
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
@@ -805,6 +965,17 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
}
}
+static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
+ unsigned n_sections, struct journal_completion *comp)
+{
+ unsigned sector, n_sectors;
+
+ sector = section * ic->journal_section_sectors;
+ n_sectors = n_sections * ic->journal_section_sectors;
+
+ rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
+}
+
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
struct journal_completion io_comp;
@@ -988,6 +1159,12 @@ static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrit
} while (unlikely(new_range->waiting));
}
+static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
+{
+ if (unlikely(!add_new_range(ic, new_range, true)))
+ wait_and_add_new_range(ic, new_range);
+}
+
static void init_journal_node(struct journal_node *node)
{
RB_CLEAR_NODE(&node->node);
@@ -1204,6 +1381,14 @@ static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
int r = dm_integrity_failed(ic);
if (unlikely(r) && !bio->bi_status)
bio->bi_status = errno_to_blk_status(r);
+ if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
+ unsigned long flags;
+ spin_lock_irqsave(&ic->endio_wait.lock, flags);
+ bio_list_add(&ic->synchronous_bios, bio);
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+ return;
+ }
bio_endio(bio);
}
@@ -1477,7 +1662,8 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
else
wanted_tag_size *= ic->tag_size;
if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
- DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
+ DMERR("Invalid integrity data size %u, expected %u",
+ bip->bip_iter.bi_size, wanted_tag_size);
return DM_MAPIO_KILL;
}
}
@@ -1681,7 +1867,7 @@ retry:
unsigned ws, we, range_sectors;
dio->range.n_sectors = min(dio->range.n_sectors,
- ic->free_sectors << ic->sb->log2_sectors_per_block);
+ (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors)) {
if (from_map)
goto offload_to_thread;
@@ -1764,6 +1950,20 @@ offload_to_thread:
goto journal_read_write;
}
+ if (ic->mode == 'B' && dio->write) {
+ if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ struct bitmap_block_status *bbs;
+
+ bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
+ spin_lock(&bbs->bio_queue_lock);
+ bio_list_add(&bbs->bio_queue, bio);
+ spin_unlock(&bbs->bio_queue_lock);
+ queue_work(ic->writer_wq, &bbs->work);
+ return;
+ }
+ }
+
dio->in_flight = (atomic_t)ATOMIC_INIT(2);
if (need_sync_io) {
@@ -1790,10 +1990,15 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
- if (unlikely(ic->recalc_wq != NULL) &&
- ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
goto skip_check;
+ if (ic->mode == 'B') {
+ if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
+ goto skip_check;
+ }
+
if (likely(!bio->bi_status))
integrity_metadata(&dio->work);
else
@@ -1831,8 +2036,16 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
- WARN_ON(ic->journal_sections * ic->journal_section_entries !=
- (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
+ if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) *
+ ic->journal_section_entries + ic->free_sectors)) {
+ DMCRIT("journal_sections %u, journal_section_entries %u, "
+ "n_uncommitted_sections %u, n_committed_sections %u, "
+ "journal_section_entries %u, free_sectors %u",
+ ic->journal_sections, ic->journal_section_entries,
+ ic->n_uncommitted_sections, ic->n_committed_sections,
+ ic->journal_section_entries, ic->free_sectors);
+ }
}
static void integrity_commit(struct work_struct *w)
@@ -1981,8 +2194,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
spin_lock_irq(&ic->endio_wait.lock);
- if (unlikely(!add_new_range(ic, &io->range, true)))
- wait_and_add_new_range(ic, &io->range);
+ add_new_range_and_wait(ic, &io->range);
if (likely(!from_replay)) {
struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
@@ -2120,11 +2332,14 @@ static void integrity_recalc(struct work_struct *w)
sector_t area, offset;
sector_t metadata_block;
unsigned metadata_offset;
+ sector_t logical_sector, n_sectors;
__u8 *t;
unsigned i;
int r;
unsigned super_counter = 0;
+ DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
+
spin_lock_irq(&ic->endio_wait.lock);
next_chunk:
@@ -2133,21 +2348,49 @@ next_chunk:
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
- if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
+ if (ic->mode == 'B') {
+ DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ }
goto unlock_ret;
+ }
get_area_and_offset(ic, range.logical_sector, &area, &offset);
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
if (!ic->meta_dev)
- range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
-
- if (unlikely(!add_new_range(ic, &range, true)))
- wait_and_add_new_range(ic, &range);
+ range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+ add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
+ logical_sector = range.logical_sector;
+ n_sectors = range.n_sectors;
+
+ if (ic->mode == 'B') {
+ if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
+ goto advance_and_next;
+ }
+ while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
+ ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
+ logical_sector += ic->sectors_per_block;
+ n_sectors -= ic->sectors_per_block;
+ cond_resched();
+ }
+ while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
+ ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
+ n_sectors -= ic->sectors_per_block;
+ cond_resched();
+ }
+ get_area_and_offset(ic, logical_sector, &area, &offset);
+ }
+
+ DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
+ if (ic->mode == 'B') {
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+ }
super_counter = 0;
}
@@ -2162,7 +2405,7 @@ next_chunk:
io_req.client = ic->io;
io_loc.bdev = ic->dev->bdev;
io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = range.n_sectors;
+ io_loc.count = n_sectors;
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) {
@@ -2171,8 +2414,8 @@ next_chunk:
}
t = ic->recalc_tags;
- for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
t += ic->tag_size;
}
@@ -2184,6 +2427,9 @@ next_chunk:
goto err;
}
+advance_and_next:
+ cond_resched();
+
spin_lock_irq(&ic->endio_wait.lock);
remove_range_unlocked(ic, &range);
ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
@@ -2199,6 +2445,103 @@ unlock_ret:
recalc_write_super(ic);
}
+static void bitmap_block_work(struct work_struct *w)
+{
+ struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
+ struct dm_integrity_c *ic = bbs->ic;
+ struct bio *bio;
+ struct bio_list bio_queue;
+ struct bio_list waiting;
+
+ bio_list_init(&waiting);
+
+ spin_lock(&bbs->bio_queue_lock);
+ bio_queue = bbs->bio_queue;
+ bio_list_init(&bbs->bio_queue);
+ spin_unlock(&bbs->bio_queue_lock);
+
+ while ((bio = bio_list_pop(&bio_queue))) {
+ struct dm_integrity_io *dio;
+
+ dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
+
+ if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ } else {
+ block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+ bio_list_add(&waiting, bio);
+ }
+ }
+
+ if (bio_list_empty(&waiting))
+ return;
+
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
+ bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
+ BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
+
+ while ((bio = bio_list_pop(&waiting))) {
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
+
+ block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ }
+
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+}
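bitmap_block_work() is a small write-ahead protocol: a write whose region is not yet marked writable first has its intent recorded in the on-disk bitmap (held in ic->journal pages), that bitmap block is flushed with REQ_FUA, and only then is may_write_bitmap updated and the bio released. The same ordering in a compilable toy model (all names hypothetical, not dm-integrity functions):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the scheme above; persist() stands in for the FUA write. */
struct intent_log {
	bool intent_bit;	/* in-memory copy of the on-disk bit */
	bool may_write;		/* set only after the intent is durable */
};

static void persist(struct intent_log *log)
{
	/* stand-in for rw_journal_sectors(..., REQ_FUA | REQ_SYNC, ...) */
	printf("bitmap block flushed (intent=%d)\n", log->intent_bit);
}

static void submit_write(struct intent_log *log)
{
	if (log->may_write) {		/* fast path: already marked */
		printf("write proceeds\n");
		return;
	}
	log->intent_bit = true;		/* record intent in memory */
	persist(log);			/* make the intent durable first */
	log->may_write = true;		/* only now is the data write allowed */
	printf("write proceeds after flush\n");
}

int main(void)
{
	struct intent_log log = { false, false };

	submit_write(&log);	/* slow path: flush, then write */
	submit_write(&log);	/* fast path */
	return 0;
}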
+
+static void bitmap_flush_work(struct work_struct *work)
+{
+ struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
+ struct dm_integrity_range range;
+ unsigned long limit;
+ struct bio *bio;
+
+ dm_integrity_flush_buffers(ic);
+
+ range.logical_sector = 0;
+ range.n_sectors = ic->provided_data_sectors;
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ add_new_range_and_wait(ic, &range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ dm_integrity_flush_buffers(ic);
+ if (ic->meta_dev)
+ blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+
+ limit = ic->provided_data_sectors;
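+	/* while recalculation is in progress, clear bits only up to recalc_sector, rounded down to a whole bitmap bit */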
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ limit = le64_to_cpu(ic->sb->recalc_sector)
+ >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
+ << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ }
+ /*DEBUG_print("zeroing journal\n");*/
+ block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
+
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+ while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
+ bio_endio(bio);
+ spin_unlock_irq(&ic->endio_wait.lock);
+ spin_lock_irq(&ic->endio_wait.lock);
+ }
+ spin_unlock_irq(&ic->endio_wait.lock);
+}
+
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
unsigned n_sections, unsigned char commit_seq)
{
@@ -2395,9 +2738,37 @@ clear_journal:
init_journal_node(&ic->journal_tree[i]);
}
+static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
+{
+ DEBUG_print("dm_integrity_enter_synchronous_mode\n");
+
+ if (ic->mode == 'B') {
+ ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
+ ic->synchronous_mode = 1;
+
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ flush_workqueue(ic->commit_wq);
+ }
+}
+
+static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
+{
+ struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
+
+ DEBUG_print("dm_integrity_reboot\n");
+
+ dm_integrity_enter_synchronous_mode(ic);
+
+ return NOTIFY_DONE;
+}
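For reference, the reboot-notifier pattern used here in its minimal form; a sketch assuming only <linux/notifier.h> and <linux/reboot.h>, where higher .priority values are called earlier in the chain:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_reboot_cb(struct notifier_block *nb, unsigned long code, void *unused)
{
	/* runs on reboot/halt/poweroff; keep it short and non-blocking */
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call	= my_reboot_cb,
	.priority	= INT_MAX - 1,	/* early: before most drivers */
};

/* register_reboot_notifier(&my_reboot_nb) at setup,
 * unregister_reboot_notifier(&my_reboot_nb) at teardown.
 */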
+
static void dm_integrity_postsuspend(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ int r;
+
+ WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
del_timer_sync(&ic->autocommit_timer);
@@ -2406,6 +2777,9 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->recalc_wq)
drain_workqueue(ic->recalc_wq);
+ if (ic->mode == 'B')
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+
queue_work(ic->commit_wq, &ic->commit_work);
drain_workqueue(ic->commit_wq);
@@ -2416,6 +2790,18 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
dm_integrity_flush_buffers(ic);
}
+ if (ic->mode == 'B') {
+ dm_integrity_flush_buffers(ic);
+#if 1
+ /* set to 0 to test bitmap replay code */
+ init_journal(ic, 0, ic->journal_sections, 0);
+ ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+#endif
+ }
+
WRITE_ONCE(ic->suspending, 0);
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
@@ -2426,11 +2812,70 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
static void dm_integrity_resume(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ int r;
+ DEBUG_print("resume\n");
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
+ DEBUG_print("resume dirty_bitmap\n");
+ rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ if (ic->mode == 'B') {
+ if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
+ block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
+ if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
+ BITMAP_OP_TEST_ALL_CLEAR)) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ } else {
+ DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
+ ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ } else {
+ if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ init_journal(ic, 0, ic->journal_sections, 0);
+ replay_journal(ic);
+ ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ }
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+ } else {
+ replay_journal(ic);
+ if (ic->mode == 'B') {
+ int mode;
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+
+ mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ }
+ }
- replay_journal(ic);
-
- if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ DEBUG_print("testing recalc: %x\n", ic->sb->flags);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+ DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
} else if (recalc_pos > ic->provided_data_sectors) {
@@ -2438,6 +2883,16 @@ static void dm_integrity_resume(struct dm_target *ti)
recalc_write_super(ic);
}
}
+
+ ic->reboot_notifier.notifier_call = dm_integrity_reboot;
+ ic->reboot_notifier.next = NULL;
+ ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
+ WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
+
+#if 0
+ /* set to 1 to stress test synchronous mode */
+ dm_integrity_enter_synchronous_mode(ic);
+#endif
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
@@ -2462,10 +2917,14 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
watermark_percentage += ic->journal_entries / 2;
do_div(watermark_percentage, ic->journal_entries);
- arg_count = 5;
+ arg_count = 3;
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->mode == 'J';
+ arg_count += ic->mode == 'J';
+ arg_count += ic->mode == 'B';
+ arg_count += ic->mode == 'B';
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -2475,13 +2934,19 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" meta_device:%s", ic->meta_dev->name);
if (ic->sectors_per_block != 1)
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ if (ic->recalculate_flag)
DMEMIT(" recalculate");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
- DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
- DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ if (ic->mode == 'J') {
+ DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
+ DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ }
+ if (ic->mode == 'B') {
+ DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+ DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
+ }
#define EMIT_ALG(a, n) \
do { \
@@ -2562,7 +3027,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
return -EINVAL;
} else {
- __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
+ __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
meta_size <<= ic->log2_buffer_sectors;
@@ -2659,37 +3124,37 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
-static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
+static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned i;
if (!pl)
return;
- for (i = 0; i < ic->journal_pages; i++)
- if (pl[i].page)
- __free_page(pl[i].page);
+ for (i = 0; pl[i].page; i++)
+ __free_page(pl[i].page);
kvfree(pl);
}
-static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
+static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
- size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
struct page_list *pl;
unsigned i;
- pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
+ pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
if (!pl)
return NULL;
- for (i = 0; i < ic->journal_pages; i++) {
+ for (i = 0; i < n_pages; i++) {
pl[i].page = alloc_page(GFP_KERNEL);
if (!pl[i].page) {
- dm_integrity_free_page_list(ic, pl);
+ dm_integrity_free_page_list(pl);
return NULL;
}
if (i)
pl[i - 1].next = &pl[i];
}
+ pl[i].page = NULL;
+ pl[i].next = NULL;
return pl;
}
@@ -2702,7 +3167,8 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
kvfree(sl);
}
-static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
+static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
+ struct page_list *pl)
{
struct scatterlist **sl;
unsigned i;
@@ -2721,7 +3187,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
unsigned idx;
page_list_location(ic, i, 0, &start_index, &start_offset);
- page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
+ page_list_location(ic, i, ic->journal_section_sectors - 1,
+ &end_index, &end_offset);
n_pages = (end_index - start_index + 1);
@@ -2842,7 +3309,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
}
ic->journal_pages = journal_pages;
- ic->journal = dm_integrity_alloc_page_list(ic);
+ ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal) {
*error = "Could not allocate memory for journal";
r = -ENOMEM;
@@ -2874,7 +3341,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
DEBUG_print("cipher %s, block size %u iv size %u\n",
ic->journal_crypt_alg.alg_string, blocksize, ivsize);
- ic->journal_io = dm_integrity_alloc_page_list(ic);
+ ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal_io) {
*error = "Could not allocate memory for journal io";
r = -ENOMEM;
@@ -2898,7 +3365,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
- ic->journal_xor = dm_integrity_alloc_page_list(ic);
+ ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal_xor) {
*error = "Could not allocate memory for journal xor";
r = -ENOMEM;
@@ -2922,7 +3389,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
memset(crypt_iv, 0x00, ivsize);
- skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
+ skcipher_request_set_crypt(req, sg, sg,
+ PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -3063,7 +3531,7 @@ bad:
* device
* offset from the start of the device
* tag size
- * D - direct writes, J - journal writes, R - recovery mode
+ * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
* number of optional arguments
* optional arguments:
* journal_sectors
@@ -3071,10 +3539,14 @@ bad:
* buffer_sectors
* journal_watermark
* commit_time
+ * meta_device
+ * block_size
+ * sectors_per_bit
+ * bitmap_flush_interval
* internal_hash
* journal_crypt
* journal_mac
- * block_size
+ * recalculate
*/
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
@@ -3087,10 +3559,13 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{0, 9, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
- bool recalculate;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
+ __s8 log2_sectors_per_bitmap_bit = -1;
+ __s8 log2_blocks_per_bitmap_bit;
+ __u64 bits_in_journal;
+ __u64 n_bitmap_bits;
#define DIRECT_ARGUMENTS 4
@@ -3114,6 +3589,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
init_waitqueue_head(&ic->copy_to_journal_wait);
init_completion(&ic->crypto_backoff);
atomic64_set(&ic->number_of_mismatches, 0);
+ ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
if (r) {
@@ -3136,10 +3612,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
- if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
+ if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
+ !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
ic->mode = argv[3][0];
- else {
- ti->error = "Invalid mode (expecting J, D, R)";
+ } else {
+ ti->error = "Invalid mode (expecting J, B, D, R)";
r = -EINVAL;
goto bad;
}
@@ -3149,7 +3626,6 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
buffer_sectors = DEFAULT_BUFFER_SECTORS;
journal_watermark = DEFAULT_JOURNAL_WATERMARK;
sync_msec = DEFAULT_SYNC_MSEC;
- recalculate = false;
ic->sectors_per_block = 1;
as.argc = argc - DIRECT_ARGUMENTS;
@@ -3161,6 +3637,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (extra_args--) {
const char *opt_string;
unsigned val;
+ unsigned long long llval;
opt_string = dm_shift_arg(&as);
if (!opt_string) {
r = -EINVAL;
@@ -3182,7 +3659,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
}
- r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
+ r = dm_get_device(ti, strchr(opt_string, ':') + 1,
+ dm_table_get_mode(ti->table), &ic->meta_dev);
if (r) {
ti->error = "Device lookup failed";
goto bad;
@@ -3196,6 +3674,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
+ } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
+ log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
+ } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
+ if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ r = -EINVAL;
+ ti->error = "Invalid bitmap_flush_interval argument";
+ }
+ ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
@@ -3212,7 +3698,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
- recalculate = true;
+ ic->recalculate_flag = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3228,7 +3714,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (!journal_sectors) {
journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
- ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
+ ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
}
if (!buffer_sectors)
@@ -3263,6 +3749,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
else
ic->log2_tag_size = -1;
+ if (ic->mode == 'B' && !ic->internal_hash) {
+ r = -EINVAL;
+ ti->error = "Bitmap mode can be only used with internal hash";
+ goto bad;
+ }
+
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
@@ -3308,7 +3800,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
INIT_WORK(&ic->commit_work, integrity_commit);
- if (ic->mode == 'J') {
+ if (ic->mode == 'J' || ic->mode == 'B') {
ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
if (!ic->writer_wq) {
ti->error = "Cannot allocate workqueue";
@@ -3349,7 +3841,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -3409,6 +3901,27 @@ try_smaller_buffer:
ti->error = "The device is too small";
goto bad;
}
+
+ if (log2_sectors_per_bitmap_bit < 0)
+ log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
+ if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
+ log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
+
+ bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
+ if (bits_in_journal > UINT_MAX)
+ bits_in_journal = UINT_MAX;
+ while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
+ log2_sectors_per_bitmap_bit++;
+
+ log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
+ ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
+ if (should_write_sb) {
+ ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
+ }
+ n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
+ + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
+ ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
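A worked example of this sizing, with illustrative numbers:

/*
 * A 1 GiB data area is 2^21 512-byte sectors.  With
 * log2_sectors_per_block == 0 and the default 2^15 sectors per bitmap
 * bit, log2_blocks_per_bitmap_bit == 15 and
 * n_bitmap_bits = 2^21 >> 15 = 64.  A 4096-byte bitmap block holds
 * 32768 bits, so n_bitmap_blocks = DIV_ROUND_UP(64, 32768) = 1.
 */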
+
if (!ic->meta_dev)
ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
@@ -3433,25 +3946,21 @@ try_smaller_buffer:
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
- DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
+ DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
(unsigned long long)ic->provided_data_sectors);
DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
+ DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
- if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
+ if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
}
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
- if (!ic->internal_hash) {
- r = -EINVAL;
- ti->error = "Recalculate is only valid with internal hash";
- goto bad;
- }
+ if (ic->internal_hash) {
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
 		if (!ic->recalc_wq) {
ti->error = "Cannot allocate workqueue";
@@ -3488,6 +3997,45 @@ try_smaller_buffer:
r = create_journal(ic, &ti->error);
if (r)
goto bad;
+
+ }
+
+ if (ic->mode == 'B') {
+ unsigned i;
+ unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+
+ ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ if (!ic->recalc_bitmap) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ if (!ic->may_write_bitmap) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
+ if (!ic->bbs) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
+ for (i = 0; i < ic->n_bitmap_blocks; i++) {
+ struct bitmap_block_status *bbs = &ic->bbs[i];
+ unsigned sector, pl_index, pl_offset;
+
+ INIT_WORK(&bbs->work, bitmap_block_work);
+ bbs->ic = ic;
+ bbs->idx = i;
+ bio_list_init(&bbs->bio_queue);
+ spin_lock_init(&bbs->bio_queue_lock);
+
+ sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
+ pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
+ pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
+
+ bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
+ }
}
if (should_write_sb) {
@@ -3512,6 +4060,17 @@ try_smaller_buffer:
if (r)
goto bad;
}
+ if (ic->mode == 'B') {
+ unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ if (!max_io_len)
+ max_io_len = 1U << 31;
+ DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
+ if (!ti->max_io_len || ti->max_io_len > max_io_len) {
+ r = dm_set_target_max_io_len(ti, max_io_len);
+ if (r)
+ goto bad;
+ }
+ }
if (!ic->internal_hash)
dm_integrity_set(ti, ic);
@@ -3520,6 +4079,7 @@ try_smaller_buffer:
ti->flush_supported = true;
return 0;
+
bad:
dm_integrity_dtr(ti);
return r;
@@ -3542,10 +4102,9 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->writer_wq);
if (ic->recalc_wq)
destroy_workqueue(ic->recalc_wq);
- if (ic->recalc_buffer)
- vfree(ic->recalc_buffer);
- if (ic->recalc_tags)
- kvfree(ic->recalc_tags);
+ vfree(ic->recalc_buffer);
+ kvfree(ic->recalc_tags);
+ kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
mempool_exit(&ic->journal_io_mempool);
@@ -3555,9 +4114,11 @@ static void dm_integrity_dtr(struct dm_target *ti)
dm_put_device(ti, ic->dev);
if (ic->meta_dev)
dm_put_device(ti, ic->meta_dev);
- dm_integrity_free_page_list(ic, ic->journal);
- dm_integrity_free_page_list(ic, ic->journal_io);
- dm_integrity_free_page_list(ic, ic->journal_xor);
+ dm_integrity_free_page_list(ic->journal);
+ dm_integrity_free_page_list(ic->journal_io);
+ dm_integrity_free_page_list(ic->journal_xor);
+ dm_integrity_free_page_list(ic->recalc_bitmap);
+ dm_integrity_free_page_list(ic->may_write_bitmap);
if (ic->journal_scatterlist)
dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
if (ic->journal_io_scatterlist)
@@ -3595,7 +4156,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c740153b4e52..1e03bc89e20f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -2069,7 +2069,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
/* alloc table */
r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
if (r)
- goto err_destroy_dm;
+ goto err_hash_remove;
/* add targets */
for (i = 0; i < dmi->target_count; i++) {
@@ -2116,6 +2116,10 @@ int __init dm_early_create(struct dm_ioctl *dmi,
err_destroy_table:
dm_table_destroy(t);
+err_hash_remove:
+ (void) __hash_remove(__get_name_cell(dmi->name));
+ /* release reference from __get_name_cell */
+ dm_put(md);
err_destroy_dm:
dm_put(md);
dm_destroy(md);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2ee5e357a0a7..dbcc1e41cd57 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -544,8 +544,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_REMAPPED;
}
-static void multipath_release_clone(struct request *clone)
+static void multipath_release_clone(struct request *clone,
+ union map_info *map_context)
{
+ if (unlikely(map_context)) {
+ /*
+		 * A non-NULL map_context means the caller is still in the
+		 * map method and must undo multipath_clone_and_map().
+ */
+ struct dm_mpath_io *mpio = get_mpio(map_context);
+ struct pgpath *pgpath = mpio->pgpath;
+
+ if (pgpath && pgpath->pg->ps.type->end_io)
+ pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
+ &pgpath->path,
+ mpio->nr_bytes);
+ }
+
blk_put_request(clone);
}
@@ -882,6 +897,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
+ kfree(attached_handler_name);
if (r) {
dm_put_device(ti, p->path.dev);
goto bad;
@@ -896,7 +912,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
return p;
bad:
- kfree(attached_handler_name);
free_pgpath(p);
return ERR_PTR(r);
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b66745bd08bb..5f7063f05ae0 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -168,7 +168,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
struct request *rq = tio->orig;
blk_rq_unprep_clone(clone);
- tio->ti->type->release_clone_rq(clone);
+ tio->ti->type->release_clone_rq(clone, NULL);
rq_end_stats(md, rq);
blk_mq_end_request(rq, error);
@@ -201,7 +201,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
rq_end_stats(md, rq);
if (tio->clone) {
blk_rq_unprep_clone(tio->clone);
- tio->ti->type->release_clone_rq(tio->clone);
+ tio->ti->type->release_clone_rq(tio->clone, NULL);
}
dm_mq_delay_requeue_request(rq, delay_ms);
@@ -398,7 +398,7 @@ static int map_request(struct dm_rq_target_io *tio)
case DM_MAPIO_REMAPPED:
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
- ti->type->release_clone_rq(clone);
+ ti->type->release_clone_rq(clone, &tio->info);
return DM_MAPIO_REQUEUE;
}
@@ -408,7 +408,7 @@ static int map_request(struct dm_rq_target_io *tio)
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
blk_rq_unprep_clone(clone);
- tio->ti->type->release_clone_rq(clone);
+ tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a168963b757d..3107f2b1988b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -44,11 +45,11 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
- struct list_head *table;
+ struct hlist_bl_head *table;
};
struct dm_snapshot {
- struct mutex lock;
+ struct rw_semaphore lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -76,7 +77,9 @@ struct dm_snapshot {
atomic_t pending_exceptions_count;
- /* Protected by "lock" */
+ spinlock_t pe_allocation_lock;
+
+ /* Protected by "pe_allocation_lock" */
sector_t exception_start_sequence;
/* Protected by kcopyd single-threaded callback */
@@ -457,9 +460,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
active = s->active;
- mutex_unlock(&s->lock);
+ up_read(&s->lock);
if (active) {
if (snap_src)
@@ -618,6 +621,36 @@ static void unregister_snapshot(struct dm_snapshot *s)
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
+
+/* Lock to protect access to the completed and pending exception hash tables. */
+struct dm_exception_table_lock {
+ struct hlist_bl_head *complete_slot;
+ struct hlist_bl_head *pending_slot;
+};
+
+static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
+ struct dm_exception_table_lock *lock)
+{
+ struct dm_exception_table *complete = &s->complete;
+ struct dm_exception_table *pending = &s->pending;
+
+ lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
+ lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+}
+
+static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_lock(lock->complete_slot);
+ hlist_bl_lock(lock->pending_slot);
+}
+
+static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_unlock(lock->pending_slot);
+ hlist_bl_unlock(lock->complete_slot);
+}
+
static int dm_exception_table_init(struct dm_exception_table *et,
uint32_t size, unsigned hash_shift)
{
@@ -625,12 +658,12 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct list_head));
+ et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
if (!et->table)
return -ENOMEM;
for (i = 0; i < size; i++)
- INIT_LIST_HEAD(et->table + i);
+ INIT_HLIST_BL_HEAD(et->table + i);
return 0;
}
@@ -638,15 +671,16 @@ static int dm_exception_table_init(struct dm_exception_table *et,
static void dm_exception_table_exit(struct dm_exception_table *et,
struct kmem_cache *mem)
{
- struct list_head *slot;
- struct dm_exception *ex, *next;
+ struct hlist_bl_head *slot;
+ struct dm_exception *ex;
+ struct hlist_bl_node *pos, *n;
int i, size;
size = et->hash_mask + 1;
for (i = 0; i < size; i++) {
slot = et->table + i;
- list_for_each_entry_safe (ex, next, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
kmem_cache_free(mem, ex);
}
@@ -660,7 +694,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
static void dm_remove_exception(struct dm_exception *e)
{
- list_del(&e->hash_list);
+ hlist_bl_del(&e->hash_list);
}
/*
@@ -670,11 +704,12 @@ static void dm_remove_exception(struct dm_exception *e)
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
chunk_t chunk)
{
- struct list_head *slot;
+ struct hlist_bl_head *slot;
+ struct hlist_bl_node *pos;
struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
- list_for_each_entry (e, slot, hash_list)
+ hlist_bl_for_each_entry(e, pos, slot, hash_list)
if (chunk >= e->old_chunk &&
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
@@ -721,7 +756,8 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
static void dm_insert_exception(struct dm_exception_table *eh,
struct dm_exception *new_e)
{
- struct list_head *l;
+ struct hlist_bl_head *l;
+ struct hlist_bl_node *pos;
struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -731,7 +767,7 @@ static void dm_insert_exception(struct dm_exception_table *eh,
goto out;
/* List is ordered by old_chunk */
- list_for_each_entry_reverse(e, l, hash_list) {
+ hlist_bl_for_each_entry(e, pos, l, hash_list) {
/* Insert after an existing chunk? */
if (new_e->old_chunk == (e->old_chunk +
dm_consecutive_chunk_count(e) + 1) &&
@@ -752,12 +788,24 @@ static void dm_insert_exception(struct dm_exception_table *eh,
return;
}
- if (new_e->old_chunk > e->old_chunk)
+ if (new_e->old_chunk < e->old_chunk)
break;
}
out:
- list_add(&new_e->hash_list, e ? &e->hash_list : l);
+ if (!e) {
+ /*
+ * Either the table doesn't support consecutive chunks or slot
+ * l is empty.
+ */
+ hlist_bl_add_head(&new_e->hash_list, l);
+ } else if (new_e->old_chunk < e->old_chunk) {
+ /* Add before an existing exception */
+ hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+ } else {
+ /* Add to l's tail: e is the last exception in this slot */
+ hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+ }
}
/*
@@ -766,6 +814,7 @@ out:
*/
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
+ struct dm_exception_table_lock lock;
struct dm_snapshot *s = context;
struct dm_exception *e;
@@ -778,7 +827,17 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
+ /*
+ * Although there is no need to lock access to the exception tables
+ * here, if we don't then hlist_bl_add_head(), called by
+ * dm_insert_exception(), will complain about accessing the
+ * corresponding list without locking it first.
+ */
+ dm_exception_table_lock_init(s, old, &lock);
+
+ dm_exception_table_lock(&lock);
dm_insert_exception(&s->complete, e);
+ dm_exception_table_unlock(&lock);
return 0;
}
@@ -807,7 +866,7 @@ static int calc_max_buckets(void)
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
- mem /= sizeof(struct list_head);
+ mem /= sizeof(struct hlist_bl_head);
return mem;
}
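Worked numbers for the new divisor on a 64-bit build (illustrative):

/*
 * struct hlist_bl_head is one pointer (8 bytes on 64-bit) versus
 * 16 bytes for struct list_head, so the same fixed 2 MB budget now
 * yields 2097152 / 8 = 262144 buckets instead of 131072, halving the
 * average chain length at no extra memory cost.
 */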
@@ -927,7 +986,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -942,7 +1001,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
if (b)
flush_bios(b);
@@ -1001,9 +1060,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
goto shut;
}
@@ -1044,10 +1103,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1089,10 +1148,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1188,10 +1247,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ spin_lock_init(&s->pe_allocation_lock);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
s->out_of_order_tree = RB_ROOT;
- mutex_init(&s->lock);
+ init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1357,9 +1417,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- mutex_lock(&snap_dest->lock);
+ down_write(&snap_dest->lock);
snap_dest->valid = 0;
- mutex_unlock(&snap_dest->lock);
+ up_write(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1390,8 +1450,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- mutex_destroy(&s->lock);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1467,6 +1525,13 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
dm_table_event(s->ti->table);
}
+static void invalidate_snapshot(struct dm_snapshot *s, int err)
+{
+ down_write(&s->lock);
+ __invalidate_snapshot(s, err);
+ up_write(&s->lock);
+}
+
static void pending_complete(void *context, int success)
{
struct dm_snap_pending_exception *pe = context;
@@ -1475,43 +1540,63 @@ static void pending_complete(void *context, int success)
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
struct bio *full_bio = NULL;
+ struct dm_exception_table_lock lock;
int error = 0;
+ dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
+
if (!success) {
/* Read/write error - snapshot is unusable */
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -EIO);
+ invalidate_snapshot(s, -EIO);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -ENOMEM);
+ invalidate_snapshot(s, -ENOMEM);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
*e = pe->e;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid) {
+ up_read(&s->lock);
free_completed_exception(e);
error = 1;
+
goto out;
}
- /* Check for conflicting reads */
- __check_for_conflicting_io(s, pe->e.old_chunk);
-
/*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
+ * Add a proper exception. After inserting the completed exception all
+ * subsequent snapshot reads to this chunk will be redirected to the
+ * COW device. This ensures that we do not starve. Moreover, as long
+ * as the pending exception exists, neither origin writes nor snapshot
+ * merging can overwrite the chunk in origin.
*/
dm_insert_exception(&s->complete, e);
+ up_read(&s->lock);
+
+ /* Wait for conflicting reads to drain */
+ if (__chunk_is_tracked(s, pe->e.old_chunk)) {
+ dm_exception_table_unlock(&lock);
+ __check_for_conflicting_io(s, pe->e.old_chunk);
+ dm_exception_table_lock(&lock);
+ }
out:
+ /* Remove the in-flight exception from the list */
dm_remove_exception(&pe->e);
+
+ dm_exception_table_unlock(&lock);
+
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
full_bio = pe->full_bio;
@@ -1519,8 +1604,6 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- mutex_unlock(&s->lock);
-
/* Submit any pending write bios */
if (error) {
if (full_bio)
@@ -1660,43 +1743,59 @@ __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
}
/*
- * Looks to see if this snapshot already has a pending exception
- * for this chunk, otherwise it allocates a new one and inserts
- * it into the pending table.
+ * Inserts a pending exception into the pending table.
*
- * NOTE: a write lock must be held on snap->lock before calling
- * this.
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
*/
static struct dm_snap_pending_exception *
-__find_pending_exception(struct dm_snapshot *s,
- struct dm_snap_pending_exception *pe, chunk_t chunk)
+__insert_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
{
- struct dm_snap_pending_exception *pe2;
-
- pe2 = __lookup_pending_exception(s, chunk);
- if (pe2) {
- free_pending_exception(pe);
- return pe2;
- }
-
pe->e.old_chunk = chunk;
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
pe->started = 0;
pe->full_bio = NULL;
+ spin_lock(&s->pe_allocation_lock);
if (s->store->type->prepare_exception(s->store, &pe->e)) {
+ spin_unlock(&s->pe_allocation_lock);
free_pending_exception(pe);
return NULL;
}
pe->exception_sequence = s->exception_start_sequence++;
+ spin_unlock(&s->pe_allocation_lock);
dm_insert_exception(&s->pending, &pe->e);
return pe;
}
+/*
+ * Looks to see if this snapshot already has a pending exception
+ * for this chunk, otherwise it allocates a new one and inserts
+ * it into the pending table.
+ *
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
+ */
+static struct dm_snap_pending_exception *
+__find_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
+{
+ struct dm_snap_pending_exception *pe2;
+
+ pe2 = __lookup_pending_exception(s, chunk);
+ if (pe2) {
+ free_pending_exception(pe);
+ return pe2;
+ }
+
+ return __insert_pending_exception(s, pe, chunk);
+}
+
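
The split above exists because alloc_pending_exception() can sleep, so the per-slot lock is dropped around it and a racing thread may install the same chunk first; __find_pending_exception() therefore repeats the lookup after re-taking the lock and frees the loser's copy. A minimal userspace sketch of that lookup/alloc/re-lookup shape, with a pthread mutex and illustrative names standing in for the dm-snapshot structures:

/* Userspace sketch of the pattern above: drop the lock for the
 * (possibly sleeping) allocation, re-take it, repeat the lookup, and
 * free the extra copy if another thread won the race.
 */
#include <pthread.h>
#include <stdlib.h>

struct pending { unsigned long chunk; struct pending *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending *table;

static struct pending *lookup(unsigned long chunk)
{
	struct pending *p;

	for (p = table; p; p = p->next)
		if (p->chunk == chunk)
			return p;
	return NULL;
}

static struct pending *find_or_insert(unsigned long chunk)
{
	struct pending *pe, *pe2;

	pthread_mutex_lock(&table_lock);
	pe = lookup(chunk);
	if (pe)
		goto out;

	pthread_mutex_unlock(&table_lock);
	pe = malloc(sizeof(*pe));	/* may "sleep"; the lock is dropped */
	pthread_mutex_lock(&table_lock);
	if (!pe)
		goto out;

	pe2 = lookup(chunk);		/* did another thread win the race? */
	if (pe2) {
		free(pe);
		pe = pe2;
	} else {
		pe->chunk = chunk;
		pe->next = table;
		table = pe;
	}
out:
	pthread_mutex_unlock(&table_lock);
	return pe;
}

int main(void)
{
	return find_or_insert(7) == find_or_insert(7) ? 0 : 1;
}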
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
@@ -1714,6 +1813,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ struct dm_exception_table_lock lock;
init_tracked_chunk(bio);
@@ -1723,13 +1823,15 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+ dm_exception_table_lock_init(s, chunk, &lock);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
if (!s->valid)
return DM_MAPIO_KILL;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1752,15 +1854,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
pe = alloc_pending_exception(s);
- mutex_lock(&s->lock);
-
- if (!s->valid || s->snapshot_overflowed) {
- free_pending_exception(pe);
- r = DM_MAPIO_KILL;
- goto out_unlock;
- }
+ dm_exception_table_lock(&lock);
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
@@ -1771,13 +1867,22 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
+ down_write(&s->lock);
+
if (s->store->userspace_supports_overflow) {
- s->snapshot_overflowed = 1;
- DMERR("Snapshot overflowed: Unable to allocate exception.");
+ if (s->valid && !s->snapshot_overflowed) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ }
} else
__invalidate_snapshot(s, -ENOMEM);
+ up_write(&s->lock);
+
r = DM_MAPIO_KILL;
- goto out_unlock;
+ goto out;
}
}
@@ -1789,7 +1894,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_full_bio(pe, bio);
goto out;
}
@@ -1797,9 +1905,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio_list_add(&pe->snapshot_bios, bio);
if (!pe->started) {
- /* this is protected by snap->lock */
+ /* this is protected by the exception table lock */
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_copy(pe);
goto out;
}
@@ -1809,7 +1920,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
out:
return r;
}
@@ -1845,7 +1957,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1876,12 +1988,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return r;
}
@@ -1913,7 +2025,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
+ down_read(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1923,7 +2035,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- mutex_unlock(&snap_src->lock);
+ up_read(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1969,11 +2081,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
- mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- mutex_unlock(&snap_dest->lock);
- mutex_unlock(&snap_src->lock);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1988,9 +2100,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->active = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2030,7 +2142,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- mutex_lock(&snap->lock);
+ down_write(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2055,7 +2167,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- mutex_unlock(&snap->lock);
+ up_write(&snap->lock);
break;
@@ -2107,9 +2219,10 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
struct dm_exception *e;
- struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe, *pe2;
struct dm_snap_pending_exception *pe_to_start_now = NULL;
struct dm_snap_pending_exception *pe_to_start_last = NULL;
+ struct dm_exception_table_lock lock;
chunk_t chunk;
/* Do all the snapshots on this origin */
@@ -2121,52 +2234,59 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- mutex_lock(&snap->lock);
-
- /* Only deal with valid and active snapshots */
- if (!snap->valid || !snap->active)
- goto next_snapshot;
-
/* Nothing to do if writing beyond end of snapshot */
if (sector >= dm_table_get_size(snap->ti->table))
- goto next_snapshot;
+ continue;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
chunk = sector_to_chunk(snap->store, sector);
+ dm_exception_table_lock_init(snap, chunk, &lock);
- /*
- * Check exception table to see if block
- * is already remapped in this snapshot
- * and trigger an exception if not.
- */
- e = dm_lookup_exception(&snap->complete, chunk);
- if (e)
+ down_read(&snap->lock);
+ dm_exception_table_lock(&lock);
+
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
goto next_snapshot;
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- mutex_unlock(&snap->lock);
- pe = alloc_pending_exception(snap);
- mutex_lock(&snap->lock);
-
- if (!snap->valid) {
- free_pending_exception(pe);
- goto next_snapshot;
- }
-
+ /*
+ * Check exception table to see if block is already
+ * remapped in this snapshot and trigger an exception
+ * if not.
+ */
e = dm_lookup_exception(&snap->complete, chunk);
- if (e) {
- free_pending_exception(pe);
+ if (e)
goto next_snapshot;
- }
- pe = __find_pending_exception(snap, pe, chunk);
- if (!pe) {
- __invalidate_snapshot(snap, -ENOMEM);
- goto next_snapshot;
+ dm_exception_table_unlock(&lock);
+ pe = alloc_pending_exception(snap);
+ dm_exception_table_lock(&lock);
+
+ pe2 = __lookup_pending_exception(snap, chunk);
+
+ if (!pe2) {
+ e = dm_lookup_exception(&snap->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ pe = __insert_pending_exception(snap, pe, chunk);
+ if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
+
+ invalidate_snapshot(snap, -ENOMEM);
+ continue;
+ }
+ } else {
+ free_pending_exception(pe);
+ pe = pe2;
}
}
@@ -2193,7 +2313,8 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- mutex_unlock(&snap->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 314d17ca6466..64dd0b34fcf4 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
return DM_MAPIO_KILL;
}
-static void io_err_release_clone_rq(struct request *clone)
+static void io_err_release_clone_rq(struct request *clone,
+ union map_info *map_context)
{
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index ed3caceaed07..7f0840601737 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -202,6 +202,13 @@ struct dm_pool_metadata {
bool fail_io:1;
/*
+ * Set once a thin-pool has been accessed through one of the interfaces
+ * that imply the pool is in-service (e.g. thin devices created/deleted,
+ * thin-pool message, metadata snapshots, etc).
+ */
+ bool in_service:1;
+
+ /*
* Reading the space map roots can fail, so we read it into these
* buffers before the superblock is locked and updated.
*/
@@ -367,6 +374,32 @@ static int subtree_equal(void *context, const void *value1_le, const void *value
/*----------------------------------------------------------------*/
+/*
+ * Variant that is used for in-core only changes or code that
+ * shouldn't put the pool in service on its own (e.g. commit).
+ */
+static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
+ __acquires(pmd->root_lock)
+{
+ down_write(&pmd->root_lock);
+}
+#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
+
+static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
+{
+ __pmd_write_lock(pmd);
+ if (unlikely(!pmd->in_service))
+ pmd->in_service = true;
+}
+
+static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
+ __releases(pmd->root_lock)
+{
+ up_write(&pmd->root_lock);
+}
+
+/*----------------------------------------------------------------*/
+
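
These wrappers make the in_service transition a side effect of taking a "real" write lock, while pmd_write_lock_in_core() leaves the flag alone for in-core-only changes such as commit. A hedged pthread sketch of the same latch-on-write-lock pattern, with illustrative names:

/* Sketch of the wrapper pattern above, with a pthread rwlock standing
 * in for pmd->root_lock: __write_lock() for in-core-only changes, and
 * write_lock() that additionally latches the one-way in_service flag.
 */
#include <pthread.h>
#include <stdbool.h>

struct pool_md {
	pthread_rwlock_t root_lock;
	bool in_service;
};

static void __write_lock(struct pool_md *pmd)
{
	pthread_rwlock_wrlock(&pmd->root_lock);
}

static void write_lock(struct pool_md *pmd)
{
	__write_lock(pmd);
	if (!pmd->in_service)	/* one-way latch, set under the lock */
		pmd->in_service = true;
}

static void write_unlock(struct pool_md *pmd)
{
	pthread_rwlock_unlock(&pmd->root_lock);
}

int main(void)
{
	struct pool_md pmd = { PTHREAD_RWLOCK_INITIALIZER, false };

	__write_lock(&pmd);	/* e.g. commit: stays out of service */
	write_unlock(&pmd);

	write_lock(&pmd);	/* e.g. create_thin: now in service */
	write_unlock(&pmd);
	return !pmd.in_service;
}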
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
struct dm_block **sblock)
{
@@ -790,6 +823,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
*/
BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+ if (unlikely(!pmd->in_service))
+ return 0;
+
r = __write_changed_details(pmd);
if (r < 0)
return r;
@@ -853,6 +889,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
pmd->time = 0;
INIT_LIST_HEAD(&pmd->thin_devices);
pmd->fail_io = false;
+ pmd->in_service = false;
pmd->bdev = bdev;
pmd->data_block_size = data_block_size;
@@ -903,7 +940,6 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
}
-
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
@@ -1032,10 +1068,10 @@ int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_thin(pmd, dev);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1123,10 +1159,10 @@ int dm_pool_create_snap(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_snap(pmd, dev, origin);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1166,10 +1202,10 @@ int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __delete_device(pmd, dev);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1180,7 +1216,7 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
@@ -1194,7 +1230,7 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
r = 0;
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1225,7 +1261,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
* We commit to ensure the btree roots which we increment in a
* moment are up to date.
*/
- __commit_transaction(pmd);
+ r = __commit_transaction(pmd);
+ if (r < 0) {
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+ return r;
+ }
/*
* Copy the superblock.
@@ -1283,10 +1324,10 @@ int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __reserve_metadata_snap(pmd);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1331,10 +1372,10 @@ int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __release_metadata_snap(pmd);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1377,19 +1418,19 @@ int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
if (!pmd->fail_io)
r = __open_device(pmd, dev, 0, td);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
int dm_pool_close_thin_device(struct dm_thin_device *td)
{
- down_write(&td->pmd->root_lock);
+ pmd_write_lock_in_core(td->pmd);
__close_device(td);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return 0;
}
@@ -1570,10 +1611,10 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __insert(td, block, data_block);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1657,10 +1698,10 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove(td, block);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1670,10 +1711,10 @@ int dm_thin_remove_range(struct dm_thin_device *td,
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove_range(td, begin, end);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1696,13 +1737,13 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
{
int r = 0;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
for (; b != e; b++) {
r = dm_sm_inc_block(pmd->data_sm, b);
if (r)
break;
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1711,13 +1752,13 @@ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
{
int r = 0;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
for (; b != e; b++) {
r = dm_sm_dec_block(pmd->data_sm, b);
if (r)
break;
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1765,10 +1806,10 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = dm_sm_new_block(pmd->data_sm, result);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1777,12 +1818,16 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ /*
+ * Care is taken to not have commit be what
+ * triggers putting the thin-pool in-service.
+ */
+ __pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
r = __commit_transaction(pmd);
- if (r <= 0)
+ if (r < 0)
goto out;
/*
@@ -1790,7 +1835,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
*/
r = __begin_transaction(pmd);
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1806,7 +1851,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
@@ -1817,7 +1862,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
pmd->fail_io = true;
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1948,10 +1993,10 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __resize_space_map(pmd->data_sm, new_count);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1960,29 +2005,29 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
if (!r)
__set_metadata_reserve(pmd);
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
dm_bm_set_read_only(pmd->bm);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
}
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
dm_bm_set_read_write(pmd->bm);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
@@ -1992,9 +2037,9 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
{
int r;
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -2005,7 +2050,7 @@ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
r = superblock_lock(pmd, &sblock);
@@ -2019,7 +2064,7 @@ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
dm_bm_unlock(sblock);
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index f7822875589e..1cb137f0ef9d 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -190,7 +190,6 @@ struct writeback_struct {
struct dm_writecache *wc;
struct wc_entry **wc_list;
unsigned wc_list_n;
- unsigned page_offset;
struct page *page;
struct wc_entry *wc_list_inline[WB_LIST_INLINE];
struct bio bio;
@@ -546,21 +545,20 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
e = container_of(node, struct wc_entry, rb_node);
if (read_original_sector(wc, e) == block)
break;
+
node = (read_original_sector(wc, e) >= block ?
e->rb_node.rb_left : e->rb_node.rb_right);
if (unlikely(!node)) {
- if (!(flags & WFE_RETURN_FOLLOWING)) {
+ if (!(flags & WFE_RETURN_FOLLOWING))
return NULL;
- }
if (read_original_sector(wc, e) >= block) {
- break;
+ return e;
} else {
node = rb_next(&e->rb_node);
- if (unlikely(!node)) {
+ if (unlikely(!node))
return NULL;
- }
e = container_of(node, struct wc_entry, rb_node);
- break;
+ return e;
}
}
}
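
With this change writecache_find_entry() returns the following entry directly instead of breaking out of the descent; the underlying operation is a lower-bound search. A minimal sketch of that search over a plain binary search tree (the kernel code walks an rbtree, but the logic is the same):

/* Sketch of the lower-bound search above: find the node matching
 * `key`, or, if none exists, the smallest node above it (what
 * WFE_RETURN_FOLLOWING asks for).
 */
#include <stdio.h>

struct node {
	unsigned long key;
	struct node *left, *right;
};

static struct node *lower_bound(struct node *root, unsigned long key)
{
	struct node *best = NULL;

	while (root) {
		if (root->key == key)
			return root;
		if (root->key > key) {	/* candidate "following" entry */
			best = root;
			root = root->left;
		} else {
			root = root->right;
		}
	}
	return best;	/* NULL when everything is below `key` */
}

int main(void)
{
	struct node n10 = { 10 }, n20 = { 20 }, n30 = { 30 };
	struct node *found;

	n20.left = &n10;
	n20.right = &n30;

	found = lower_bound(&n20, 15);
	printf("lower_bound(15) -> %lu\n", found ? found->key : 0); /* 20 */
	return 0;
}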
@@ -571,7 +569,7 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
node = rb_prev(&e->rb_node);
else
node = rb_next(&e->rb_node);
- if (!node)
+ if (unlikely(!node))
return e;
e2 = container_of(node, struct wc_entry, rb_node);
if (read_original_sector(wc, e2) != block)
@@ -804,7 +802,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
writecache_free_entry(wc, e);
}
- if (!node)
+ if (unlikely(!node))
break;
e = container_of(node, struct wc_entry, rb_node);
@@ -1478,10 +1476,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
- wb->bio.bi_end_io = writecache_writeback_endio;
- bio_set_dev(&wb->bio, wc->dev->bdev);
- wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
- wb->page_offset = PAGE_SIZE;
+ bio->bi_end_io = writecache_writeback_endio;
+ bio_set_dev(bio, wc->dev->bdev);
+ bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
GFP_NOIO | __GFP_NORETRY |
@@ -1507,12 +1504,12 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(&wb->bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
- bio_endio(&wb->bio);
+ bio_endio(bio);
} else {
- submit_bio(&wb->bio);
+ submit_bio(bio);
}
__writeback_throttle(wc, wbl);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index fa68336560c3..d8334cd45d7c 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
goto out;
}
+ if (!nr_blkz)
+ break;
+
/* Process report */
for (i = 0; i < nr_blkz; i++) {
ret = dmz_init_zone(zmd, zone, &blkz[i]);
@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
&blkz, &nr_blkz, GFP_NOIO);
+ if (!nr_blkz)
+ ret = -EIO;
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 8865c1709e16..51d029bbb740 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -643,7 +643,8 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ aligned_capacity = dev->capacity &
+ ~((sector_t)blk_queue_zone_sectors(q) - 1);
if (ti->begin ||
((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
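
The added (sector_t) cast matters because blk_queue_zone_sectors() yields a 32-bit value: building the mask ~(zone_sectors - 1) in 32-bit arithmetic and then widening it zeroes the upper half of a 64-bit capacity. A small demo of the truncation, using stdint types in place of sector_t:

/* Demo of the truncation the cast above fixes: a mask built in 32
 * bits zero-extends and wipes the high half of a 64-bit capacity,
 * while casting before the subtraction keeps it intact.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity = 0x200000000ULL;	/* > 4G sectors */
	uint32_t zone_sectors = 0x80000;	/* 32-bit queue limit */

	uint64_t buggy = capacity & ~(zone_sectors - 1);
	uint64_t fixed = capacity & ~((uint64_t)zone_sectors - 1);

	printf("buggy: 0x%llx\n", (unsigned long long)buggy); /* 0x0 */
	printf("fixed: 0x%llx\n", (unsigned long long)fixed); /* 0x200000000 */
	return 0;
}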
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 043f0761e4a0..1fb1333fefec 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -781,7 +781,8 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
- fmode_t mode) {
+ fmode_t mode)
+{
struct table_device *td;
list_for_each_entry(td, l, list)
@@ -792,7 +793,8 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
- struct dm_dev **result) {
+ struct dm_dev **result)
+{
int r;
struct table_device *td;
@@ -1906,7 +1908,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
- struct dax_device *dax_dev = NULL;
struct mapped_device *md;
void *old_md;
@@ -1969,11 +1970,10 @@ static struct mapped_device *alloc_dev(int minor)
sprintf(md->disk->disk_name, "dm-%d", minor);
if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
- dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
- if (!dax_dev)
+ md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+ if (!md->dax_dev)
goto bad;
}
- md->dax_dev = dax_dev;
add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 0a3b8ae4a29c..b8a62188f6be 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -190,6 +190,8 @@ static int sm_find_free(void *addr, unsigned begin, unsigned end,
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
+ memset(ll, 0, sizeof(struct ll_disk));
+
ll->tm = tm;
ll->bitmap_info.tm = tm;
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index d730693f299c..8f7f8efc71a7 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -37,6 +37,25 @@
#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
#define ISC_PFE_CFG0_BPS_MASK GENMASK(30, 28)
+#define ISC_PFE_CFG0_COLEN BIT(12)
+#define ISC_PFE_CFG0_ROWEN BIT(13)
+
+/* ISC Parallel Front End Configuration 1 Register */
+#define ISC_PFE_CFG1 0x00000010
+
+#define ISC_PFE_CFG1_COLMIN(v) ((v))
+#define ISC_PFE_CFG1_COLMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG1_COLMAX(v) ((v) << 16)
+#define ISC_PFE_CFG1_COLMAX_MASK GENMASK(31, 16)
+
+/* ISC Parallel Front End Configuration 2 Register */
+#define ISC_PFE_CFG2 0x00000014
+
+#define ISC_PFE_CFG2_ROWMIN(v) ((v))
+#define ISC_PFE_CFG2_ROWMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG2_ROWMAX(v) ((v) << 16)
+#define ISC_PFE_CFG2_ROWMAX_MASK GENMASK(31, 16)
+
/* ISC Clock Enable Register */
#define ISC_CLKEN 0x00000018
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index 4bba9da206e4..94cb309fdb52 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -721,6 +721,40 @@ static void isc_start_dma(struct isc_device *isc)
u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
u32 dctrl_dview;
dma_addr_t addr0;
+ u32 h, w;
+
+ h = isc->fmt.fmt.pix.height;
+ w = isc->fmt.fmt.pix.width;
+
+ /*
+ * If the sensor is not RAW, it outputs each pixel (12-16 bits) as
+ * two samples on the ISC data bus (which is 8-12 bits wide).
+ * The ISC counts each sample, so we must multiply these values by
+ * two to get the real number of samples for the required pixels.
+ */
+ if (!ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) {
+ h <<= 1;
+ w <<= 1;
+ }
+
+ /*
+ * We limit the column/row count that the ISC will output to the
+ * resolution we have configured.
+ * This avoids the situation where a misconfigured sensor sends more
+ * data than expected and the ISC blindly DMAs it to memory, causing
+ * corruption.
+ */
+ regmap_write(regmap, ISC_PFE_CFG1,
+ (ISC_PFE_CFG1_COLMIN(0) & ISC_PFE_CFG1_COLMIN_MASK) |
+ (ISC_PFE_CFG1_COLMAX(w - 1) & ISC_PFE_CFG1_COLMAX_MASK));
+
+ regmap_write(regmap, ISC_PFE_CFG2,
+ (ISC_PFE_CFG2_ROWMIN(0) & ISC_PFE_CFG2_ROWMIN_MASK) |
+ (ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK));
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
regmap_write(regmap, ISC_DAD0, addr0);
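
The two writes program a [COLMIN, COLMAX] x [ROWMIN, ROWMAX] crop window, with the counts doubled for non-RAW formats because each pixel arrives as two bus samples. A sketch of the register math with simplified, illustrative field macros (not the driver's own):

/* Sketch of the window-register math above: double the pixel counts
 * for non-RAW formats and pack min/max into the two 16-bit fields,
 * as the ISC_PFE_CFG1/CFG2 writes do.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define COLROW_MIN(v)	((uint32_t)(v) & 0xffff)
#define COLROW_MAX(v)	(((uint32_t)(v) & 0xffff) << 16)

static uint32_t window_reg(unsigned int len, bool is_raw)
{
	if (!is_raw)
		len <<= 1;	/* two samples per pixel on the bus */
	return COLROW_MIN(0) | COLROW_MAX(len - 1);
}

int main(void)
{
	/* 640x480 YUV (non-RAW): the column register covers 0..1279 */
	printf("cfg1 = 0x%08x\n", window_reg(640, false)); /* 0x04ff0000 */
	printf("cfg2 = 0x%08x\n", window_reg(480, false)); /* 0x03bf0000 */
	return 0;
}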
@@ -1965,6 +1999,8 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
+ INIT_WORK(&isc->awb_work, isc_awb_work);
+
ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
@@ -2018,8 +2054,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- INIT_WORK(&isc->awb_work, isc_awb_work);
-
/* Register video device */
strscpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
vdev->release = video_device_release_empty;
@@ -2135,8 +2169,11 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
break;
}
- subdev_entity->asd = devm_kzalloc(dev,
- sizeof(*subdev_entity->asd), GFP_KERNEL);
+ /* asd will be freed by the subsystem once it's added to the
+ * notifier list
+ */
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
+ GFP_KERNEL);
if (!subdev_entity->asd) {
of_node_put(rem);
ret = -ENOMEM;
@@ -2284,6 +2321,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->asd);
if (ret) {
fwnode_handle_put(subdev_entity->asd->match.fwnode);
+ kfree(subdev_entity->asd);
goto cleanup_subdev;
}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 3ce58dee4422..1d96cca61547 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1515,10 +1515,20 @@ static int coda_queue_setup(struct vb2_queue *vq,
static int coda_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "%s field isn't supported\n", __func__);
+ return -EINVAL;
+ }
+ }
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
v4l2_warn(&ctx->dev->v4l2_dev,
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 8339163a5231..4e24f5d781f4 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -104,7 +104,7 @@ static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
struct v4l2_output *output)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
- int temp_index = output->index;
+ unsigned int temp_index = output->index;
if (temp_index >= cfg->num_outputs)
return -EINVAL;
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 37f0d7146dfa..cb6a9e3946b6 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1527,23 +1527,20 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
unsigned long size;
struct videobuf_buffer *vb;
- vb = q->bufs[b->index];
-
if (!vout->streaming)
return -EINVAL;
- if (file->f_flags & O_NONBLOCK)
- /* Call videobuf_dqbuf for non blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
- else
- /* Call videobuf_dqbuf for blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
addr = (unsigned long) vout->buf_phy_addr[vb->i];
size = (unsigned long) vb->size;
dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
size, DMA_TO_DEVICE);
- return ret;
+ return 0;
}
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 799e526fd3df..8f097e514900 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -68,6 +68,7 @@ struct rcar_csi2;
/* Field Detection Control */
#define FLD_REG 0x1c
#define FLD_FLD_NUM(n) (((n) & 0xff) << 16)
+#define FLD_DET_SEL(n) (((n) & 0x3) << 4)
#define FLD_FLD_EN4 BIT(3)
#define FLD_FLD_EN3 BIT(2)
#define FLD_FLD_EN2 BIT(1)
@@ -84,6 +85,9 @@ struct rcar_csi2;
/* Interrupt Enable */
#define INTEN_REG 0x30
+#define INTEN_INT_AFIFO_OF BIT(27)
+#define INTEN_INT_ERRSOTHS BIT(4)
+#define INTEN_INT_ERRSOTSYNCHS BIT(3)
/* Interrupt Source Mask */
#define INTCLOSE_REG 0x34
@@ -475,7 +479,7 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
static int rcsi2_start_receiver(struct rcar_csi2 *priv)
{
const struct rcar_csi2_format *format;
- u32 phycnt, vcdt = 0, vcdt2 = 0;
+ u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
unsigned int i;
int mbps, ret;
@@ -507,6 +511,16 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
vcdt2 |= vcdt_part << ((i % 2) * 16);
}
+ if (priv->mf.field == V4L2_FIELD_ALTERNATE) {
+ fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2
+ | FLD_FLD_EN;
+
+ if (priv->mf.height == 240)
+ fld |= FLD_FLD_NUM(0);
+ else
+ fld |= FLD_FLD_NUM(1);
+ }
+
phycnt = PHYCNT_ENABLECLK;
phycnt |= (1 << priv->lanes) - 1;
@@ -514,6 +528,10 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
if (mbps < 0)
return mbps;
+ /* Enable interrupts. */
+ rcsi2_write(priv, INTEN_REG, INTEN_INT_AFIFO_OF | INTEN_INT_ERRSOTHS
+ | INTEN_INT_ERRSOTSYNCHS);
+
/* Init */
rcsi2_write(priv, TREF_REG, TREF_TREF);
rcsi2_write(priv, PHTC_REG, 0);
@@ -549,8 +567,7 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
- rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
- FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
+ rcsi2_write(priv, FLD_REG, fld);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
@@ -675,6 +692,43 @@ static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
.pad = &rcar_csi2_pad_ops,
};
+static irqreturn_t rcsi2_irq(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+ u32 status, err_status;
+
+ status = rcsi2_read(priv, INTSTATE_REG);
+ err_status = rcsi2_read(priv, INTERRSTATE_REG);
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTSTATE_REG, status);
+
+ if (!err_status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTERRSTATE_REG, err_status);
+
+ dev_info(priv->dev, "Transfer error, restarting CSI-2 receiver\n");
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rcsi2_irq_thread(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+
+ mutex_lock(&priv->lock);
+ rcsi2_stop(priv);
+ usleep_range(1000, 2000);
+ if (rcsi2_start(priv))
+ dev_warn(priv->dev, "Failed to restart CSI-2 receiver\n");
+ mutex_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
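
The hard handler stays cheap: it acknowledges the status registers and only escalates, via IRQ_WAKE_THREAD, when error bits require the sleeping stop/settle/restart done in rcsi2_irq_thread(). A userspace pthread analogue of that top-half/thread-half split; this mirrors the shape of devm_request_threaded_irq(), not the R-Car driver's API:

/* Userspace analogue of the split handler above: a fast "top half"
 * that acknowledges status and a slow "thread half" that does the
 * sleep-and-restart work, woken only on error.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool need_restart;

/* "hard irq": cheap, never sleeps */
static void top_half(unsigned int status, unsigned int err_status)
{
	if (!status || !err_status)
		return;		/* acknowledged, nothing more to do */

	pthread_mutex_lock(&lock);
	need_restart = true;	/* IRQ_WAKE_THREAD */
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

/* "irq thread": allowed to sleep while restarting the receiver */
static void *thread_half(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!need_restart)
		pthread_cond_wait(&wake, &lock);
	need_restart = false;
	pthread_mutex_unlock(&lock);

	usleep(1000);		/* stop, settle, restart */
	printf("receiver restarted\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, thread_half, NULL);
	top_half(0x1, 0x8);	/* error bits set: wakes the thread */
	pthread_join(t, NULL);
	return 0;
}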
/* -----------------------------------------------------------------------------
* Async handling and registration of subdevices and links.
*/
@@ -947,7 +1001,7 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
struct platform_device *pdev)
{
struct resource *res;
- int irq;
+ int irq, ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
@@ -958,6 +1012,12 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
if (irq < 0)
return irq;
+ ret = devm_request_threaded_irq(&pdev->dev, irq, rcsi2_irq,
+ rcsi2_irq_thread, IRQF_SHARED,
+ KBUILD_MODNAME, priv);
+ if (ret)
+ return ret;
+
priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return PTR_ERR(priv->rstc);
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 7fb3a4fa07c1..447bdfbe5afe 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -334,8 +334,8 @@ static int tegra_cec_probe(struct platform_device *pdev)
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
- if (!hdmi_dev)
- return -ENODEV;
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index eedb7d48e2ea..772716ab6b23 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -20,6 +20,7 @@
#include <linux/clkdev.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 7e425ff53491..fc6aa4c50144 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ec980bda071c..b61de360f26f 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index b6b3ff0fe17f..7ccb950aa7d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -22,7 +22,6 @@ config MLXSW_CORE_HWMON
config MLXSW_CORE_THERMAL
bool "Thermal zone support for Mellanox Technologies Switch ASICs"
depends on MLXSW_CORE && THERMAL
- depends on !(MLXSW_CORE=y && THERMAL=m)
default y
---help---
Say Y here if you want to automatically control fans speed according
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b2ff903a9cb6..b188fce3f641 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -53,6 +53,7 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/ieee802154.h>
+#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a6644a2c3ef7..7da80f375315 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1257,10 +1257,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
+ effects |= nvme_known_admin_effects(opcode);
if (ctrl->effects)
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- else
- effects = nvme_known_admin_effects(opcode);
/*
* For simplicity, IO to all namespaces is quiesced even if the command
@@ -2342,20 +2341,35 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
-static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
+ struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
- int count = 0;
- struct nvme_ctrl *ctrl;
+ struct nvme_ctrl *tmp;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
+ if (tmp->state == NVME_CTRL_DELETING ||
+ tmp->state == NVME_CTRL_DEAD)
+ continue;
+
+ if (tmp->cntlid == ctrl->cntlid) {
+ dev_err(ctrl->device,
+ "Duplicate cntlid %u with %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device));
+ return false;
+ }
- mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
- if (ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DEAD)
- count++;
+ if ((id->cmic & (1 << 1)) ||
+ (ctrl->opts && ctrl->opts->discovery_nqn))
+ continue;
+
+ dev_err(ctrl->device,
+ "Subsystem does not support multiple controllers\n");
+ return false;
}
- mutex_unlock(&subsys->lock);
- return count;
+ return true;
}
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
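
nvme_validate_cntlid() walks the subsystem's controller list under nvme_subsystems_lock, ignores controllers already being torn down, and rejects a newcomer whose cntlid collides with a live one. A minimal sketch of the same duplicate-ID check over illustrative structures:

/* Minimal sketch of the duplicate-ID check above: walk the existing
 * controllers, skip those being torn down, and reject a newcomer
 * whose cntlid collides.
 */
#include <stdbool.h>
#include <stdio.h>

enum state { LIVE, DELETING, DEAD };

struct ctrl {
	unsigned int cntlid;
	enum state state;
};

static bool validate_cntlid(const struct ctrl *ctrls, int n,
			    const struct ctrl *newc)
{
	int i;

	for (i = 0; i < n; i++) {
		if (ctrls[i].state == DELETING || ctrls[i].state == DEAD)
			continue;	/* about to go away: ignore */
		if (ctrls[i].cntlid == newc->cntlid) {
			fprintf(stderr, "duplicate cntlid %u, rejecting\n",
				newc->cntlid);
			return false;
		}
	}
	return true;
}

int main(void)
{
	struct ctrl existing[] = { { 1, LIVE }, { 2, DELETING } };
	struct ctrl newc = { 2, LIVE };

	/* cntlid 2 only collides with a DELETING controller: accepted */
	return validate_cntlid(existing, 2, &newc) ? 0 : 1;
}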
@@ -2395,22 +2409,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
mutex_lock(&nvme_subsystems_lock);
found = __nvme_find_get_subsystem(subsys->subnqn);
if (found) {
- /*
- * Verify that the subsystem actually supports multiple
- * controllers, else bail out.
- */
- if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
- nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
- dev_err(ctrl->device,
- "ignoring ctrl due to duplicate subnqn (%s).\n",
- found->subnqn);
- nvme_put_subsystem(found);
- ret = -EINVAL;
- goto out_unlock;
- }
-
__nvme_release_subsystem(subsys);
subsys = found;
+
+ if (!nvme_validate_cntlid(subsys, ctrl, id)) {
+ ret = -EINVAL;
+ goto out_put_subsystem;
+ }
} else {
ret = device_add(&subsys->dev);
if (ret) {
@@ -2422,23 +2427,20 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems);
}
- ctrl->subsys = subsys;
- mutex_unlock(&nvme_subsystems_lock);
-
if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
dev_name(ctrl->device))) {
dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n");
- /* the transport driver will eventually put the subsystem */
- return -EINVAL;
+ goto out_put_subsystem;
}
- mutex_lock(&subsys->lock);
+ ctrl->subsys = subsys;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- mutex_unlock(&subsys->lock);
-
+ mutex_unlock(&nvme_subsystems_lock);
return 0;
+out_put_subsystem:
+ nvme_put_subsystem(subsys);
out_unlock:
mutex_unlock(&nvme_subsystems_lock);
put_device(&subsys->dev);
@@ -3605,19 +3607,18 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
u32 aer_notice_type = (result & 0xff00) >> 8;
+ trace_nvme_async_event(ctrl, aer_notice_type);
+
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
- trace_nvme_async_event(ctrl, aer_notice_type);
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- trace_nvme_async_event(ctrl, aer_notice_type);
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
- trace_nvme_async_event(ctrl, aer_notice_type);
if (!ctrl->ana_log_buf)
break;
queue_work(nvme_wq, &ctrl->ana_work);
@@ -3696,10 +3697,10 @@ static void nvme_free_ctrl(struct device *dev)
__free_page(ctrl->discard_page);
if (subsys) {
- mutex_lock(&subsys->lock);
+ mutex_lock(&nvme_subsystems_lock);
list_del(&ctrl->subsys_entry);
- mutex_unlock(&subsys->lock);
sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
+ mutex_unlock(&nvme_subsystems_lock);
}
ctrl->ops->free_ctrl(ctrl);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 592d1e61ef7e..5838f7cd53ac 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -978,7 +978,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_DISABLE_SQFLOW)
static struct nvme_ctrl *
-nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+nvmf_create_ctrl(struct device *dev, const char *buf)
{
struct nvmf_ctrl_options *opts;
struct nvmf_transport_ops *ops;
@@ -1073,7 +1073,7 @@ static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
goto out_unlock;
}
- ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
+ ctrl = nvmf_create_ctrl(nvmf_device, buf);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
goto out_unlock;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9544eb60f725..dd8169bbf0d2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -202,7 +202,7 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-
+static struct workqueue_struct *nvme_fc_wq;
/*
* These items are short-term. They will eventually be moved into
@@ -2054,7 +2054,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !schedule_work(&ctrl->err_work)) {
+ if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
atomic_set(&ctrl->err_work_active, 0);
WARN_ON(1);
}
@@ -3399,6 +3399,10 @@ static int __init nvme_fc_init_module(void)
{
int ret;
+ nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+ if (!nvme_fc_wq)
+ return -ENOMEM;
+
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3416,7 +3420,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- return ret;
+ goto out_destroy_wq;
}
/*
@@ -3440,6 +3444,9 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
+out_destroy_wq:
+ destroy_workqueue(nvme_fc_wq);
+
return ret;
}
@@ -3456,6 +3463,7 @@ static void __exit nvme_fc_exit_module(void)
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
+ destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 949e29e1d782..4f20a10b39d3 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -977,6 +977,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
geo->ext = ns->ext;
+ geo->mdts = ns->ctrl->max_hw_sectors;
dev->q = q;
memcpy(dev->name, disk_name, DISK_NAME_LEN);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5c9429d41120..499acf07d61a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
} else if (ns->head->disk) {
sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
- ctrl->cntlid, ns->head->instance);
+ ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
} else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3e4fb891a95a..2a8708c9ac18 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1296,6 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
switch (dev->ctrl.state) {
case NVME_CTRL_DELETING:
shutdown = true;
+ /* fall through */
case NVME_CTRL_CONNECTING:
case NVME_CTRL_RESETTING:
dev_warn_ratelimited(dev->ctrl.device,
@@ -2280,8 +2281,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
return ret;
}
dev->ctrl.tagset = &dev->tagset;
-
- nvme_dbbuf_set(dev);
} else {
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
@@ -2289,6 +2288,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_free_queues(dev, dev->online_queues);
}
+ nvme_dbbuf_set(dev);
return 0;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e1824c2e0a1c..f383146e7d0f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -697,15 +697,6 @@ out_free_queues:
return ret;
}
-static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
- struct blk_mq_tag_set *set)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
- blk_mq_free_tag_set(set);
- nvme_rdma_dev_put(ctrl->device);
-}
-
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
bool admin)
{
@@ -744,24 +735,9 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
ret = blk_mq_alloc_tag_set(set);
if (ret)
- goto out;
-
- /*
- * We need a reference on the device as long as the tag_set is alive,
- * as the MRs in the request structures need a valid ib_device.
- */
- ret = nvme_rdma_dev_get(ctrl->device);
- if (!ret) {
- ret = -EINVAL;
- goto out_free_tagset;
- }
+ return ERR_PTR(ret);
return set;
-
-out_free_tagset:
- blk_mq_free_tag_set(set);
-out:
- return ERR_PTR(ret);
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -769,7 +745,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -847,7 +823,7 @@ out_cleanup_queue:
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -862,7 +838,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
}
@@ -903,7 +879,7 @@ out_cleanup_connect_q:
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 97d3c77365b8..e71502d141ed 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -167,6 +167,7 @@ TRACE_EVENT(nvme_async_event,
aer_name(NVME_AER_NOTICE_NS_CHANGED),
aer_name(NVME_AER_NOTICE_ANA),
aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
aer_name(NVME_AER_ERROR),
aer_name(NVME_AER_SMART),
aer_name(NVME_AER_CSS),
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index f89f9d02e788..c09039eea707 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3827,7 +3827,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
- "raw write not track aligned (%lu,%lu) req %p",
+ "raw write not track aligned (%llu,%llu) req %p",
start_padding_sectors, end_padding_sectors, req);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index cfce255521ac..7b7620de2acd 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -205,17 +205,22 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
- int i;
+ int i = 1;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
- for (i = 1; i < count; i++) {
+ for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* merge PENDING into EMPTY: */
@@ -228,6 +233,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (q->slsb.val[bufnr] != __state)
break;
}
+
+out:
*state = __state;
return i;
}
@@ -382,7 +389,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0, 0);
+ return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -719,11 +726,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
multicast_outbound(q)))
qdio_siga_sync_q(q);
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
index e331cd97e83b..882ee538ca30 100644
--- a/drivers/s390/cio/trace.c
+++ b/drivers/s390/cio/trace.c
@@ -21,5 +21,4 @@ EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
-EXPORT_TRACEPOINT_SYMBOL(s390_cio_rchp);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 0ebb29b6fd6d..4803139bce14 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -274,29 +274,6 @@ DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
TP_ARGS(schid, cc)
);
-/**
- * s390_cio_rchp - Reset Channel Path (RCHP) instruction was performed
- * @chpid: Channel-Path Identifier
- * @cc: Condition code
- */
-TRACE_EVENT(s390_cio_rchp,
- TP_PROTO(struct chp_id chpid, int cc),
- TP_ARGS(chpid, cc),
- TP_STRUCT__entry(
- __field(u8, cssid)
- __field(u8, id)
- __field(int, cc)
- ),
- TP_fast_assign(
- __entry->cssid = chpid.cssid;
- __entry->id = chpid.id;
- __entry->cc = cc;
- ),
- TP_printk("chpid=%x.%02x cc=%d", __entry->cssid, __entry->id,
- __entry->cc
- )
-);
-
#define CHSC_MAX_REQUEST_LEN 64
#define CHSC_MAX_RESPONSE_LEN 64
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 1ba4a5154fb5..64037b0a8387 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1266,7 +1266,7 @@ static int prp_registered(struct v4l2_subdev *sd)
if (ret)
return ret;
- ret = imx_media_capture_device_register(priv->vdev);
+ ret = imx_media_capture_device_register(priv->md, priv->vdev);
if (ret)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b7ce9d439279..9430c835c434 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -701,7 +701,8 @@ void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
-int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
+int imx_media_capture_device_register(struct imx_media_dev *md,
+ struct imx_media_video_dev *vdev)
{
struct capture_priv *priv = to_capture_priv(vdev);
struct v4l2_subdev *sd = priv->src_sd;
@@ -710,8 +711,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
struct v4l2_subdev_format fmt_src;
int ret;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+ priv->md = md;
vfd->v4l2_dev = sd->v4l2_dev;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 28fe66052cc7..1d248aca40a9 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1812,7 +1812,7 @@ static int csi_registered(struct v4l2_subdev *sd)
if (ret)
goto free_fim;
- ret = imx_media_capture_device_register(priv->vdev);
+ ret = imx_media_capture_device_register(priv->md, priv->vdev);
if (ret)
goto free_fim;
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index eb59ba0c3b62..6587aa49e005 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -268,7 +268,8 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct imx_media_video_dev *
imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad);
void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
-int imx_media_capture_device_register(struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_dev *md,
+ struct imx_media_video_dev *vdev);
void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 18eb5d3ecf10..a708a0340eb1 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1126,7 +1126,7 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = imx_media_capture_device_register(csi->vdev);
+ ret = imx_media_capture_device_register(csi->imxmd, csi->vdev);
if (ret < 0)
return ret;
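
Note: all three subdev drivers now hand their cached imx_media_dev pointer straight to the capture helper instead of letting it be dug out of drvdata inside imx_media_capture_device_register(). A sketch of the new calling convention (example_priv and sd_to_priv() are hypothetical names):

	static int example_registered(struct v4l2_subdev *sd)
	{
		struct example_priv *priv = sd_to_priv(sd);

		/* Pass the media device explicitly; the helper no longer
		 * calls dev_get_drvdata() on the v4l2_dev's parent. */
		return imx_media_capture_device_register(priv->md,
							 priv->vdev);
	}
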
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
index 58721c46fba4..8bbc905b26c8 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
@@ -352,7 +352,7 @@ static int rockchip_vpu_video_device_register(struct rockchip_vpu_dev *vpu)
vpu->vfd_enc = vfd;
video_set_drvdata(vfd, vpu);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
goto err_free_dev;
@@ -463,6 +463,8 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
vpu->mdev.dev = vpu->dev;
strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
+ sizeof(vpu->mdev.bus_info));
media_device_init(&vpu->mdev);
vpu->v4l2_dev.mdev = &vpu->mdev;
@@ -480,15 +482,18 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
return 0;
err_video_dev_unreg:
if (vpu->vfd_enc) {
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
video_unregister_device(vpu->vfd_enc);
video_device_release(vpu->vfd_enc);
}
err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return ret;
}
@@ -500,15 +505,16 @@ static int rockchip_vpu_remove(struct platform_device *pdev)
v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
media_device_unregister(&vpu->mdev);
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- v4l2_m2m_release(vpu->m2m_dev);
- media_device_cleanup(&vpu->mdev);
if (vpu->vfd_enc) {
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
video_unregister_device(vpu->vfd_enc);
video_device_release(vpu->vfd_enc);
}
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
v4l2_device_unregister(&vpu->v4l2_dev);
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return 0;
}
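
Note: the reworked probe error path and remove() above now mirror each other -- the m2m media controller is unregistered while the video device still exists, media_device_cleanup() runs only after its last user is gone, and both paths undo autosuspend before disabling runtime PM. The runtime-PM pairing, as a sketch:

	pm_runtime_use_autosuspend(dev);	/* probe */
	pm_runtime_enable(dev);
	/* ... */
	pm_runtime_dont_use_autosuspend(dev);	/* every exit path */
	pm_runtime_disable(dev);
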
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
index fb5e36aedd8c..dcbfc3cbc9f3 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
@@ -152,9 +152,10 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct rockchip_vpu_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
- strscpy(cap->card, vpu->vfd_enc->name, sizeof(cap->card));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
vpu->dev->driver->name);
return 0;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 66a709d5d6b9..15bdd25780be 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig THERMAL
- tristate "Generic Thermal sysfs driver"
+ bool "Generic Thermal sysfs driver"
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
@@ -11,7 +11,7 @@ menuconfig THERMAL
Each thermal zone contains its own temperature, trip points,
cooling devices.
All platforms with ACPI thermal support can use this driver.
- If you want this support, you should say Y or M here.
+ If you want this support, you should say Y here.
if THERMAL
@@ -24,7 +24,6 @@ config THERMAL_STATISTICS
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
- depends on THERMAL
default 0
help
Thermal subsystem will issue a graceful shutdown when
@@ -149,10 +148,9 @@ config THERMAL_GOV_POWER_ALLOCATOR
allocating and limiting power to devices.
config CPU_THERMAL
- bool "generic cpu cooling support"
+ bool "Generic cpu cooling support"
depends on CPU_FREQ
depends on THERMAL_OF
- depends on THERMAL=y
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists

diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index 2e013eeb4a1d..2c727a820759 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -1,6 +1,5 @@
config INTEL_POWERCLAMP
tristate "Intel PowerClamp idle injection driver"
- depends on THERMAL
depends on X86
depends on CPU_SUP_INTEL
help
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 0c19fcd56a0d..79a7df2baa92 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -220,6 +220,7 @@ static int int3403_add(struct platform_device *pdev)
{
struct int3403_priv *priv;
int result = 0;
+ unsigned long long tmp;
acpi_status status;
priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv),
@@ -234,19 +235,18 @@ static int int3403_add(struct platform_device *pdev)
goto err;
}
- status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
- NULL, &priv->type);
- if (ACPI_FAILURE(status)) {
- unsigned long long tmp;
- status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
- NULL, &tmp);
+ status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
+ NULL, &priv->type);
if (ACPI_FAILURE(status)) {
result = -EINVAL;
goto err;
- } else {
- priv->type = INT3403_TYPE_SENSOR;
}
+ } else {
+ priv->type = INT3403_TYPE_SENSOR;
}
platform_set_drvdata(pdev, priv);
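
Note: restating the reordered probe logic above as straight-line code -- _TMP is evaluated first, and only if the device has no temperature method does the driver fall back to reading its PTYP type object:

	unsigned long long tmp;
	acpi_status status;

	status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
				       NULL, &tmp);
	if (ACPI_SUCCESS(status)) {
		priv->type = INT3403_TYPE_SENSOR;	/* has _TMP */
	} else {
		status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
					       NULL, &priv->type);
		if (ACPI_FAILURE(status))
			return -EINVAL;		/* neither method exists */
	}
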
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 8e1cf4d789be..2e6071a82da2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -81,22 +81,13 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct pci_dev *pci_dev; \
- struct platform_device *pdev; \
- struct proc_thermal_device *proc_dev; \
+ struct proc_thermal_device *proc_dev = dev_get_drvdata(dev); \
\
if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
return 0; \
} \
\
- if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
- pdev = to_platform_device(dev); \
- proc_dev = platform_get_drvdata(pdev); \
- } else { \
- pci_dev = to_pci_dev(dev); \
- proc_dev = pci_get_drvdata(pci_dev); \
- } \
return sprintf(buf, "%lu\n",\
(unsigned long)proc_dev->power_limits[index].suffix * 1000); \
}
@@ -274,7 +265,7 @@ static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED);
break;
default:
- dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+ dev_dbg(proc_priv->dev, "Unsupported event [0x%x]\n", event);
break;
}
}
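
Note: the macro can drop the bus-type switch because both accessors it replaced are thin wrappers around the same call -- platform_get_drvdata(pdev) is dev_get_drvdata(&pdev->dev), and pci_get_drvdata(pci_dev) is dev_get_drvdata(&pci_dev->dev). A sketch of the simplified attribute callback (field name follows the macro's suffix expansion):

	static ssize_t example_power_limit_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
	{
		/* Works for both the platform and the PCI enumeration
		 * mode, since drvdata hangs off struct device either way. */
		struct proc_thermal_device *proc_dev = dev_get_drvdata(dev);

		return sprintf(buf, "%lu\n",
			       (unsigned long)proc_dev->power_limits[0].min_uw * 1000);
	}
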
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index cdb455ffd575..3ce20fec86a2 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -1,6 +1,5 @@
config QCOM_TSENS
tristate "Qualcomm TSENS Temperature Alarm"
- depends on THERMAL
depends on QCOM_QFPROM
depends on ARCH_QCOM || COMPILE_TEST
help
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index e0b530603db6..46cfb7de4eb2 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -266,7 +266,7 @@ static int __init thermal_register_governors(void)
return thermal_gov_power_allocator_register();
}
-static void thermal_unregister_governors(void)
+static void __init thermal_unregister_governors(void)
{
thermal_gov_step_wise_unregister();
thermal_gov_fair_share_unregister();
@@ -941,7 +941,7 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
*/
static struct thermal_cooling_device *
__thermal_cooling_device_register(struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
@@ -1015,7 +1015,7 @@ __thermal_cooling_device_register(struct device_node *np,
* ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
+thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return __thermal_cooling_device_register(NULL, type, devdata, ops);
@@ -1039,7 +1039,7 @@ EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
*/
struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return __thermal_cooling_device_register(np, type, devdata, ops);
@@ -1543,6 +1543,7 @@ static int thermal_pm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
struct thermal_zone_device *tz;
+ enum thermal_device_mode tz_mode;
switch (mode) {
case PM_HIBERNATION_PREPARE:
@@ -1555,6 +1556,13 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
+ tz_mode = THERMAL_DEVICE_ENABLED;
+ if (tz->ops->get_mode)
+ tz->ops->get_mode(tz, &tz_mode);
+
+ if (tz_mode == THERMAL_DEVICE_DISABLED)
+ continue;
+
thermal_zone_device_init(tz);
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
@@ -1612,19 +1620,4 @@ error:
mutex_destroy(&poweroff_lock);
return result;
}
-
-static void __exit thermal_exit(void)
-{
- unregister_pm_notifier(&thermal_pm_nb);
- of_thermal_destroy_zones();
- genetlink_exit();
- class_unregister(&thermal_class);
- thermal_unregister_governors();
- ida_destroy(&thermal_tz_ida);
- ida_destroy(&thermal_cdev_ida);
- mutex_destroy(&thermal_list_lock);
- mutex_destroy(&thermal_governor_lock);
-}
-
fs_initcall(thermal_init);
-module_exit(thermal_exit);
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index 75155bde2b88..31f53fa77e4a 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
static int __init hvc_sbi_console_init(void)
{
hvc_instantiate(0, 0, &hvc_sbi_ops);
- add_preferred_console("hvc", 0, NULL);
return 0;
}
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 967db336d11a..9eaff55df7b4 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -251,7 +251,7 @@ struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry
_enter("%s", cell->name);
ret = dns_query("afsdb", cell->name, cell->name_len, "srv=1",
- &result, _expiry);
+ &result, _expiry, true);
if (ret < 0) {
_leave(" = %d [dns]", ret);
return ERR_PTR(ret);
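
Note: dns_query() grew a trailing bool in this series; this VL-server lookup passes true, while the dynroot existence probe further down passes false. Neither the parameter's name nor its exact semantics are visible in these hunks, so the sketch below marks both as assumptions:

	/* Call shape taken from the hunk above; the meaning of the final
	 * argument is an assumption here. */
	ret = dns_query("afsdb", cell->name, cell->name_len, "srv=1",
			&result, _expiry, true);
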
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index d12ffb457e47..3f4e460c6655 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -23,6 +23,9 @@
#define AFSPATHMAX 1024 /* Maximum length of a pathname plus NUL */
#define AFSOPAQUEMAX 1024 /* Maximum length of an opaque field */
+#define AFS_VL_MAX_LIFESPAN (120 * HZ)
+#define AFS_PROBE_MAX_LIFESPAN (30 * HZ)
+
typedef u64 afs_volid_t;
typedef u64 afs_vnodeid_t;
typedef u64 afs_dataversion_t;
@@ -69,8 +72,8 @@ typedef enum {
struct afs_callback {
time64_t expires_at; /* Time at which expires */
- unsigned version; /* Callback version */
- afs_callback_type_t type; /* Type of callback */
+ //unsigned version; /* Callback version */
+ //afs_callback_type_t type; /* Type of callback */
};
struct afs_callback_break {
@@ -144,6 +147,15 @@ struct afs_file_status {
u32 abort_code; /* Abort if bulk-fetching this failed */
};
+struct afs_status_cb {
+ struct afs_file_status status;
+ struct afs_callback callback;
+ unsigned int cb_break; /* Pre-op callback break counter */
+ bool have_status; /* True if status record was retrieved */
+ bool have_cb; /* True if cb record was retrieved */
+ bool have_error; /* True if status.abort_code indicates an error */
+};
+
/*
* AFS file status change request
*/
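
Note: struct afs_status_cb becomes the standard in/out record for RPC status handling in the hunks that follow. The recurring caller pattern, condensed from those call sites -- allocate one record per vnode the operation touches, let the wire decoder fill it, then apply it via afs_vnode_commit_status():

	struct afs_status_cb *scb;

	scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;
	/* ... run the RPC, decoding into scb[0] (dir) and scb[1] (file),
	 * then commit each against its pre-op callback-break counter: */
	afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
				&data_version, &scb[0]);
	kfree(scb);
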
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 128f2dbe256a..d441bef72163 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -94,15 +94,15 @@ int afs_register_server_cb_interest(struct afs_vnode *vnode,
struct afs_server *server = entry->server;
again:
- if (vnode->cb_interest &&
- likely(vnode->cb_interest == entry->cb_interest))
+ vcbi = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->io_lock));
+ if (vcbi && likely(vcbi == entry->cb_interest))
return 0;
read_lock(&slist->lock);
cbi = afs_get_cb_interest(entry->cb_interest);
read_unlock(&slist->lock);
- vcbi = vnode->cb_interest;
if (vcbi) {
if (vcbi == cbi) {
afs_put_cb_interest(afs_v2net(vnode), cbi);
@@ -114,8 +114,9 @@ again:
*/
if (cbi && vcbi->server == cbi->server) {
write_seqlock(&vnode->cb_lock);
- old = vnode->cb_interest;
- vnode->cb_interest = cbi;
+ old = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->cb_lock.lock));
+ rcu_assign_pointer(vnode->cb_interest, cbi);
write_sequnlock(&vnode->cb_lock);
afs_put_cb_interest(afs_v2net(vnode), old);
return 0;
@@ -160,8 +161,9 @@ again:
*/
write_seqlock(&vnode->cb_lock);
- old = vnode->cb_interest;
- vnode->cb_interest = cbi;
+ old = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->cb_lock.lock));
+ rcu_assign_pointer(vnode->cb_interest, cbi);
vnode->cb_s_break = cbi->server->cb_s_break;
vnode->cb_v_break = vnode->volume->cb_v_break;
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
@@ -191,10 +193,11 @@ void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
vi = NULL;
write_unlock(&cbi->server->cb_break_lock);
- kfree(vi);
+ if (vi)
+ kfree_rcu(vi, rcu);
afs_put_server(net, cbi->server);
}
- kfree(cbi);
+ kfree_rcu(cbi, rcu);
}
}
@@ -218,14 +221,8 @@ void __afs_break_callback(struct afs_vnode *vnode)
vnode->cb_break++;
afs_clear_permits(vnode);
- spin_lock(&vnode->lock);
-
- _debug("break callback");
-
- if (list_empty(&vnode->granted_locks) &&
- !list_empty(&vnode->pending_locks))
+ if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
afs_lock_may_be_available(vnode);
- spin_unlock(&vnode->lock);
}
}
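
Note: the conversions above move vnode->cb_interest fully under RCU. The conventions, exactly as used in the hunks -- locked readers use rcu_dereference_protected() with a matching lockdep expression, writers publish with rcu_assign_pointer(), and the displaced object is freed with kfree_rcu() against its rcu_head member:

	old = rcu_dereference_protected(vnode->cb_interest,
					lockdep_is_held(&vnode->cb_lock.lock));
	rcu_assign_pointer(vnode->cb_interest, cbi);
	if (old)
		kfree_rcu(old, rcu);	/* freed after a grace period */
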
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 9de46116c749..9c3b07ba2222 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -123,6 +123,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
const char *addresses)
{
+ struct afs_vlserver_list *vllist;
struct afs_cell *cell;
int i, ret;
@@ -151,18 +152,14 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
atomic_set(&cell->usage, 2);
INIT_WORK(&cell->manager, afs_manage_cell);
- cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
- (1 << AFS_CELL_FL_NO_LOOKUP_YET));
INIT_LIST_HEAD(&cell->proc_volumes);
rwlock_init(&cell->proc_lock);
rwlock_init(&cell->vl_servers_lock);
- /* Fill in the VL server list if we were given a list of addresses to
- * use.
+ /* Provide a VL server list, filling it in if we were given a list of
+ * addresses to use.
*/
if (addresses) {
- struct afs_vlserver_list *vllist;
-
vllist = afs_parse_text_addrs(net,
addresses, strlen(addresses), ':',
VL_SERVICE, AFS_VL_PORT);
@@ -171,19 +168,32 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
goto parse_failed;
}
- rcu_assign_pointer(cell->vl_servers, vllist);
+ vllist->source = DNS_RECORD_FROM_CONFIG;
+ vllist->status = DNS_LOOKUP_NOT_DONE;
cell->dns_expiry = TIME64_MAX;
- __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags);
} else {
+ ret = -ENOMEM;
+ vllist = afs_alloc_vlserver_list(0);
+ if (!vllist)
+ goto error;
+ vllist->source = DNS_RECORD_UNAVAILABLE;
+ vllist->status = DNS_LOOKUP_NOT_DONE;
cell->dns_expiry = ktime_get_real_seconds();
}
+ rcu_assign_pointer(cell->vl_servers, vllist);
+
+ cell->dns_source = vllist->source;
+ cell->dns_status = vllist->status;
+ smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
+
_leave(" = %p", cell);
return cell;
parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
+error:
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
@@ -208,6 +218,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
{
struct afs_cell *cell, *candidate, *cursor;
struct rb_node *parent, **pp;
+ enum afs_cell_state state;
int ret, n;
_enter("%s,%s", name, vllist);
@@ -267,18 +278,16 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
wait_for_cell:
_debug("wait_for_cell");
- ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
- smp_rmb();
-
- switch (READ_ONCE(cell->state)) {
- case AFS_CELL_FAILED:
+ wait_var_event(&cell->state,
+ ({
+ state = smp_load_acquire(&cell->state); /* vs error */
+ state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
+ }));
+
+ /* Check the state obtained from the wait check. */
+ if (state == AFS_CELL_FAILED) {
ret = cell->error;
goto error;
- default:
- _debug("weird %u %d", cell->state, cell->error);
- goto error;
- case AFS_CELL_ACTIVE:
- break;
}
_leave(" = %p [cell]", cell);
@@ -360,16 +369,46 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
/*
* Update a cell's VL server address list from the DNS.
*/
-static void afs_update_cell(struct afs_cell *cell)
+static int afs_update_cell(struct afs_cell *cell)
{
- struct afs_vlserver_list *vllist, *old;
+ struct afs_vlserver_list *vllist, *old = NULL, *p;
unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
time64_t now, expiry = 0;
+ int ret = 0;
_enter("%s", cell->name);
vllist = afs_dns_query(cell, &expiry);
+ if (IS_ERR(vllist)) {
+ ret = PTR_ERR(vllist);
+
+ _debug("%s: fail %d", cell->name, ret);
+ if (ret == -ENOMEM)
+ goto out_wake;
+
+ vllist = afs_alloc_vlserver_list(0);
+ if (!vllist) {
+ ret = -ENOMEM;
+ goto out_wake;
+ }
+
+ switch (ret) {
+ case -ENODATA:
+ case -EDESTADDRREQ:
+ vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
+ break;
+ case -EAGAIN:
+ case -ECONNREFUSED:
+ vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
+ break;
+ default:
+ vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
+ break;
+ }
+ }
+
+ _debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
+ cell->dns_status = vllist->status;
now = ktime_get_real_seconds();
if (min_ttl > max_ttl)
@@ -379,48 +418,47 @@ static void afs_update_cell(struct afs_cell *cell)
else if (expiry > now + max_ttl)
expiry = now + max_ttl;
- if (IS_ERR(vllist)) {
- switch (PTR_ERR(vllist)) {
- case -ENODATA:
- case -EDESTADDRREQ:
+ _debug("%s: status %d", cell->name, vllist->status);
+ if (vllist->source == DNS_RECORD_UNAVAILABLE) {
+ switch (vllist->status) {
+ case DNS_LOOKUP_GOT_NOT_FOUND:
/* The DNS said that the cell does not exist or there
* weren't any addresses to be had.
*/
- set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
- clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
cell->dns_expiry = expiry;
break;
- case -EAGAIN:
- case -ECONNREFUSED:
+ case DNS_LOOKUP_BAD:
+ case DNS_LOOKUP_GOT_LOCAL_FAILURE:
+ case DNS_LOOKUP_GOT_TEMP_FAILURE:
+ case DNS_LOOKUP_GOT_NS_FAILURE:
default:
- set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
cell->dns_expiry = now + 10;
break;
}
-
- cell->error = -EDESTADDRREQ;
} else {
- clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
- clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
-
- /* Exclusion on changing vl_addrs is achieved by a
- * non-reentrant work item.
- */
- old = rcu_dereference_protected(cell->vl_servers, true);
- rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_expiry = expiry;
-
- if (old)
- afs_put_vlserverlist(cell->net, old);
}
- if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
- wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);
+ /* Replace the VL server list if the new record has servers or the old
+ * record doesn't.
+ */
+ write_lock(&cell->vl_servers_lock);
+ p = rcu_dereference_protected(cell->vl_servers, true);
+ if (vllist->nr_servers > 0 || p->nr_servers == 0) {
+ rcu_assign_pointer(cell->vl_servers, vllist);
+ cell->dns_source = vllist->source;
+ old = p;
+ }
+ write_unlock(&cell->vl_servers_lock);
+ afs_put_vlserverlist(cell->net, old);
- now = ktime_get_real_seconds();
- afs_set_cell_timer(cell->net, cell->dns_expiry - now);
- _leave("");
+out_wake:
+ smp_store_release(&cell->dns_lookup_count,
+ cell->dns_lookup_count + 1); /* vs source/status */
+ wake_up_var(&cell->dns_lookup_count);
+ _leave(" = %d", ret);
+ return ret;
}
/*
@@ -491,8 +529,7 @@ void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
now = ktime_get_real_seconds();
cell->last_inactive = now;
expire_delay = 0;
- if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
- !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
+ if (cell->vl_servers->nr_servers)
expire_delay = afs_cell_gc_delay;
if (atomic_dec_return(&cell->usage) > 1)
@@ -623,11 +660,13 @@ again:
goto final_destruction;
if (cell->state == AFS_CELL_FAILED)
goto done;
- cell->state = AFS_CELL_UNSET;
+ smp_store_release(&cell->state, AFS_CELL_UNSET);
+ wake_up_var(&cell->state);
goto again;
case AFS_CELL_UNSET:
- cell->state = AFS_CELL_ACTIVATING;
+ smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
+ wake_up_var(&cell->state);
goto again;
case AFS_CELL_ACTIVATING:
@@ -635,28 +674,29 @@ again:
if (ret < 0)
goto activation_failed;
- cell->state = AFS_CELL_ACTIVE;
- smp_wmb();
- clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
- wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
+ smp_store_release(&cell->state, AFS_CELL_ACTIVE);
+ wake_up_var(&cell->state);
goto again;
case AFS_CELL_ACTIVE:
if (atomic_read(&cell->usage) > 1) {
- time64_t now = ktime_get_real_seconds();
- if (cell->dns_expiry <= now && net->live)
- afs_update_cell(cell);
+ if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
+ ret = afs_update_cell(cell);
+ if (ret < 0)
+ cell->error = ret;
+ }
goto done;
}
- cell->state = AFS_CELL_DEACTIVATING;
+ smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
+ wake_up_var(&cell->state);
goto again;
case AFS_CELL_DEACTIVATING:
- set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
if (atomic_read(&cell->usage) > 1)
goto reverse_deactivation;
afs_deactivate_cell(net, cell);
- cell->state = AFS_CELL_INACTIVE;
+ smp_store_release(&cell->state, AFS_CELL_INACTIVE);
+ wake_up_var(&cell->state);
goto again;
default:
@@ -669,17 +709,13 @@ activation_failed:
cell->error = ret;
afs_deactivate_cell(net, cell);
- cell->state = AFS_CELL_FAILED;
- smp_wmb();
- if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
- wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
+ smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
+ wake_up_var(&cell->state);
goto again;
reverse_deactivation:
- cell->state = AFS_CELL_ACTIVE;
- smp_wmb();
- clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
- wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
+ smp_store_release(&cell->state, AFS_CELL_ACTIVE);
+ wake_up_var(&cell->state);
_leave(" [deact->act]");
return;
@@ -739,11 +775,16 @@ void afs_manage_cells(struct work_struct *work)
}
if (usage == 1) {
+ struct afs_vlserver_list *vllist;
time64_t expire_at = cell->last_inactive;
- if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
- !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
+ read_lock(&cell->vl_servers_lock);
+ vllist = rcu_dereference_protected(
+ cell->vl_servers,
+ lockdep_is_held(&cell->vl_servers_lock));
+ if (vllist->nr_servers > 0)
expire_at += afs_cell_gc_delay;
+ read_unlock(&cell->vl_servers_lock);
if (purging || expire_at <= now)
sched_cell = true;
else if (expire_at < next_manage)
@@ -751,10 +792,8 @@ void afs_manage_cells(struct work_struct *work)
}
if (!purging) {
- if (cell->dns_expiry <= now)
+ if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
sched_cell = true;
- else if (cell->dns_expiry <= next_manage)
- next_manage = cell->dns_expiry;
}
if (sched_cell)
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 748090014519..01437cfe5432 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -213,7 +213,7 @@ static int afs_find_cm_server_by_peer(struct afs_call *call)
return 0;
}
- call->cm_server = server;
+ call->server = server;
return afs_record_cm_probe(call, server);
}
@@ -234,7 +234,7 @@ static int afs_find_cm_server_by_uuid(struct afs_call *call,
return 0;
}
- call->cm_server = server;
+ call->server = server;
return afs_record_cm_probe(call, server);
}
@@ -260,8 +260,8 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
* server holds up change visibility till it receives our reply so as
* to maintain cache coherency.
*/
- if (call->cm_server)
- afs_break_callbacks(call->cm_server, call->count, call->request);
+ if (call->server)
+ afs_break_callbacks(call->server, call->count, call->request);
afs_send_empty_reply(call);
afs_put_call(call);
@@ -376,10 +376,10 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
{
struct afs_call *call = container_of(work, struct afs_call, work);
- _enter("{%p}", call->cm_server);
+ _enter("{%p}", call->server);
- if (call->cm_server)
- afs_init_callback_state(call->cm_server);
+ if (call->server)
+ afs_init_callback_state(call->server);
afs_send_empty_reply(call);
afs_put_call(call);
_leave("");
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 9a466be583d2..79d93a26759a 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
+#include "afs_fs.h"
#include "xdr_fs.h"
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
@@ -102,8 +103,8 @@ struct afs_lookup_cookie {
bool found;
bool one_only;
unsigned short nr_fids;
- struct afs_file_status *statuses;
- struct afs_callback *callbacks;
+ struct inode **inodes;
+ struct afs_status_cb *statuses;
struct afs_fid fids[50];
};
@@ -638,12 +639,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct key *key)
{
struct afs_lookup_cookie *cookie;
- struct afs_cb_interest *cbi = NULL;
+ struct afs_cb_interest *dcbi, *cbi = NULL;
struct afs_super_info *as = dir->i_sb->s_fs_info;
- struct afs_iget_data data;
+ struct afs_status_cb *scb;
+ struct afs_iget_data iget_data;
struct afs_fs_cursor fc;
- struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct inode *inode = NULL;
+ struct afs_server *server;
+ struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
+ struct inode *inode = NULL, *ti;
int ret, i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -657,10 +660,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */
read_seqlock_excl(&dvnode->cb_lock);
- if (dvnode->cb_interest &&
- dvnode->cb_interest->server &&
- test_bit(AFS_SERVER_FL_NO_IBULK, &dvnode->cb_interest->server->flags))
- cookie->one_only = true;
+ dcbi = rcu_dereference_protected(dvnode->cb_interest,
+ lockdep_is_held(&dvnode->cb_lock.lock));
+ if (dcbi) {
+ server = dcbi->server;
+ if (server &&
+ test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags))
+ cookie->one_only = true;
+ }
read_sequnlock_excl(&dvnode->cb_lock);
for (i = 0; i < 50; i++)
@@ -678,24 +685,43 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
goto out;
/* Check to see if we already have an inode for the primary fid. */
- data.volume = dvnode->volume;
- data.fid = cookie->fids[0];
- inode = ilookup5(dir->i_sb, cookie->fids[0].vnode, afs_iget5_test, &data);
+ iget_data.fid = cookie->fids[0];
+ iget_data.volume = dvnode->volume;
+ iget_data.cb_v_break = dvnode->volume->cb_v_break;
+ iget_data.cb_s_break = 0;
+ inode = ilookup5(dir->i_sb, cookie->fids[0].vnode,
+ afs_iget5_test, &iget_data);
if (inode)
goto out;
/* Need space for examining all the selected files */
inode = ERR_PTR(-ENOMEM);
- cookie->statuses = kcalloc(cookie->nr_fids, sizeof(struct afs_file_status),
- GFP_KERNEL);
+ cookie->statuses = kvcalloc(cookie->nr_fids, sizeof(struct afs_status_cb),
+ GFP_KERNEL);
if (!cookie->statuses)
goto out;
- cookie->callbacks = kcalloc(cookie->nr_fids, sizeof(struct afs_callback),
- GFP_KERNEL);
- if (!cookie->callbacks)
+ cookie->inodes = kcalloc(cookie->nr_fids, sizeof(struct inode *),
+ GFP_KERNEL);
+ if (!cookie->inodes)
goto out_s;
+ for (i = 1; i < cookie->nr_fids; i++) {
+ scb = &cookie->statuses[i];
+
+ /* Find any inodes that already exist and get their
+ * callback counters.
+ */
+ iget_data.fid = cookie->fids[i];
+ ti = ilookup5_nowait(dir->i_sb, iget_data.fid.vnode,
+ afs_iget5_test, &iget_data);
+ if (!IS_ERR_OR_NULL(ti)) {
+ vnode = AFS_FS_I(ti);
+ scb->cb_break = afs_calc_vnode_cb_break(vnode);
+ cookie->inodes[i] = ti;
+ }
+ }
+
/* Try FS.InlineBulkStatus first. Abort codes for the individual
* lookups contained therein are stored in the reply without aborting
* the whole operation.
@@ -704,7 +730,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
goto no_inline_bulk_status;
inode = ERR_PTR(-ERESTARTSYS);
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
while (afs_select_fileserver(&fc)) {
if (test_bit(AFS_SERVER_FL_NO_IBULK,
&fc.cbi->server->flags)) {
@@ -712,11 +738,12 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
fc.ac.error = -ECONNABORTED;
break;
}
+ iget_data.cb_v_break = dvnode->volume->cb_v_break;
+ iget_data.cb_s_break = fc.cbi->server->cb_s_break;
afs_fs_inline_bulk_status(&fc,
afs_v2net(dvnode),
cookie->fids,
cookie->statuses,
- cookie->callbacks,
cookie->nr_fids, NULL);
}
@@ -737,15 +764,16 @@ no_inline_bulk_status:
* any of the lookups fails - so, for the moment, revert to
* FS.FetchStatus for just the primary fid.
*/
- cookie->nr_fids = 1;
inode = ERR_PTR(-ERESTARTSYS);
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
while (afs_select_fileserver(&fc)) {
+ iget_data.cb_v_break = dvnode->volume->cb_v_break;
+ iget_data.cb_s_break = fc.cbi->server->cb_s_break;
+ scb = &cookie->statuses[0];
afs_fs_fetch_status(&fc,
afs_v2net(dvnode),
cookie->fids,
- cookie->statuses,
- cookie->callbacks,
+ scb,
NULL);
}
@@ -757,26 +785,36 @@ no_inline_bulk_status:
if (IS_ERR(inode))
goto out_c;
- for (i = 0; i < cookie->nr_fids; i++)
- cookie->statuses[i].abort_code = 0;
-
success:
/* Turn all the files into inodes and save the first one - which is the
* one we actually want.
*/
- if (cookie->statuses[0].abort_code != 0)
- inode = ERR_PTR(afs_abort_to_error(cookie->statuses[0].abort_code));
+ scb = &cookie->statuses[0];
+ if (scb->status.abort_code != 0)
+ inode = ERR_PTR(afs_abort_to_error(scb->status.abort_code));
for (i = 0; i < cookie->nr_fids; i++) {
- struct inode *ti;
+ struct afs_status_cb *scb = &cookie->statuses[i];
+
+ if (!scb->have_status && !scb->have_error)
+ continue;
+
+ if (cookie->inodes[i]) {
+ afs_vnode_commit_status(&fc, AFS_FS_I(cookie->inodes[i]),
+ scb->cb_break, NULL, scb);
+ continue;
+ }
- if (cookie->statuses[i].abort_code != 0)
+ if (scb->status.abort_code != 0)
continue;
- ti = afs_iget(dir->i_sb, key, &cookie->fids[i],
- &cookie->statuses[i],
- &cookie->callbacks[i],
- cbi, dvnode);
+ iget_data.fid = cookie->fids[i];
+ ti = afs_iget(dir->i_sb, key, &iget_data, scb, cbi, dvnode);
+ if (!IS_ERR(ti))
+ afs_cache_permit(AFS_FS_I(ti), key,
+ 0 /* Assume vnode->cb_break is 0 */ +
+ iget_data.cb_v_break,
+ scb);
if (i == 0) {
inode = ti;
} else {
@@ -787,9 +825,13 @@ success:
out_c:
afs_put_cb_interest(afs_v2net(dvnode), cbi);
- kfree(cookie->callbacks);
+ if (cookie->inodes) {
+ for (i = 0; i < cookie->nr_fids; i++)
+ iput(cookie->inodes[i]);
+ kfree(cookie->inodes);
+ }
out_s:
- kfree(cookie->statuses);
+ kvfree(cookie->statuses);
out:
kfree(cookie);
return inode;
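
Note: the rewritten lookup path snapshots callback state before the bulk RPC -- for every candidate fid it probes the inode cache without blocking (ilookup5_nowait() does not wait on I_NEW) and records the vnode's callback-break counter, so the post-RPC commit can detect a callback break that raced with the fetch. The per-fid step in isolation:

	ti = ilookup5_nowait(dir->i_sb, iget_data.fid.vnode,
			     afs_iget5_test, &iget_data);
	if (!IS_ERR_OR_NULL(ti)) {
		scb->cb_break = afs_calc_vnode_cb_break(AFS_FS_I(ti));
		cookie->inodes[i] = ti;		/* iput() on the out path */
	}
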
@@ -1114,9 +1156,8 @@ void afs_d_release(struct dentry *dentry)
*/
static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
struct dentry *new_dentry,
- struct afs_fid *newfid,
- struct afs_file_status *newstatus,
- struct afs_callback *newcb)
+ struct afs_iget_data *new_data,
+ struct afs_status_cb *new_scb)
{
struct afs_vnode *vnode;
struct inode *inode;
@@ -1125,7 +1166,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
return;
inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
- newfid, newstatus, newcb, fc->cbi, fc->vnode);
+ new_data, new_scb, fc->cbi, fc->vnode);
if (IS_ERR(inode)) {
/* ENOMEM or EINTR at a really inconvenient time - just abandon
* the new directory on the server.
@@ -1136,22 +1177,29 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- afs_vnode_commit_status(fc, vnode, 0);
+ if (fc->ac.error == 0)
+ afs_cache_permit(vnode, fc->key, vnode->cb_break, new_scb);
d_instantiate(new_dentry, inode);
}
+static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
+ struct afs_iget_data *iget_data)
+{
+ iget_data->volume = fc->vnode->volume;
+ iget_data->cb_v_break = fc->vnode->volume->cb_v_break;
+ iget_data->cb_s_break = fc->cbi->server->cb_s_break;
+}
+
/*
* create a directory on an AFS filesystem
*/
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- struct afs_file_status newstatus;
+ struct afs_iget_data iget_data;
+ struct afs_status_cb *scb;
struct afs_fs_cursor fc;
- struct afs_callback newcb;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct afs_fid newfid;
struct key *key;
- u64 data_version = dvnode->status.data_version;
int ret;
mode |= S_IFDIR;
@@ -1159,23 +1207,32 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
_enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
+ ret = -ENOMEM;
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error;
+
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
- goto error;
+ goto error_scb;
}
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t data_version = dvnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
- &newfid, &newstatus, &newcb);
+ afs_prep_for_new_inode(&fc, &iget_data);
+ afs_fs_create(&fc, dentry->d_name.name, mode,
+ &scb[0], &iget_data.fid, &scb[1]);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
- afs_vnode_new_inode(&fc, dentry, &newfid, &newstatus, &newcb);
+ afs_check_for_remote_deletion(&fc, dvnode);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
+ afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
@@ -1185,15 +1242,18 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (ret == 0 &&
test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
+ afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
afs_edit_dir_for_create);
key_put(key);
+ kfree(scb);
_leave(" = 0");
return 0;
error_key:
key_put(key);
+error_scb:
+ kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1220,15 +1280,19 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
*/
static int afs_rmdir(struct inode *dir, struct dentry *dentry)
{
+ struct afs_status_cb *scb;
struct afs_fs_cursor fc;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
struct key *key;
- u64 data_version = dvnode->status.data_version;
int ret;
_enter("{%llx:%llu},{%pd}",
dvnode->fid.vid, dvnode->fid.vnode, dentry);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
@@ -1250,14 +1314,16 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
}
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t data_version = dvnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_remove(&fc, vnode, dentry->d_name.name, true,
- data_version);
+ afs_fs_remove(&fc, vnode, dentry->d_name.name, true, scb);
}
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
if (ret == 0) {
afs_dir_remove_subdir(dentry);
@@ -1272,6 +1338,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
error_key:
key_put(key);
error:
+ kfree(scb);
return ret;
}
@@ -1285,32 +1352,27 @@ error:
* However, if we didn't have a callback promise outstanding, or it was
* outstanding on a different server, then it won't break it either...
*/
-int afs_dir_remove_link(struct dentry *dentry, struct key *key,
- unsigned long d_version_before,
- unsigned long d_version_after)
+static int afs_dir_remove_link(struct afs_vnode *dvnode, struct dentry *dentry,
+ struct key *key)
{
- bool dir_valid;
int ret = 0;
- /* There were no intervening changes on the server if the version
- * number we got back was incremented by exactly 1.
- */
- dir_valid = (d_version_after == d_version_before + 1);
-
if (d_really_is_positive(dentry)) {
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
/* Already done */
- } else if (dir_valid) {
+ } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ write_seqlock(&vnode->cb_lock);
drop_nlink(&vnode->vfs_inode);
if (vnode->vfs_inode.i_nlink == 0) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ __afs_break_callback(vnode);
}
+ write_sequnlock(&vnode->cb_lock);
ret = 0;
} else {
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ afs_break_callback(vnode);
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
kdebug("AFS_VNODE_DELETED");
@@ -1331,11 +1393,10 @@ int afs_dir_remove_link(struct dentry *dentry, struct key *key,
static int afs_unlink(struct inode *dir, struct dentry *dentry)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
struct key *key;
- unsigned long d_version = (unsigned long)dentry->d_fsdata;
bool need_rehash = false;
- u64 data_version = dvnode->status.data_version;
int ret;
_enter("{%llx:%llu},{%pd}",
@@ -1344,10 +1405,15 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (dentry->d_name.len >= AFSNAMEMAX)
return -ENAMETOOLONG;
+ ret = -ENOMEM;
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error;
+
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
- goto error;
+ goto error_scb;
}
/* Try to make sure we have a callback promise on the victim. */
@@ -1374,30 +1440,34 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
spin_unlock(&dentry->d_lock);
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t data_version = dvnode->status.data_version + 1;
+ afs_dataversion_t data_version_2 = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
+ fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
!test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
- data_version);
+ &scb[0], &scb[1]);
if (fc.ac.error != -ECONNABORTED ||
fc.ac.abort_code != RXGEN_OPCODE)
continue;
set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
}
- afs_fs_remove(&fc, vnode, dentry->d_name.name, false,
- data_version);
+ afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]);
}
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
+ &data_version_2, &scb[1]);
ret = afs_end_vnode_operation(&fc);
- if (ret == 0)
- ret = afs_dir_remove_link(
- dentry, key, d_version,
- (unsigned long)dvnode->status.data_version);
+ if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
+ ret = afs_dir_remove_link(dvnode, dentry, key);
if (ret == 0 &&
test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_edit_dir_remove(dvnode, &dentry->d_name,
@@ -1409,6 +1479,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
error_key:
key_put(key);
+error_scb:
+ kfree(scb);
error:
_leave(" = %d", ret);
return ret;
@@ -1420,13 +1492,11 @@ error:
static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
+ struct afs_iget_data iget_data;
struct afs_fs_cursor fc;
- struct afs_file_status newstatus;
- struct afs_callback newcb;
+ struct afs_status_cb *scb;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct afs_fid newfid;
struct key *key;
- u64 data_version = dvnode->status.data_version;
int ret;
mode |= S_IFREG;
@@ -1444,17 +1514,26 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto error;
}
+ ret = -ENOMEM;
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error_scb;
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t data_version = dvnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
- &newfid, &newstatus, &newcb);
+ afs_prep_for_new_inode(&fc, &iget_data);
+ afs_fs_create(&fc, dentry->d_name.name, mode,
+ &scb[0], &iget_data.fid, &scb[1]);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
- afs_vnode_new_inode(&fc, dentry, &newfid, &newstatus, &newcb);
+ afs_check_for_remote_deletion(&fc, dvnode);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
+ afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
@@ -1463,13 +1542,16 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
+ afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
afs_edit_dir_for_create);
+ kfree(scb);
key_put(key);
_leave(" = 0");
return 0;
+error_scb:
+ kfree(scb);
error_key:
key_put(key);
error:
@@ -1485,15 +1567,12 @@ static int afs_link(struct dentry *from, struct inode *dir,
struct dentry *dentry)
{
struct afs_fs_cursor fc;
- struct afs_vnode *dvnode, *vnode;
+ struct afs_status_cb *scb;
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(from));
struct key *key;
- u64 data_version;
int ret;
- vnode = AFS_FS_I(d_inode(from));
- dvnode = AFS_FS_I(dir);
- data_version = dvnode->status.data_version;
-
_enter("{%llx:%llu},{%llx:%llu},{%pd}",
vnode->fid.vid, vnode->fid.vnode,
dvnode->fid.vid, dvnode->fid.vnode,
@@ -1503,14 +1582,21 @@ static int afs_link(struct dentry *from, struct inode *dir,
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
+ ret = -ENOMEM;
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error;
+
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
- goto error;
+ goto error_scb;
}
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t data_version = dvnode->status.data_version + 1;
+
if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
goto error_key;
@@ -1519,11 +1605,14 @@ static int afs_link(struct dentry *from, struct inode *dir,
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
- afs_fs_link(&fc, vnode, dentry->d_name.name, data_version);
+ afs_fs_link(&fc, vnode, dentry->d_name.name,
+ &scb[0], &scb[1]);
}
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break_2);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
+ NULL, &scb[1]);
ihold(&vnode->vfs_inode);
d_instantiate(dentry, &vnode->vfs_inode);
@@ -1540,11 +1629,14 @@ static int afs_link(struct dentry *from, struct inode *dir,
afs_edit_dir_for_link);
key_put(key);
+ kfree(scb);
_leave(" = 0");
return 0;
error_key:
key_put(key);
+error_scb:
+ kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1557,12 +1649,11 @@ error:
static int afs_symlink(struct inode *dir, struct dentry *dentry,
const char *content)
{
+ struct afs_iget_data iget_data;
struct afs_fs_cursor fc;
- struct afs_file_status newstatus;
+ struct afs_status_cb *scb;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct afs_fid newfid;
struct key *key;
- u64 data_version = dvnode->status.data_version;
int ret;
_enter("{%llx:%llu},{%pd},%s",
@@ -1577,24 +1668,32 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
if (strlen(content) >= AFSPATHMAX)
goto error;
+ ret = -ENOMEM;
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error;
+
key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
- goto error;
+ goto error_scb;
}
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t data_version = dvnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_symlink(&fc, dentry->d_name.name,
- content, data_version,
- &newfid, &newstatus);
+ afs_prep_for_new_inode(&fc, &iget_data);
+ afs_fs_symlink(&fc, dentry->d_name.name, content,
+ &scb[0], &iget_data.fid, &scb[1]);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
- afs_vnode_new_inode(&fc, dentry, &newfid, &newstatus, NULL);
+ afs_check_for_remote_deletion(&fc, dvnode);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
+ afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
@@ -1603,15 +1702,18 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
}
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
+ afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
afs_edit_dir_for_symlink);
key_put(key);
+ kfree(scb);
_leave(" = 0");
return 0;
error_key:
key_put(key);
+error_scb:
+ kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1626,11 +1728,11 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned int flags)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
struct dentry *tmp = NULL, *rehash = NULL;
struct inode *new_inode;
struct key *key;
- u64 orig_data_version, new_data_version;
bool new_negative = d_is_negative(new_dentry);
int ret;
@@ -1644,8 +1746,6 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
vnode = AFS_FS_I(d_inode(old_dentry));
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
- orig_data_version = orig_dvnode->status.data_version;
- new_data_version = new_dvnode->status.data_version;
_enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}",
orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
@@ -1653,10 +1753,15 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dvnode->fid.vid, new_dvnode->fid.vnode,
new_dentry);
+ ret = -ENOMEM;
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error;
+
key = afs_request_key(orig_dvnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
- goto error;
+ goto error_scb;
}
/* For non-directories, check whether the target is busy and if so,
@@ -1690,31 +1795,43 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dentry = tmp;
rehash = NULL;
new_negative = true;
- orig_data_version = orig_dvnode->status.data_version;
- new_data_version = new_dvnode->status.data_version;
}
}
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, orig_dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
+ afs_dataversion_t orig_data_version;
+ afs_dataversion_t new_data_version;
+ struct afs_status_cb *new_scb = &scb[1];
+
+ orig_data_version = orig_dvnode->status.data_version + 1;
+
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
goto error_rehash;
}
+ new_data_version = new_dvnode->status.data_version;
+ } else {
+ new_data_version = orig_data_version;
+ new_scb = &scb[0];
}
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode);
fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
afs_fs_rename(&fc, old_dentry->d_name.name,
new_dvnode, new_dentry->d_name.name,
- orig_data_version, new_data_version);
+ &scb[0], new_scb);
}
- afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break);
- afs_vnode_commit_status(&fc, new_dvnode, fc.cb_break_2);
- if (orig_dvnode != new_dvnode)
+ afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break,
+ &orig_data_version, &scb[0]);
+ if (new_dvnode != orig_dvnode) {
+ afs_vnode_commit_status(&fc, new_dvnode, fc.cb_break_2,
+ &new_data_version, &scb[1]);
mutex_unlock(&new_dvnode->io_lock);
+ }
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_rehash;
@@ -1754,6 +1871,8 @@ error_tmp:
if (tmp)
dput(tmp);
key_put(key);
+error_scb:
+ kfree(scb);
error:
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
index f6f89fdab6b2..28f4aa015229 100644
--- a/fs/afs/dir_silly.c
+++ b/fs/afs/dir_silly.c
@@ -24,21 +24,28 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
struct key *key)
{
struct afs_fs_cursor fc;
- u64 dir_data_version = dvnode->status.data_version;
+ struct afs_status_cb *scb;
int ret = -ERESTARTSYS;
_enter("%pd,%pd", old, new);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
trace_afs_silly_rename(vnode, false);
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
+ afs_dataversion_t dir_data_version = dvnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
afs_fs_rename(&fc, old->d_name.name,
dvnode, new->d_name.name,
- dir_data_version, dir_data_version);
+ scb, scb);
}
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &dir_data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
@@ -64,6 +71,7 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
fsnotify_nameremove(old, 0);
}
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
@@ -143,31 +151,37 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
struct dentry *dentry, struct key *key)
{
struct afs_fs_cursor fc;
- u64 dir_data_version = dvnode->status.data_version;
+ struct afs_status_cb *scb;
int ret = -ERESTARTSYS;
_enter("");
+ scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
trace_afs_silly_rename(vnode, true);
- if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ if (afs_begin_vnode_operation(&fc, dvnode, key, false)) {
+ afs_dataversion_t dir_data_version = dvnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
!test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
- dir_data_version);
+ &scb[0], &scb[1]);
if (fc.ac.error != -ECONNABORTED ||
fc.ac.abort_code != RXGEN_OPCODE)
continue;
set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
}
- afs_fs_remove(&fc, vnode, dentry->d_name.name, false,
- dir_data_version);
+ afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]);
}
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &dir_data_version, &scb[0]);
ret = afs_end_vnode_operation(&fc);
if (ret == 0) {
drop_nlink(&vnode->vfs_inode);
@@ -182,6 +196,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
afs_edit_dir_for_unlink);
}
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
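The silly-unlink hunk above tries the combined YFS.RemoveFile2 operation first and, when the server aborts with RXGEN_OPCODE, latches AFS_SERVER_FL_NO_RM2 so later unlinks skip straight to FS.RemoveFile. A sketch of that negotiate-once fallback, with hypothetical stand-in types and return codes:

#include <stdbool.h>
#include <stdio.h>

struct server {
	bool is_yfs;
	bool no_rm2;	/* learned: this server lacks the combined op */
};

enum result { OK, ABORT_OPCODE_UNKNOWN };

static enum result yfs_remove_file2(struct server *s)
{
	(void)s;
	return ABORT_OPCODE_UNKNOWN;	/* pretend the op is unsupported */
}

static enum result fs_remove(struct server *s)
{
	(void)s;
	return OK;
}

static enum result silly_unlink(struct server *s)
{
	if (s->is_yfs && !s->no_rm2) {
		enum result r = yfs_remove_file2(s);

		if (r != ABORT_OPCODE_UNKNOWN)
			return r;
		s->no_rm2 = true;	/* remember for future unlinks */
	}
	return fs_remove(s);
}

int main(void)
{
	struct server s = { .is_yfs = true };

	printf("first: %d (no_rm2 now %d)\n", silly_unlink(&s), s.no_rm2);
	printf("second: %d\n", silly_unlink(&s));
	return 0;
}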
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index a9ba81ddf154..af1689d1f32e 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -46,7 +46,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
return 0;
}
- ret = dns_query("afsdb", name, len, "srv=1", NULL, NULL);
+ ret = dns_query("afsdb", name, len, "srv=1", NULL, NULL, false);
if (ret == -ENODATA)
ret = -EDESTADDRREQ;
return ret;
@@ -261,8 +261,7 @@ int afs_dynroot_populate(struct super_block *sb)
struct afs_net *net = afs_sb2net(sb);
int ret;
- if (mutex_lock_interruptible(&net->proc_cells_lock) < 0)
- return -ERESTARTSYS;
+ mutex_lock(&net->proc_cells_lock);
net->dynroot_sb = sb;
hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
diff --git a/fs/afs/file.c b/fs/afs/file.c
index e8d6619890a9..11e69c5fb7ab 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -170,11 +170,12 @@ int afs_release(struct inode *inode, struct file *file)
{
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = file->private_data;
+ int ret = 0;
_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
if ((file->f_mode & FMODE_WRITE))
- return vfs_fsync(file, 0);
+ ret = vfs_fsync(file, 0);
file->private_data = NULL;
if (af->wb)
@@ -182,8 +183,8 @@ int afs_release(struct inode *inode, struct file *file)
key_put(af->key);
kfree(af);
afs_prune_wb_keys(vnode);
- _leave(" = 0");
- return 0;
+ _leave(" = %d", ret);
+ return ret;
}
/*
@@ -227,6 +228,7 @@ static void afs_file_readpage_read_complete(struct page *page,
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
int ret;
_enter("%s{%llx:%llu.%u},%x,,,",
@@ -236,15 +238,22 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
vnode->fid.unique,
key_serial(key));
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_data(&fc, desc);
+ afs_fs_fetch_data(&fc, scb, desc);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_check_for_remote_deletion(&fc, vnode);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
@@ -254,6 +263,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
&afs_v2net(vnode)->n_fetch_bytes);
}
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
@@ -404,10 +414,10 @@ static int afs_readpage(struct file *file, struct page *page)
/*
* Make pages available as they're filled.
*/
-static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
+static void afs_readpages_page_done(struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
- struct afs_vnode *vnode = call->reply[0];
+ struct afs_vnode *vnode = req->vnode;
#endif
struct page *page = req->pages[req->index];
@@ -461,6 +471,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping,
return -ENOMEM;
refcount_set(&req->usage, 1);
+ req->vnode = vnode;
req->page_done = afs_readpages_page_done;
req->pos = first->index;
req->pos <<= PAGE_SHIFT;
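In the fs/afs/file.c hunks above, the readpages completion handler drops its afs_call argument because the vnode now rides along in the request itself (req->vnode). A small sketch of that refactor, using invented minimal types:

#include <stdio.h>

struct vnode { const char *name; };

/* After the change: the request carries what its completion handler
 * needs, so the handler no longer reaches into the RPC call. */
struct read_req {
	struct vnode *vnode;
	void (*page_done)(struct read_req *req);
};

static void page_done(struct read_req *req)
{
	printf("page done for %s\n", req->vnode->name);
}

int main(void)
{
	struct vnode v = { "vnode-1" };
	struct read_req req = { .vnode = &v, .page_done = page_done };

	req.page_done(&req);
	return 0;
}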
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index adc88eff7849..ed3ac03682d7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -41,9 +41,6 @@ void afs_lock_may_be_available(struct afs_vnode *vnode)
{
_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
- if (vnode->lock_state != AFS_VNODE_LOCK_WAITING_FOR_CB)
- return;
-
spin_lock(&vnode->lock);
if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
afs_next_locker(vnode, 0);
@@ -77,7 +74,7 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
*/
void afs_lock_op_done(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
+ struct afs_vnode *vnode = call->lvnode;
if (call->error == 0) {
spin_lock(&vnode->lock);
@@ -185,6 +182,7 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
afs_lock_type_t type)
{
+ struct afs_status_cb *scb;
struct afs_fs_cursor fc;
int ret;
@@ -195,18 +193,23 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
vnode->fid.unique,
key_serial(key), type);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_set_lock(&fc, type);
+ afs_fs_set_lock(&fc, type, scb);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_check_for_remote_deletion(&fc, vnode);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
ret = afs_end_vnode_operation(&fc);
}
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
@@ -216,6 +219,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
*/
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
+ struct afs_status_cb *scb;
struct afs_fs_cursor fc;
int ret;
@@ -226,18 +230,23 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
vnode->fid.unique,
key_serial(key));
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
while (afs_select_current_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_extend_lock(&fc);
+ afs_fs_extend_lock(&fc, scb);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_check_for_remote_deletion(&fc, vnode);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
ret = afs_end_vnode_operation(&fc);
}
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
@@ -247,6 +256,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
*/
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
+ struct afs_status_cb *scb;
struct afs_fs_cursor fc;
int ret;
@@ -257,18 +267,23 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
vnode->fid.unique,
key_serial(key));
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
while (afs_select_current_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_release_lock(&fc);
+ afs_fs_release_lock(&fc, scb);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_check_for_remote_deletion(&fc, vnode);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
ret = afs_end_vnode_operation(&fc);
}
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
@@ -736,7 +751,7 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
posix_test_lock(file, fl);
if (fl->fl_type == F_UNLCK) {
/* no local locks; consult the server */
- ret = afs_fetch_status(vnode, key, false);
+ ret = afs_fetch_status(vnode, key, false, NULL);
if (ret < 0)
goto error;
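Note the asymmetry above: afs_set_lock rotates with afs_select_fileserver, while extend and release use afs_select_current_fileserver, since a held lock lives on one particular server. A toy sketch of the difference; the cursor and helpers are invented stand-ins and elide the kernel's retry and error handling:

#include <stdbool.h>
#include <stdio.h>

struct fileserver { const char *name; };

struct cursor {
	struct fileserver *servers;
	int nr, index;
	bool tried;
};

/* Rotate through the volume's servers until one accepts the call. */
static struct fileserver *select_fileserver(struct cursor *c)
{
	return c->index < c->nr ? &c->servers[c->index++] : NULL;
}

/* Extension/release must target the server that granted the lock. */
static struct fileserver *select_current_fileserver(struct cursor *c)
{
	if (c->tried)
		return NULL;
	c->tried = true;
	return &c->servers[c->index];
}

int main(void)
{
	struct fileserver fs[2] = { { "fs0" }, { "fs1" } };
	struct cursor c = { .servers = fs, .nr = 2 };
	struct fileserver *s;

	while ((s = select_fileserver(&c)))
		printf("set lock: try %s\n", s->name);

	c.index = 1;		/* pretend fs1 granted the lock */
	c.tried = false;
	while ((s = select_current_fileserver(&c)))
		printf("extend lock: only %s\n", s->name);
	return 0;
}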
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 5d3abde52a0f..9b7266209343 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -33,8 +33,8 @@ static bool afs_fs_probe_done(struct afs_server *server)
void afs_fileserver_probe_result(struct afs_call *call)
{
struct afs_addr_list *alist = call->alist;
- struct afs_server *server = call->reply[0];
- unsigned int server_index = (long)call->reply[1];
+ struct afs_server *server = call->server;
+ unsigned int server_index = call->server_index;
unsigned int index = call->addr_ix;
unsigned int rtt = UINT_MAX;
bool have_result = false;
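The probe-result hunk shows the pattern repeated throughout this patch: results that used to squeeze through untyped call->reply[] slots move to dedicated, typed fields. A compile-and-run illustration of the shape of that change:

#include <stdio.h>

struct afs_server { const char *name; };

/* Before: void *reply[2];
 *   server = call->reply[0];
 *   server_index = (long)call->reply[1];   -- casts, slot bookkeeping
 *
 * After: named, typed members the compiler can check. */
struct call {
	struct afs_server *server;
	unsigned int server_index;
};

int main(void)
{
	struct afs_server s = { "fs.example.org" };
	struct call call = { .server = &s, .server_index = 3 };

	printf("probe result for %s[%u]\n", call.server->name,
	       call.server_index);
	return 0;
}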
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 1296f5dc4c1e..48298408d6ac 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -60,78 +60,17 @@ static void xdr_dump_bad(const __be32 *bp)
}
/*
- * Update the core inode struct from a returned status record.
- */
-void afs_update_inode_from_status(struct afs_vnode *vnode,
- struct afs_file_status *status,
- const afs_dataversion_t *expected_version,
- u8 flags)
-{
- struct timespec64 t;
- umode_t mode;
-
- t = status->mtime_client;
- vnode->vfs_inode.i_ctime = t;
- vnode->vfs_inode.i_mtime = t;
- vnode->vfs_inode.i_atime = t;
-
- if (flags & (AFS_VNODE_META_CHANGED | AFS_VNODE_NOT_YET_SET)) {
- vnode->vfs_inode.i_uid = make_kuid(&init_user_ns, status->owner);
- vnode->vfs_inode.i_gid = make_kgid(&init_user_ns, status->group);
- set_nlink(&vnode->vfs_inode, status->nlink);
-
- mode = vnode->vfs_inode.i_mode;
- mode &= ~S_IALLUGO;
- mode |= status->mode;
- barrier();
- vnode->vfs_inode.i_mode = mode;
- }
-
- if (!(flags & AFS_VNODE_NOT_YET_SET)) {
- if (expected_version &&
- *expected_version != status->data_version) {
- _debug("vnode modified %llx on {%llx:%llu} [exp %llx]",
- (unsigned long long) status->data_version,
- vnode->fid.vid, vnode->fid.vnode,
- (unsigned long long) *expected_version);
- vnode->invalid_before = status->data_version;
- if (vnode->status.type == AFS_FTYPE_DIR) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- afs_stat_v(vnode, n_inval);
- } else {
- set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
- }
- } else if (vnode->status.type == AFS_FTYPE_DIR) {
- /* Expected directory change is handled elsewhere so
- * that we can locally edit the directory and save on a
- * download.
- */
- if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- flags &= ~AFS_VNODE_DATA_CHANGED;
- }
- }
-
- if (flags & (AFS_VNODE_DATA_CHANGED | AFS_VNODE_NOT_YET_SET)) {
- inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
- i_size_write(&vnode->vfs_inode, status->size);
- }
-}
-
-/*
* decode an AFSFetchStatus block
*/
-static int xdr_decode_AFSFetchStatus(struct afs_call *call,
- const __be32 **_bp,
- struct afs_file_status *status,
- struct afs_vnode *vnode,
- const afs_dataversion_t *expected_version,
- struct afs_read *read_req)
+static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp;
+ struct afs_file_status *status = &scb->status;
bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
u64 data_version, size;
u32 type, abort_code;
- u8 flags = 0;
abort_code = ntohl(xdr->abort_code);
@@ -144,6 +83,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
* case.
*/
status->abort_code = abort_code;
+ scb->have_error = true;
return 0;
}
@@ -161,44 +101,25 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
case AFS_FTYPE_FILE:
case AFS_FTYPE_DIR:
case AFS_FTYPE_SYMLINK:
- if (type != status->type &&
- vnode &&
- !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
- pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- status->type, type);
- goto bad;
- }
status->type = type;
break;
default:
goto bad;
}
-#define EXTRACT_M(FIELD) \
- do { \
- u32 x = ntohl(xdr->FIELD); \
- if (status->FIELD != x) { \
- flags |= AFS_VNODE_META_CHANGED; \
- status->FIELD = x; \
- } \
- } while (0)
-
- EXTRACT_M(nlink);
- EXTRACT_M(author);
- EXTRACT_M(owner);
- EXTRACT_M(caller_access); /* call ticket dependent */
- EXTRACT_M(anon_access);
- EXTRACT_M(mode);
- EXTRACT_M(group);
+ status->nlink = ntohl(xdr->nlink);
+ status->author = ntohl(xdr->author);
+ status->owner = ntohl(xdr->owner);
+ status->caller_access = ntohl(xdr->caller_access); /* Ticket dependent */
+ status->anon_access = ntohl(xdr->anon_access);
+ status->mode = ntohl(xdr->mode) & S_IALLUGO;
+ status->group = ntohl(xdr->group);
+ status->lock_count = ntohl(xdr->lock_count);
status->mtime_client.tv_sec = ntohl(xdr->mtime_client);
status->mtime_client.tv_nsec = 0;
status->mtime_server.tv_sec = ntohl(xdr->mtime_server);
status->mtime_server.tv_nsec = 0;
- status->lock_count = ntohl(xdr->lock_count);
size = (u64)ntohl(xdr->size_lo);
size |= (u64)ntohl(xdr->size_hi) << 32;
@@ -206,25 +127,10 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
data_version = (u64)ntohl(xdr->data_version_lo);
data_version |= (u64)ntohl(xdr->data_version_hi) << 32;
- if (data_version != status->data_version) {
- status->data_version = data_version;
- flags |= AFS_VNODE_DATA_CHANGED;
- }
-
- if (read_req) {
- read_req->data_version = data_version;
- read_req->file_size = size;
- }
+ status->data_version = data_version;
+ scb->have_status = true;
*_bp = (const void *)*_bp + sizeof(*xdr);
-
- if (vnode) {
- if (test_bit(AFS_VNODE_UNSET, &vnode->flags))
- flags |= AFS_VNODE_NOT_YET_SET;
- afs_update_inode_from_status(vnode, status, expected_version,
- flags);
- }
-
return 0;
bad:
@@ -232,77 +138,22 @@ bad:
return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
}
-/*
- * Decode the file status. We need to lock the target vnode if we're going to
- * update its status so that stat() sees the attributes update atomically.
- */
-static int afs_decode_status(struct afs_call *call,
- const __be32 **_bp,
- struct afs_file_status *status,
- struct afs_vnode *vnode,
- const afs_dataversion_t *expected_version,
- struct afs_read *read_req)
+static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
{
- int ret;
-
- if (!vnode)
- return xdr_decode_AFSFetchStatus(call, _bp, status, vnode,
- expected_version, read_req);
-
- write_seqlock(&vnode->cb_lock);
- ret = xdr_decode_AFSFetchStatus(call, _bp, status, vnode,
- expected_version, read_req);
- write_sequnlock(&vnode->cb_lock);
- return ret;
+ return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
}
-/*
- * decode an AFSCallBack block
- */
-static void xdr_decode_AFSCallBack(struct afs_call *call,
- struct afs_vnode *vnode,
- const __be32 **_bp)
+static void xdr_decode_AFSCallBack(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
- struct afs_cb_interest *old, *cbi = call->cbi;
+ struct afs_callback *cb = &scb->callback;
const __be32 *bp = *_bp;
- u32 cb_expiry;
-
- write_seqlock(&vnode->cb_lock);
-
- if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) {
- vnode->cb_version = ntohl(*bp++);
- cb_expiry = ntohl(*bp++);
- vnode->cb_type = ntohl(*bp++);
- vnode->cb_expires_at = cb_expiry + ktime_get_real_seconds();
- old = vnode->cb_interest;
- if (old != call->cbi) {
- vnode->cb_interest = cbi;
- cbi = old;
- }
- set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- } else {
- bp += 3;
- }
- write_sequnlock(&vnode->cb_lock);
- call->cbi = cbi;
- *_bp = bp;
-}
-
-static ktime_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
-{
- return ktime_add_ns(call->reply_time, expiry * NSEC_PER_SEC);
-}
-
-static void xdr_decode_AFSCallBack_raw(struct afs_call *call,
- const __be32 **_bp,
- struct afs_callback *cb)
-{
- const __be32 *bp = *_bp;
-
- cb->version = ntohl(*bp++);
+ bp++; /* version */
cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++));
- cb->type = ntohl(*bp++);
+ bp++; /* type */
+ scb->have_cb = true;
*_bp = bp;
}
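Two decode details above are worth pinning down: 64-bit size and data_version arrive as two big-endian 32-bit words, and the new xdr_decode_expiry converts the reply timestamp from nanoseconds to seconds before adding the server's relative expiry. A user-space analogue of both, runnable as-is (the kernel does the division with ktime_divns and NSEC_PER_SEC):

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit value from big-endian hi/lo words, as the
 * status decoder does for size and data_version. */
static uint64_t xdr_u64(uint32_t be_hi, uint32_t be_lo)
{
	uint64_t v = (uint64_t)ntohl(be_lo);

	v |= (uint64_t)ntohl(be_hi) << 32;
	return v;
}

/* Expiry is sent as seconds relative to the reply time. */
static int64_t decode_expiry(int64_t reply_time_ns, uint32_t expiry_s)
{
	return reply_time_ns / 1000000000LL + expiry_s;
}

int main(void)
{
	uint32_t hi = htonl(0x1), lo = htonl(0x2);

	printf("value: %#llx\n", (unsigned long long)xdr_u64(hi, lo));
	printf("expires at: %lld\n",
	       (long long)decode_expiry(1000000000000LL, 300));
	return 0;
}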
@@ -395,7 +246,6 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
*/
static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -403,16 +253,13 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
if (ret < 0)
return ret;
- _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
-
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_AFSCallBack(call, vnode, &bp);
- xdr_decode_AFSVolSync(&bp, call->reply[1]);
+ xdr_decode_AFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -431,8 +278,8 @@ static const struct afs_call_type afs_RXFSFetchStatus_vnode = {
/*
* fetch the status information for a file
*/
-int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync,
- bool new_inode)
+int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
+ struct afs_volsync *volsync)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -440,7 +287,7 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_file_status(fc, volsync, new_inode);
+ return yfs_fs_fetch_file_status(fc, scb, volsync);
_enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
@@ -453,10 +300,8 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
}
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = volsync;
- call->expected_version = new_inode ? 1 : vnode->status.data_version;
- call->want_reply_time = true;
+ call->out_scb = scb;
+ call->out_volsync = volsync;
/* marshall the parameters */
bp = call->request;
@@ -465,10 +310,10 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
bp[2] = htonl(vnode->fid.vnode);
bp[3] = htonl(vnode->fid.unique);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -478,8 +323,7 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
*/
static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
- struct afs_read *req = call->reply[2];
+ struct afs_read *req = call->read_request;
const __be32 *bp;
unsigned int size;
int ret;
@@ -541,7 +385,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (req->offset == PAGE_SIZE) {
req->offset = 0;
if (req->page_done)
- req->page_done(call, req);
+ req->page_done(req);
req->index++;
if (req->remain > 0)
goto begin_page;
@@ -575,12 +419,14 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &vnode->status.data_version, req);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_AFSCallBack(call, vnode, &bp);
- xdr_decode_AFSVolSync(&bp, call->reply[1]);
+ xdr_decode_AFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
+
+ req->data_version = call->out_scb->status.data_version;
+ req->file_size = call->out_scb->status.size;
call->unmarshall++;
@@ -593,7 +439,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
zero_user_segment(req->pages[req->index],
req->offset, PAGE_SIZE);
if (req->page_done)
- req->page_done(call, req);
+ req->page_done(req);
req->offset = 0;
}
@@ -603,7 +449,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
static void afs_fetch_data_destructor(struct afs_call *call)
{
- struct afs_read *req = call->reply[2];
+ struct afs_read *req = call->read_request;
afs_put_read(req);
afs_flat_call_destructor(call);
@@ -629,7 +475,9 @@ static const struct afs_call_type afs_RXFSFetchData64 = {
/*
* fetch data from a very large file
*/
-static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
+static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
+ struct afs_status_cb *scb,
+ struct afs_read *req)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -643,11 +491,9 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = NULL; /* volsync */
- call->reply[2] = req;
- call->expected_version = vnode->status.data_version;
- call->want_reply_time = true;
+ call->out_scb = scb;
+ call->out_volsync = NULL;
+ call->read_request = req;
/* marshall the parameters */
bp = call->request;
@@ -661,9 +507,9 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
bp[7] = htonl(lower_32_bits(req->len));
refcount_inc(&req->usage);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -671,7 +517,9 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
/*
* fetch data from a file
*/
-int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
+int afs_fs_fetch_data(struct afs_fs_cursor *fc,
+ struct afs_status_cb *scb,
+ struct afs_read *req)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -679,12 +527,12 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_data(fc, req);
+ return yfs_fs_fetch_data(fc, scb, req);
if (upper_32_bits(req->pos) ||
upper_32_bits(req->len) ||
upper_32_bits(req->pos + req->len))
- return afs_fs_fetch_data64(fc, req);
+ return afs_fs_fetch_data64(fc, scb, req);
_enter("");
@@ -693,11 +541,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = NULL; /* volsync */
- call->reply[2] = req;
- call->expected_version = vnode->status.data_version;
- call->want_reply_time = true;
+ call->out_scb = scb;
+ call->out_volsync = NULL;
+ call->read_request = req;
/* marshall the parameters */
bp = call->request;
@@ -709,9 +555,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
bp[5] = htonl(lower_32_bits(req->len));
refcount_inc(&req->usage);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
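The dispatch above routes any request whose position or length does not fit in 32 bits to FS.FetchData64. A self-contained sketch of the check; upper_32_bits is redefined locally to keep it runnable outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))

/* FS.FetchData carries 32-bit offsets; anything wider, including a
 * range that merely ends past 4GiB, must use FS.FetchData64. */
static const char *pick_fetch_op(uint64_t pos, uint64_t len)
{
	if (upper_32_bits(pos) || upper_32_bits(len) ||
	    upper_32_bits(pos + len))
		return "FS.FetchData64";
	return "FS.FetchData";
}

int main(void)
{
	printf("%s\n", pick_fetch_op(0, 4096));		/* fits in 32 bits */
	printf("%s\n", pick_fetch_op(0xfffff000, 0x2000)); /* crosses 4GiB */
	return 0;
}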
@@ -721,28 +567,24 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
*/
static int afs_deliver_fs_create_vnode(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
- _enter("{%u}", call->unmarshall);
-
ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFid(&bp, call->reply[1]);
- ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ xdr_decode_AFSFid(&bp, call->out_fid);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- xdr_decode_AFSCallBack_raw(call, &bp, call->reply[3]);
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -771,24 +613,23 @@ static const struct afs_call_type afs_RXFSMakeDir = {
int afs_fs_create(struct afs_fs_cursor *fc,
const char *name,
umode_t mode,
- u64 current_data_version,
+ struct afs_status_cb *dvnode_scb,
struct afs_fid *newfid,
- struct afs_file_status *newstatus,
- struct afs_callback *newcb)
+ struct afs_status_cb *new_scb)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)){
if (S_ISDIR(mode))
- return yfs_fs_make_dir(fc, name, mode, current_data_version,
- newfid, newstatus, newcb);
+ return yfs_fs_make_dir(fc, name, mode, dvnode_scb,
+ newfid, new_scb);
else
- return yfs_fs_create_file(fc, name, mode, current_data_version,
- newfid, newstatus, newcb);
+ return yfs_fs_create_file(fc, name, mode, dvnode_scb,
+ newfid, new_scb);
}
_enter("");
@@ -804,19 +645,16 @@ int afs_fs_create(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = newfid;
- call->reply[2] = newstatus;
- call->reply[3] = newcb;
- call->expected_version = current_data_version + 1;
- call->want_reply_time = true;
+ call->out_dir_scb = dvnode_scb;
+ call->out_fid = newfid;
+ call->out_scb = new_scb;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(dvnode->fid.vid);
+ *bp++ = htonl(dvnode->fid.vnode);
+ *bp++ = htonl(dvnode->fid.unique);
*bp++ = htonl(namesz);
memcpy(bp, name, namesz);
bp = (void *) bp + namesz;
@@ -825,41 +663,38 @@ int afs_fs_create(struct afs_fs_cursor *fc,
bp = (void *) bp + padsz;
}
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
- *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
*bp++ = 0; /* segment size */
afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
+ trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
/*
- * Deliver reply data to any operation that returns file status and volume
+ * Deliver reply data to any operation that returns directory status and volume
* sync.
*/
-static int afs_deliver_fs_status_and_vol(struct afs_call *call)
+static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
- _enter("{%u}", call->unmarshall);
-
ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
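The request marshalling above writes each name as a length word, the bytes, then zero padding to a 4-byte boundary (the namesz/padsz arithmetic). A runnable sketch of that XDR string encoding, with a hypothetical buffer layout:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Length word, name bytes, then zero-fill to the next 32-bit
 * boundary, mirroring the padsz computation in the callers. */
static size_t marshal_name(uint32_t *bp, const char *name)
{
	size_t namesz = strlen(name);
	size_t padsz = (4 - (namesz & 3)) & 3;
	uint8_t *p = (uint8_t *)bp;

	*bp = htonl((uint32_t)namesz);
	memcpy(p + 4, name, namesz);
	memset(p + 4 + namesz, 0, padsz);
	return 4 + namesz + padsz;	/* bytes consumed in the request */
}

int main(void)
{
	uint32_t buf[8] = { 0 };

	printf("used %zu bytes\n", marshal_name(buf, "hello")); /* 4+5+3 */
	return 0;
}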
@@ -871,14 +706,14 @@ static int afs_deliver_fs_status_and_vol(struct afs_call *call)
static const struct afs_call_type afs_RXFSRemoveFile = {
.name = "FS.RemoveFile",
.op = afs_FS_RemoveFile,
- .deliver = afs_deliver_fs_status_and_vol,
+ .deliver = afs_deliver_fs_dir_status_and_vol,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSRemoveDir = {
.name = "FS.RemoveDir",
.op = afs_FS_RemoveDir,
- .deliver = afs_deliver_fs_status_and_vol,
+ .deliver = afs_deliver_fs_dir_status_and_vol,
.destructor = afs_flat_call_destructor,
};
@@ -886,7 +721,7 @@ static const struct afs_call_type afs_RXFSRemoveDir = {
* remove a file or directory
*/
int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, bool isdir, u64 current_data_version)
+ const char *name, bool isdir, struct afs_status_cb *dvnode_scb)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -895,7 +730,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_remove(fc, vnode, name, isdir, current_data_version);
+ return yfs_fs_remove(fc, vnode, name, isdir, dvnode_scb);
_enter("");
@@ -910,9 +745,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = dvnode;
- call->reply[1] = vnode;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -930,6 +763,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -939,7 +773,6 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int afs_deliver_fs_link(struct afs_call *call)
{
- struct afs_vnode *dvnode = call->reply[0], *vnode = call->reply[1];
const __be32 *bp;
int ret;
@@ -951,14 +784,13 @@ static int afs_deliver_fs_link(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- ret = afs_decode_status(call, &bp, &dvnode->status, dvnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -978,7 +810,9 @@ static const struct afs_call_type afs_RXFSLink = {
* make a hard link
*/
int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, u64 current_data_version)
+ const char *name,
+ struct afs_status_cb *dvnode_scb,
+ struct afs_status_cb *vnode_scb)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -987,7 +821,7 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_link(fc, vnode, name, current_data_version);
+ return yfs_fs_link(fc, vnode, name, dvnode_scb, vnode_scb);
_enter("");
@@ -1000,9 +834,8 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = dvnode;
- call->reply[1] = vnode;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_scb = vnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -1023,6 +856,7 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call1(call, &vnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1032,7 +866,6 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int afs_deliver_fs_symlink(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -1044,15 +877,14 @@ static int afs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFid(&bp, call->reply[1]);
- ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ xdr_decode_AFSFid(&bp, call->out_fid);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -1074,19 +906,19 @@ static const struct afs_call_type afs_RXFSSymlink = {
int afs_fs_symlink(struct afs_fs_cursor *fc,
const char *name,
const char *contents,
- u64 current_data_version,
+ struct afs_status_cb *dvnode_scb,
struct afs_fid *newfid,
- struct afs_file_status *newstatus)
+ struct afs_status_cb *new_scb)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz, c_namesz, c_padsz;
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_symlink(fc, name, contents, current_data_version,
- newfid, newstatus);
+ return yfs_fs_symlink(fc, name, contents, dvnode_scb,
+ newfid, new_scb);
_enter("");
@@ -1104,17 +936,16 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = newfid;
- call->reply[2] = newstatus;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_fid = newfid;
+ call->out_scb = new_scb;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSYMLINK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(dvnode->fid.vid);
+ *bp++ = htonl(dvnode->fid.vnode);
+ *bp++ = htonl(dvnode->fid.unique);
*bp++ = htonl(namesz);
memcpy(bp, name, namesz);
bp = (void *) bp + namesz;
@@ -1130,14 +961,15 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
bp = (void *) bp + c_padsz;
}
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
- *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(S_IRWXUGO); /* unix mode */
*bp++ = 0; /* segment size */
afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
+ trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1147,29 +979,24 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_rename(struct afs_call *call)
{
- struct afs_vnode *orig_dvnode = call->reply[0], *new_dvnode = call->reply[1];
const __be32 *bp;
int ret;
- _enter("{%u}", call->unmarshall);
-
ret = afs_transfer_reply(call);
if (ret < 0)
return ret;
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- if (new_dvnode != orig_dvnode) {
- ret = afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
- &call->expected_version_2, NULL);
+ if (call->out_dir_scb != call->out_scb) {
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
}
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -1186,14 +1013,14 @@ static const struct afs_call_type afs_RXFSRename = {
};
/*
- * create a symbolic link
+ * Rename/move a file or directory.
*/
int afs_fs_rename(struct afs_fs_cursor *fc,
const char *orig_name,
struct afs_vnode *new_dvnode,
const char *new_name,
- u64 current_orig_data_version,
- u64 current_new_data_version)
+ struct afs_status_cb *orig_dvnode_scb,
+ struct afs_status_cb *new_dvnode_scb)
{
struct afs_vnode *orig_dvnode = fc->vnode;
struct afs_call *call;
@@ -1204,8 +1031,8 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
return yfs_fs_rename(fc, orig_name,
new_dvnode, new_name,
- current_orig_data_version,
- current_new_data_version);
+ orig_dvnode_scb,
+ new_dvnode_scb);
_enter("");
@@ -1225,10 +1052,8 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = orig_dvnode;
- call->reply[1] = new_dvnode;
- call->expected_version = current_orig_data_version + 1;
- call->expected_version_2 = current_new_data_version + 1;
+ call->out_dir_scb = orig_dvnode_scb;
+ call->out_scb = new_dvnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -1257,6 +1082,7 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
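One subtlety in the rename deliver routine above: for a same-directory rename the caller passes the same afs_status_cb for both output slots (dir.c sets new_scb = &scb[0] in that case), and the wire reply carries only one directory status, hence the pointer comparison before decoding a second. A minimal sketch with a cut-down stand-in record:

#include <stdbool.h>
#include <stdio.h>

struct status_cb { bool have_status; };

static void deliver_rename(struct status_cb *dir_scb,
			   struct status_cb *new_dir_scb)
{
	dir_scb->have_status = true;		/* first status */
	if (new_dir_scb != dir_scb)
		new_dir_scb->have_status = true;	/* second, if distinct */
}

int main(void)
{
	struct status_cb a = { 0 }, b = { 0 };

	deliver_rename(&a, &a);	/* rename within one directory */
	deliver_rename(&a, &b);	/* move between directories */
	printf("second dir decoded: %d\n", b.have_status);
	return 0;
}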
@@ -1266,7 +1092,6 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_store_data(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -1278,13 +1103,10 @@ static int afs_deliver_fs_store_data(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
-
- afs_pages_written_back(vnode, call);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -1314,7 +1136,8 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
struct address_space *mapping,
pgoff_t first, pgoff_t last,
unsigned offset, unsigned to,
- loff_t size, loff_t pos, loff_t i_size)
+ loff_t size, loff_t pos, loff_t i_size,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1332,13 +1155,12 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
call->key = fc->key;
call->mapping = mapping;
- call->reply[0] = vnode;
call->first = first;
call->last = last;
call->first_offset = offset;
call->last_to = to;
call->send_pages = true;
- call->expected_version = vnode->status.data_version + 1;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1362,6 +1184,7 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
*bp++ = htonl((u32) i_size);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1371,7 +1194,8 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
*/
int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to)
+ unsigned offset, unsigned to,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1380,7 +1204,7 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_store_data(fc, mapping, first, last, offset, to);
+ return yfs_fs_store_data(fc, mapping, first, last, offset, to, scb);
_enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
@@ -1401,7 +1225,7 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32)
return afs_fs_store_data64(fc, mapping, first, last, offset, to,
- size, pos, i_size);
+ size, pos, i_size, scb);
call = afs_alloc_flat_call(net, &afs_RXFSStoreData,
(4 + 6 + 3) * 4,
@@ -1411,13 +1235,12 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
call->key = fc->key;
call->mapping = mapping;
- call->reply[0] = vnode;
call->first = first;
call->last = last;
call->first_offset = offset;
call->last_to = to;
call->send_pages = true;
- call->expected_version = vnode->status.data_version + 1;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1439,6 +1262,7 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1448,7 +1272,6 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
*/
static int afs_deliver_fs_store_status(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -1460,11 +1283,10 @@ static int afs_deliver_fs_store_status(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -1498,7 +1320,8 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
* set the attributes on a very large file, using FS.StoreData rather than
* FS.StoreStatus so as to alter the file size also
*/
-static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
+static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1517,8 +1340,7 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->expected_version = vnode->status.data_version + 1;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1538,6 +1360,7 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1546,7 +1369,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
* set the attributes on a file, using FS.StoreData rather than FS.StoreStatus
* so as to alter the file size also
*/
-static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
+static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1558,7 +1382,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
ASSERT(attr->ia_valid & ATTR_SIZE);
if (attr->ia_size >> 32)
- return afs_fs_setattr_size64(fc, attr);
+ return afs_fs_setattr_size64(fc, attr, scb);
call = afs_alloc_flat_call(net, &afs_RXFSStoreData_as_Status,
(4 + 6 + 3) * 4,
@@ -1567,8 +1391,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->expected_version = vnode->status.data_version + 1;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1585,6 +1408,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1593,7 +1417,8 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
* set the attributes on a file, using FS.StoreData if there's a change in file
* size, and FS.StoreStatus otherwise
*/
-int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
+int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1601,10 +1426,10 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_setattr(fc, attr);
+ return yfs_fs_setattr(fc, attr, scb);
if (attr->ia_valid & ATTR_SIZE)
- return afs_fs_setattr_size(fc, attr);
+ return afs_fs_setattr_size(fc, attr, scb);
_enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
@@ -1616,8 +1441,7 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->expected_version = vnode->status.data_version;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1630,6 +1454,7 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
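afs_fs_setattr above picks its RPC in two steps: only a size change needs FS.StoreData (which can alter the file's extent), and a size above 32 bits needs the 64-bit variant. A sketch of the decision; the flag values are illustrative, not the kernel's:

#include <stdio.h>

#define ATTR_MODE	0x001	/* illustrative values */
#define ATTR_SIZE	0x008

static const char *pick_setattr_op(unsigned int ia_valid,
				   unsigned long long new_size)
{
	if (!(ia_valid & ATTR_SIZE))
		return "FS.StoreStatus";
	return new_size >> 32 ? "FS.StoreData64" : "FS.StoreData";
}

int main(void)
{
	printf("%s\n", pick_setattr_op(ATTR_MODE, 0));
	printf("%s\n", pick_setattr_op(ATTR_SIZE, 1ULL << 33));
	return 0;
}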
@@ -1659,7 +1484,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSFetchVolumeStatus(&bp, call->reply[1]);
+ xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
call->unmarshall++;
afs_extract_to_tmp(call);
@@ -1675,7 +1500,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_volname_len);
size = (call->count + 3) & ~3; /* It's padded */
- afs_extract_begin(call, call->reply[2], size);
+ afs_extract_to_buf(call, size);
call->unmarshall++;
/* Fall through - and extract the volume name */
@@ -1685,7 +1510,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
if (ret < 0)
return ret;
- p = call->reply[2];
+ p = call->buffer;
p[call->count] = 0;
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
@@ -1703,7 +1528,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_offline_msg_len);
size = (call->count + 3) & ~3; /* It's padded */
- afs_extract_begin(call, call->reply[2], size);
+ afs_extract_to_buf(call, size);
call->unmarshall++;
/* Fall through - and extract the offline message */
@@ -1713,7 +1538,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
if (ret < 0)
return ret;
- p = call->reply[2];
+ p = call->buffer;
p[call->count] = 0;
_debug("offline '%s'", p);
@@ -1732,7 +1557,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_motd_len);
size = (call->count + 3) & ~3; /* It's padded */
- afs_extract_begin(call, call->reply[2], size);
+ afs_extract_to_buf(call, size);
call->unmarshall++;
/* Fall through - and extract the message of the day */
@@ -1742,7 +1567,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
if (ret < 0)
return ret;
- p = call->reply[2];
+ p = call->buffer;
p[call->count] = 0;
_debug("motd '%s'", p);
@@ -1757,23 +1582,13 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
}
/*
- * destroy an FS.GetVolumeStatus call
- */
-static void afs_get_volume_status_call_destructor(struct afs_call *call)
-{
- kfree(call->reply[2]);
- call->reply[2] = NULL;
- afs_flat_call_destructor(call);
-}
-
-/*
* FS.GetVolumeStatus operation type
*/
static const struct afs_call_type afs_RXFSGetVolumeStatus = {
.name = "FS.GetVolumeStatus",
.op = afs_FS_GetVolumeStatus,
.deliver = afs_deliver_fs_get_volume_status,
- .destructor = afs_get_volume_status_call_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -1786,27 +1601,19 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
struct afs_call *call;
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- void *tmpbuf;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
return yfs_fs_get_volume_status(fc, vs);
_enter("");
- tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
- call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4, 12 * 4);
- if (!call) {
- kfree(tmpbuf);
+ call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4,
+ max(12 * 4, AFSOPAQUEMAX + 1));
+ if (!call)
return -ENOMEM;
- }
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = vs;
- call->reply[2] = tmpbuf;
+ call->out_volstatus = vs;
/* marshall the parameters */
bp = call->request;
@@ -1815,6 +1622,7 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
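The volume-status delivery above rounds each counted string up to a 4-byte boundary before extracting, then NUL-terminates it in place, which is plausibly why the reply buffer is now sized max(12 * 4, AFSOPAQUEMAX + 1): the extra byte leaves room for the terminator. A runnable sketch of that extraction (AFSOPAQUEMAX's value here is assumed for illustration):

#include <stdio.h>
#include <string.h>

#define AFSOPAQUEMAX 1024	/* assumed cap for the sketch */

static void extract_padded_string(char *buf, const char *wire,
				  unsigned int count)
{
	unsigned int size = (count + 3) & ~3u;	/* it's padded */

	memcpy(buf, wire, size);
	buf[count] = 0;		/* needs count + 1 bytes of buffer */
}

int main(void)
{
	char buf[AFSOPAQUEMAX + 1];
	const char wire[8] = "motd!";	/* 5 bytes + 3 padding on the wire */

	extract_padded_string(buf, wire, 5);
	printf("motd '%s'\n", buf);
	return 0;
}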
@@ -1835,7 +1643,7 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -1876,7 +1684,8 @@ static const struct afs_call_type afs_RXFSReleaseLock = {
/*
* Set a lock on a file
*/
-int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
+int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1884,7 +1693,7 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_set_lock(fc, type);
+ return yfs_fs_set_lock(fc, type, scb);
_enter("");
@@ -1893,8 +1702,8 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->want_reply_time = true;
+ call->lvnode = vnode;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1906,6 +1715,7 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_calli(call, &vnode->fid, type);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1913,7 +1723,7 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
/*
* extend a lock on a file
*/
-int afs_fs_extend_lock(struct afs_fs_cursor *fc)
+int afs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1921,7 +1731,7 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc)
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_extend_lock(fc);
+ return yfs_fs_extend_lock(fc, scb);
_enter("");
@@ -1930,8 +1740,8 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->want_reply_time = true;
+ call->lvnode = vnode;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1942,6 +1752,7 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1949,7 +1760,7 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc)
/*
* release a lock on a file
*/
-int afs_fs_release_lock(struct afs_fs_cursor *fc)
+int afs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1957,7 +1768,7 @@ int afs_fs_release_lock(struct afs_fs_cursor *fc)
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_release_lock(fc);
+ return yfs_fs_release_lock(fc, scb);
_enter("");
@@ -1966,7 +1777,8 @@ int afs_fs_release_lock(struct afs_fs_cursor *fc)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
+ call->lvnode = vnode;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1977,6 +1789,7 @@ int afs_fs_release_lock(struct afs_fs_cursor *fc)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -2071,14 +1884,6 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
return 0;
}
-static void afs_destroy_fs_get_capabilities(struct afs_call *call)
-{
- struct afs_server *server = call->reply[0];
-
- afs_put_server(call->net, server);
- afs_flat_call_destructor(call);
-}
-
/*
* FS.GetCapabilities operation type
*/
@@ -2087,7 +1892,7 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
.op = afs_FS_GetCapabilities,
.deliver = afs_deliver_fs_get_capabilities,
.done = afs_fileserver_probe_result,
- .destructor = afs_destroy_fs_get_capabilities,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -2110,11 +1915,11 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
return ERR_PTR(-ENOMEM);
call->key = key;
- call->reply[0] = afs_get_server(server);
- call->reply[1] = (void *)(long)server_index;
+ call->server = afs_get_server(server);
+ call->server_index = server_index;
call->upgrade = true;
- call->want_reply_time = true;
call->async = true;
+ call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
/* marshall the parameters */
bp = call->request;
@@ -2131,10 +1936,6 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
*/
static int afs_deliver_fs_fetch_status(struct afs_call *call)
{
- struct afs_file_status *status = call->reply[1];
- struct afs_callback *callback = call->reply[2];
- struct afs_volsync *volsync = call->reply[3];
- struct afs_fid *fid = call->reply[0];
const __be32 *bp;
int ret;
@@ -2142,16 +1943,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
if (ret < 0)
return ret;
- _enter("{%llx:%llu}", fid->vid, fid->vnode);
-
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = afs_decode_status(call, &bp, status, NULL,
- &call->expected_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_AFSCallBack_raw(call, &bp, callback);
- xdr_decode_AFSVolSync(&bp, volsync);
+ xdr_decode_AFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -2173,15 +1971,14 @@ static const struct afs_call_type afs_RXFSFetchStatus = {
int afs_fs_fetch_status(struct afs_fs_cursor *fc,
struct afs_net *net,
struct afs_fid *fid,
- struct afs_file_status *status,
- struct afs_callback *callback,
+ struct afs_status_cb *scb,
struct afs_volsync *volsync)
{
struct afs_call *call;
__be32 *bp;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_status(fc, net, fid, status, callback, volsync);
+ return yfs_fs_fetch_status(fc, net, fid, scb, volsync);
_enter(",%x,{%llx:%llu},,",
key_serial(fc->key), fid->vid, fid->vnode);
@@ -2193,12 +1990,9 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
}
call->key = fc->key;
- call->reply[0] = fid;
- call->reply[1] = status;
- call->reply[2] = callback;
- call->reply[3] = volsync;
- call->expected_version = 1; /* vnode->status.data_version */
- call->want_reply_time = true;
+ call->out_fid = fid;
+ call->out_scb = scb;
+ call->out_volsync = volsync;
/* marshall the parameters */
bp = call->request;
@@ -2207,9 +2001,9 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
bp[2] = htonl(fid->vnode);
bp[3] = htonl(fid->unique);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -2219,9 +2013,7 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
{
- struct afs_file_status *statuses;
- struct afs_callback *callbacks;
- struct afs_vnode *vnode = call->reply[0];
+ struct afs_status_cb *scb;
const __be32 *bp;
u32 tmp;
int ret;
@@ -2260,10 +2052,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- statuses = call->reply[1];
- ret = afs_decode_status(call, &bp, &statuses[call->count],
- call->count == 0 ? vnode : NULL,
- NULL, NULL);
+ scb = &call->out_scb[call->count];
+ ret = xdr_decode_AFSFetchStatus(&bp, call, scb);
if (ret < 0)
return ret;
@@ -2302,13 +2092,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
_debug("unmarshall CB array");
bp = call->buffer;
- callbacks = call->reply[2];
- callbacks[call->count].version = ntohl(bp[0]);
- callbacks[call->count].expires_at = xdr_decode_expiry(call, ntohl(bp[1]));
- callbacks[call->count].type = ntohl(bp[2]);
- statuses = call->reply[1];
- if (call->count == 0 && vnode && statuses[0].abort_code == 0)
- xdr_decode_AFSCallBack(call, vnode, &bp);
+ scb = &call->out_scb[call->count];
+ xdr_decode_AFSCallBack(&bp, call, scb);
call->count++;
if (call->count < call->count2)
goto more_cbs;
@@ -2323,7 +2108,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSVolSync(&bp, call->reply[3]);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
@@ -2351,8 +2136,7 @@ static const struct afs_call_type afs_RXFSInlineBulkStatus = {
int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
struct afs_net *net,
struct afs_fid *fids,
- struct afs_file_status *statuses,
- struct afs_callback *callbacks,
+ struct afs_status_cb *statuses,
unsigned int nr_fids,
struct afs_volsync *volsync)
{
@@ -2361,7 +2145,7 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
int i;
if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_inline_bulk_status(fc, net, fids, statuses, callbacks,
+ return yfs_fs_inline_bulk_status(fc, net, fids, statuses,
nr_fids, volsync);
_enter(",%x,{%llx:%llu},%u",
@@ -2376,12 +2160,9 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
}
call->key = fc->key;
- call->reply[0] = NULL; /* vnode for fid[0] */
- call->reply[1] = statuses;
- call->reply[2] = callbacks;
- call->reply[3] = volsync;
+ call->out_scb = statuses;
+ call->out_volsync = volsync;
call->count2 = nr_fids;
- call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -2393,9 +2174,9 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*bp++ = htonl(fids[i].unique);
}
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &fids[0]);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -2405,7 +2186,6 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_fetch_acl(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[1];
struct afs_acl *acl;
const __be32 *bp;
unsigned int size;
@@ -2430,7 +2210,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL);
if (!acl)
return -ENOMEM;
- call->reply[0] = acl;
+ call->ret_acl = acl;
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
call->unmarshall++;
@@ -2451,11 +2231,10 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = afs_decode_status(call, &bp, &vnode->status, vnode,
- &vnode->status.data_version, NULL);
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_AFSVolSync(&bp, call->reply[2]);
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
@@ -2469,7 +2248,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
static void afs_destroy_fs_fetch_acl(struct afs_call *call)
{
- kfree(call->reply[0]);
+ kfree(call->ret_acl);
afs_flat_call_destructor(call);
}
@@ -2486,7 +2265,8 @@ static const struct afs_call_type afs_RXFSFetchACL = {
/*
* Fetch the ACL for a file.
*/
-struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc)
+struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -2503,10 +2283,9 @@ struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc)
}
call->key = fc->key;
- call->reply[0] = NULL;
- call->reply[1] = vnode;
- call->reply[2] = NULL; /* volsync */
- call->ret_reply0 = true;
+ call->ret_acl = NULL;
+ call->out_scb = scb;
+ call->out_volsync = NULL;
/* marshall the parameters */
bp = call->request;
@@ -2515,7 +2294,6 @@ struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc)
bp[2] = htonl(vnode->fid.vnode);
bp[3] = htonl(vnode->fid.unique);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_make_call(&fc->ac, call, GFP_KERNEL);
@@ -2523,19 +2301,43 @@ struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc)
}
/*
+ * Deliver reply data to any operation that returns file status and volume
+ * sync.
+ */
+static int afs_deliver_fs_file_status_and_vol(struct afs_call *call)
+{
+ const __be32 *bp;
+ int ret;
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
+ if (ret < 0)
+ return ret;
+ xdr_decode_AFSVolSync(&bp, call->out_volsync);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
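/*
 * Editor's note -- a minimal user-space sketch (hypothetical names, not
 * kernel API) of the XDR walk the deliver routines above perform: fields
 * are fixed-width big-endian words consumed from the reply buffer in wire
 * order, status record first, volsync last.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct demo_status { uint32_t type, nlink, size_lo; };

static const uint32_t *demo_decode_status(const uint32_t *bp,
					  struct demo_status *s)
{
	s->type    = ntohl(*bp++);	/* consume one 32-bit word each */
	s->nlink   = ntohl(*bp++);
	s->size_lo = ntohl(*bp++);
	return bp;			/* caller continues decoding here */
}

int main(void)
{
	uint32_t wire[3] = { htonl(1), htonl(2), htonl(4096) };
	struct demo_status s;

	demo_decode_status(wire, &s);
	printf("type=%u nlink=%u size=%u\n", s.type, s.nlink, s.size_lo);
	return 0;
}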
+
+/*
* FS.StoreACL operation type
*/
static const struct afs_call_type afs_RXFSStoreACL = {
.name = "FS.StoreACL",
.op = afs_FS_StoreACL,
- .deliver = afs_deliver_fs_status_and_vol,
+ .deliver = afs_deliver_fs_file_status_and_vol,
.destructor = afs_flat_call_destructor,
};
/*
 * Store the ACL for a file.
*/
-int afs_fs_store_acl(struct afs_fs_cursor *fc, const struct afs_acl *acl)
+int afs_fs_store_acl(struct afs_fs_cursor *fc, const struct afs_acl *acl,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -2555,8 +2357,8 @@ int afs_fs_store_acl(struct afs_fs_cursor *fc, const struct afs_acl *acl)
}
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[2] = NULL; /* volsync */
+ call->out_scb = scb;
+ call->out_volsync = NULL;
/* marshall the parameters */
bp = call->request;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index c4652b42d545..b42d9d09669c 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -23,6 +23,7 @@
#include <linux/namei.h>
#include <linux/iversion.h>
#include "internal.h"
+#include "afs_fs.h"
static const struct inode_operations afs_symlink_inode_operations = {
.get_link = page_get_link,
@@ -58,38 +59,50 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
* Initialise an inode from the vnode status.
*/
static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
- struct afs_vnode *parent_vnode)
+ struct afs_cb_interest *cbi,
+ struct afs_vnode *parent_vnode,
+ struct afs_status_cb *scb)
{
+ struct afs_cb_interest *old_cbi = NULL;
+ struct afs_file_status *status = &scb->status;
struct inode *inode = AFS_VNODE_TO_I(vnode);
+ struct timespec64 t;
_debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
- vnode->status.type,
- vnode->status.nlink,
- (unsigned long long) vnode->status.size,
- vnode->status.data_version,
- vnode->status.mode);
+ status->type,
+ status->nlink,
+ (unsigned long long) status->size,
+ status->data_version,
+ status->mode);
- read_seqlock_excl(&vnode->cb_lock);
+ write_seqlock(&vnode->cb_lock);
- afs_update_inode_from_status(vnode, &vnode->status, NULL,
- AFS_VNODE_NOT_YET_SET);
+ vnode->status = *status;
- switch (vnode->status.type) {
+ t = status->mtime_client;
+ inode->i_ctime = t;
+ inode->i_mtime = t;
+ inode->i_atime = t;
+ inode->i_uid = make_kuid(&init_user_ns, status->owner);
+ inode->i_gid = make_kgid(&init_user_ns, status->group);
+ set_nlink(&vnode->vfs_inode, status->nlink);
+
+ switch (status->type) {
case AFS_FTYPE_FILE:
- inode->i_mode = S_IFREG | vnode->status.mode;
+ inode->i_mode = S_IFREG | status->mode;
inode->i_op = &afs_file_inode_operations;
inode->i_fop = &afs_file_operations;
inode->i_mapping->a_ops = &afs_fs_aops;
break;
case AFS_FTYPE_DIR:
- inode->i_mode = S_IFDIR | vnode->status.mode;
+ inode->i_mode = S_IFDIR | status->mode;
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
inode->i_mapping->a_ops = &afs_dir_aops;
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
- if ((vnode->status.mode & 0777) == 0644) {
+ if ((status->mode & 0777) == 0644) {
inode->i_flags |= S_AUTOMOUNT;
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
@@ -99,7 +112,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
inode->i_fop = &afs_mntpt_file_operations;
inode->i_mapping->a_ops = &afs_fs_aops;
} else {
- inode->i_mode = S_IFLNK | vnode->status.mode;
+ inode->i_mode = S_IFLNK | status->mode;
inode->i_op = &afs_symlink_inode_operations;
inode->i_mapping->a_ops = &afs_fs_aops;
}
@@ -107,7 +120,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
break;
default:
dump_vnode(vnode, parent_vnode);
- read_sequnlock_excl(&vnode->cb_lock);
+ write_sequnlock(&vnode->cb_lock);
return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type);
}
@@ -116,17 +129,175 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
* for consistency with other AFS clients.
*/
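	/* (Editor's note: i.e. the size rounded up to whole KiB and then
	 *  doubled, because i_blocks counts 512-byte units.) */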
inode->i_blocks = ((i_size_read(inode) + 1023) >> 10) << 1;
- vnode->invalid_before = vnode->status.data_version;
+ i_size_write(&vnode->vfs_inode, status->size);
+
+ vnode->invalid_before = status->data_version;
+ inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
+
+ if (!scb->have_cb) {
+ /* it's a symlink we just created (the fileserver
+ * didn't give us a callback) */
+ vnode->cb_expires_at = ktime_get_real_seconds();
+ } else {
+ vnode->cb_expires_at = scb->callback.expires_at;
+ old_cbi = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->cb_lock.lock));
+ if (cbi != old_cbi)
+ rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(cbi));
+ else
+ old_cbi = NULL;
+ set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
- read_sequnlock_excl(&vnode->cb_lock);
+ write_sequnlock(&vnode->cb_lock);
+ afs_put_cb_interest(afs_v2net(vnode), old_cbi);
return 0;
}
/*
+ * Update the core inode struct from a returned status record.
+ */
+static void afs_apply_status(struct afs_fs_cursor *fc,
+ struct afs_vnode *vnode,
+ struct afs_status_cb *scb,
+ const afs_dataversion_t *expected_version)
+{
+ struct afs_file_status *status = &scb->status;
+ struct timespec64 t;
+ umode_t mode;
+ bool data_changed = false;
+
+ BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags));
+
+ if (status->type != vnode->status.type) {
+ pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+			   vnode->status.type, status->type);
+ afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status);
+ return;
+ }
+
+ if (status->nlink != vnode->status.nlink)
+ set_nlink(&vnode->vfs_inode, status->nlink);
+
+ if (status->owner != vnode->status.owner)
+ vnode->vfs_inode.i_uid = make_kuid(&init_user_ns, status->owner);
+
+ if (status->group != vnode->status.group)
+ vnode->vfs_inode.i_gid = make_kgid(&init_user_ns, status->group);
+
+ if (status->mode != vnode->status.mode) {
+ mode = vnode->vfs_inode.i_mode;
+ mode &= ~S_IALLUGO;
+ mode |= status->mode;
+ WRITE_ONCE(vnode->vfs_inode.i_mode, mode);
+ }
+
+ t = status->mtime_client;
+ vnode->vfs_inode.i_ctime = t;
+ vnode->vfs_inode.i_mtime = t;
+ vnode->vfs_inode.i_atime = t;
+
+ if (vnode->status.data_version != status->data_version)
+ data_changed = true;
+
+ vnode->status = *status;
+
+ if (expected_version &&
+ *expected_version != status->data_version) {
+ kdebug("vnode modified %llx on {%llx:%llu} [exp %llx] %s",
+ (unsigned long long) status->data_version,
+ vnode->fid.vid, vnode->fid.vnode,
+ (unsigned long long) *expected_version,
+ fc->type ? fc->type->name : "???");
+ vnode->invalid_before = status->data_version;
+ if (vnode->status.type == AFS_FTYPE_DIR) {
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ afs_stat_v(vnode, n_inval);
+ } else {
+ set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
+ }
+ } else if (vnode->status.type == AFS_FTYPE_DIR) {
+ /* Expected directory change is handled elsewhere so
+ * that we can locally edit the directory and save on a
+ * download.
+ */
+ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ data_changed = false;
+ }
+
+ if (data_changed) {
+ inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
+ i_size_write(&vnode->vfs_inode, status->size);
+ }
+}
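/*
 * Editor's note -- a user-space sketch (hypothetical names) of the core
 * decision afs_apply_status() makes above: cached data stays valid only
 * if the data version the server returned is exactly the one we predicted
 * for our own operation; any other value means a third party changed the
 * file and the cache must be dropped.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vnode { uint64_t data_version; bool cache_valid; };

static void demo_apply(struct demo_vnode *v, uint64_t server_version,
		       const uint64_t *expected)
{
	if (expected && *expected != server_version)
		v->cache_valid = false;	/* someone else modified the file */
	v->data_version = server_version;
}

int main(void)
{
	struct demo_vnode v = { .data_version = 3, .cache_valid = true };
	uint64_t expected = 4;		/* we stored data, expect version 4 */

	demo_apply(&v, 7, &expected);	/* server says 7: concurrent writer */
	printf("cache_valid=%d\n", v.cache_valid);
	return 0;
}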
+
+/*
+ * Apply a callback to a vnode.
+ */
+static void afs_apply_callback(struct afs_fs_cursor *fc,
+ struct afs_vnode *vnode,
+ struct afs_status_cb *scb,
+ unsigned int cb_break)
+{
+ struct afs_cb_interest *old;
+ struct afs_callback *cb = &scb->callback;
+
+ if (!afs_cb_is_broken(cb_break, vnode, fc->cbi)) {
+ vnode->cb_expires_at = cb->expires_at;
+ old = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->cb_lock.lock));
+ if (old != fc->cbi) {
+ rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(fc->cbi));
+ afs_put_cb_interest(afs_v2net(vnode), old);
+ }
+ set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
+}
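/*
 * Editor's note -- afs_cb_is_broken() compares a break-counter snapshot
 * taken before the RPC with the current value; the promise is only kept
 * if nothing broke the callback while the call was in flight.  A minimal
 * user-space model of that check (hypothetical names):
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint demo_cb_break;	/* bumped by callback-break events */

static unsigned int demo_snapshot(void)
{
	return atomic_load(&demo_cb_break);
}

static bool demo_promise_still_good(unsigned int snap)
{
	return atomic_load(&demo_cb_break) == snap;
}

int main(void)
{
	unsigned int snap = demo_snapshot();	/* before issuing the RPC */

	atomic_fetch_add(&demo_cb_break, 1);	/* a break races with us */
	return demo_promise_still_good(snap);	/* false: discard promise */
}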
+
+/*
+ * Apply the received status and callback to an inode all in the same critical
+ * section to avoid races with afs_validate().
+ */
+void afs_vnode_commit_status(struct afs_fs_cursor *fc,
+ struct afs_vnode *vnode,
+ unsigned int cb_break,
+ const afs_dataversion_t *expected_version,
+ struct afs_status_cb *scb)
+{
+ if (fc->ac.error != 0)
+ return;
+
+ write_seqlock(&vnode->cb_lock);
+
+ if (scb->have_error) {
+ if (scb->status.abort_code == VNOVNODE) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ clear_nlink(&vnode->vfs_inode);
+ __afs_break_callback(vnode);
+ }
+ } else {
+ if (scb->have_status)
+ afs_apply_status(fc, vnode, scb, expected_version);
+ if (scb->have_cb)
+ afs_apply_callback(fc, vnode, scb, cb_break);
+ }
+
+ write_sequnlock(&vnode->cb_lock);
+
+ if (fc->ac.error == 0 && scb->have_status)
+ afs_cache_permit(vnode, fc->key, cb_break, scb);
+}
+
+/*
* Fetch file status from the volume.
*/
-int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
+int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool is_new,
+ afs_access_t *_caller_access)
{
+ struct afs_status_cb *scb;
struct afs_fs_cursor fc;
int ret;
@@ -135,18 +306,38 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
vnode->flags);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ return -ENOMEM;
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_file_status(&fc, NULL, new_inode);
+ afs_fs_fetch_file_status(&fc, scb, NULL);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ if (fc.error) {
+ /* Do nothing. */
+ } else if (is_new) {
+ ret = afs_inode_init_from_status(vnode, key, fc.cbi,
+ NULL, scb);
+ fc.error = ret;
+ if (ret == 0)
+ afs_cache_permit(vnode, key, fc.cb_break, scb);
+ } else {
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
+ }
+ afs_check_for_remote_deletion(&fc, vnode);
ret = afs_end_vnode_operation(&fc);
}
+ if (ret == 0 && _caller_access)
+ *_caller_access = scb->status.caller_access;
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
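/*
 * Editor's note -- afs_fetch_status() above shows the canonical operation
 * shape in this series: allocate a status_cb buffer, begin the operation
 * (taking the io_lock), rotate through candidate fileservers until one
 * accepts the call, commit the returned status exactly once, then end the
 * operation.  A user-space model of that control flow (hypothetical names):
 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_try_server(int i)
{
	return i == 2;			/* pretend only server #2 responds */
}

static int demo_operation(void)
{
	int i, ret = -1;

	for (i = 0; i < 4; i++) {	/* afs_select_fileserver() analogue */
		if (demo_try_server(i)) {
			ret = 0;	/* commit the results once, here */
			break;
		}
	}
	return ret;			/* afs_end_vnode_operation() analogue */
}

int main(void)
{
	printf("op -> %d\n", demo_operation());
	return 0;
}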
@@ -156,10 +347,10 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
*/
int afs_iget5_test(struct inode *inode, void *opaque)
{
- struct afs_iget_data *data = opaque;
+ struct afs_iget_data *iget_data = opaque;
struct afs_vnode *vnode = AFS_FS_I(inode);
- return memcmp(&vnode->fid, &data->fid, sizeof(data->fid)) == 0;
+ return memcmp(&vnode->fid, &iget_data->fid, sizeof(iget_data->fid)) == 0;
}
/*
@@ -177,17 +368,19 @@ static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque)
*/
static int afs_iget5_set(struct inode *inode, void *opaque)
{
- struct afs_iget_data *data = opaque;
+ struct afs_iget_data *iget_data = opaque;
struct afs_vnode *vnode = AFS_FS_I(inode);
- vnode->fid = data->fid;
- vnode->volume = data->volume;
+ vnode->fid = iget_data->fid;
+ vnode->volume = iget_data->volume;
+ vnode->cb_v_break = iget_data->cb_v_break;
+ vnode->cb_s_break = iget_data->cb_s_break;
/* YFS supports 96-bit vnode IDs, but Linux only supports
* 64-bit inode numbers.
*/
- inode->i_ino = data->fid.vnode;
- inode->i_generation = data->fid.unique;
+ inode->i_ino = iget_data->fid.vnode;
+ inode->i_generation = iget_data->fid.unique;
return 0;
}
@@ -197,38 +390,42 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
*/
struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
{
- struct afs_iget_data data;
struct afs_super_info *as;
struct afs_vnode *vnode;
struct inode *inode;
static atomic_t afs_autocell_ino;
+ struct afs_iget_data iget_data = {
+ .cb_v_break = 0,
+ .cb_s_break = 0,
+ };
+
_enter("");
as = sb->s_fs_info;
if (as->volume) {
- data.volume = as->volume;
- data.fid.vid = as->volume->vid;
+ iget_data.volume = as->volume;
+ iget_data.fid.vid = as->volume->vid;
}
if (root) {
- data.fid.vnode = 1;
- data.fid.unique = 1;
+ iget_data.fid.vnode = 1;
+ iget_data.fid.unique = 1;
} else {
- data.fid.vnode = atomic_inc_return(&afs_autocell_ino);
- data.fid.unique = 0;
+ iget_data.fid.vnode = atomic_inc_return(&afs_autocell_ino);
+ iget_data.fid.unique = 0;
}
- inode = iget5_locked(sb, data.fid.vnode,
+ inode = iget5_locked(sb, iget_data.fid.vnode,
afs_iget5_pseudo_dir_test, afs_iget5_set,
- &data);
+ &iget_data);
if (!inode) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
_debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
- inode, inode->i_ino, data.fid.vid, data.fid.vnode,
- data.fid.unique);
+ inode, inode->i_ino, iget_data.fid.vid, iget_data.fid.vnode,
+ iget_data.fid.unique);
vnode = AFS_FS_I(inode);
@@ -299,23 +496,24 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
* inode retrieval
*/
struct inode *afs_iget(struct super_block *sb, struct key *key,
- struct afs_fid *fid, struct afs_file_status *status,
- struct afs_callback *cb, struct afs_cb_interest *cbi,
+ struct afs_iget_data *iget_data,
+ struct afs_status_cb *scb,
+ struct afs_cb_interest *cbi,
struct afs_vnode *parent_vnode)
{
- struct afs_iget_data data = { .fid = *fid };
struct afs_super_info *as;
struct afs_vnode *vnode;
+ struct afs_fid *fid = &iget_data->fid;
struct inode *inode;
int ret;
_enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique);
as = sb->s_fs_info;
- data.volume = as->volume;
+ iget_data->volume = as->volume;
inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set,
- &data);
+ iget_data);
if (!inode) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
@@ -332,43 +530,25 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
return inode;
}
- if (!status) {
+ if (!scb) {
/* it's a remotely extant inode */
- ret = afs_fetch_status(vnode, key, true);
+ ret = afs_fetch_status(vnode, key, true, NULL);
if (ret < 0)
goto bad_inode;
} else {
- /* it's an inode we just created */
- memcpy(&vnode->status, status, sizeof(vnode->status));
-
- if (!cb) {
- /* it's a symlink we just created (the fileserver
- * didn't give us a callback) */
- vnode->cb_version = 0;
- vnode->cb_type = 0;
- vnode->cb_expires_at = ktime_get();
- } else {
- vnode->cb_version = cb->version;
- vnode->cb_type = cb->type;
- vnode->cb_expires_at = cb->expires_at;
- vnode->cb_interest = afs_get_cb_interest(cbi);
- set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- }
-
- vnode->cb_expires_at += ktime_get_real_seconds();
+ ret = afs_inode_init_from_status(vnode, key, cbi, parent_vnode,
+ scb);
+ if (ret < 0)
+ goto bad_inode;
}
- ret = afs_inode_init_from_status(vnode, key, parent_vnode);
- if (ret < 0)
- goto bad_inode;
-
afs_get_inode_cache(vnode);
/* success */
clear_bit(AFS_VNODE_UNSET, &vnode->flags);
inode->i_flags |= S_NOATIME;
unlock_new_inode(inode);
- _leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type);
+ _leave(" = %p", inode);
return inode;
/* failure */
@@ -400,6 +580,66 @@ void afs_zap_data(struct afs_vnode *vnode)
}
/*
+ * Check the validity of a vnode/inode.
+ */
+bool afs_check_validity(struct afs_vnode *vnode)
+{
+ struct afs_cb_interest *cbi;
+ struct afs_server *server;
+ struct afs_volume *volume = vnode->volume;
+ time64_t now = ktime_get_real_seconds();
+ bool valid, need_clear = false;
+ unsigned int cb_break, cb_s_break, cb_v_break;
+ int seq = 0;
+
+ do {
+ read_seqbegin_or_lock(&vnode->cb_lock, &seq);
+ cb_v_break = READ_ONCE(volume->cb_v_break);
+ cb_break = vnode->cb_break;
+
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ cbi = rcu_dereference(vnode->cb_interest);
+ server = rcu_dereference(cbi->server);
+ cb_s_break = READ_ONCE(server->cb_s_break);
+
+ if (vnode->cb_s_break != cb_s_break ||
+ vnode->cb_v_break != cb_v_break) {
+ vnode->cb_s_break = cb_s_break;
+ vnode->cb_v_break = cb_v_break;
+ need_clear = true;
+ valid = false;
+ } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ need_clear = true;
+ valid = false;
+ } else if (vnode->cb_expires_at - 10 <= now) {
+ need_clear = true;
+ valid = false;
+ } else {
+ valid = true;
+ }
+ } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ valid = true;
+ } else {
+ vnode->cb_v_break = cb_v_break;
+ valid = false;
+ }
+
+ } while (need_seqretry(&vnode->cb_lock, seq));
+
+ done_seqretry(&vnode->cb_lock, seq);
+
+ if (need_clear) {
+ write_seqlock(&vnode->cb_lock);
+ if (cb_break == vnode->cb_break)
+ __afs_break_callback(vnode);
+ write_sequnlock(&vnode->cb_lock);
+ valid = false;
+ }
+
+ return valid;
+}
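/*
 * Editor's note -- a sketch of the seqlock read pattern that
 * afs_check_validity() relies on, modelled in user space with a bare
 * version counter (hypothetical names; the kernel's
 * read_seqbegin_or_lock() additionally falls back to taking the lock
 * exclusively after repeated retries, which is omitted here).
 */
#include <stdatomic.h>

static atomic_uint demo_seq;		/* even = stable; writers bump twice */
static int demo_value;

static int demo_read(void)
{
	unsigned int seq;
	int v;

	do {
		while ((seq = atomic_load(&demo_seq)) & 1)
			;		/* writer in progress, wait */
		v = demo_value;		/* speculative read */
	} while (atomic_load(&demo_seq) != seq);	/* retry if changed */

	return v;
}

int main(void)
{
	return demo_read();
}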
+
+/*
* validate a vnode/inode
* - there are several things we need to check
* - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
@@ -410,7 +650,6 @@ void afs_zap_data(struct afs_vnode *vnode)
*/
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
- time64_t now = ktime_get_real_seconds();
bool valid;
int ret;
@@ -418,36 +657,9 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
- /* Quickly check the callback state. Ideally, we'd use read_seqbegin
- * here, but we have no way to pass the net namespace to the RCU
- * cleanup for the server record.
- */
- read_seqlock_excl(&vnode->cb_lock);
-
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
- if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break ||
- vnode->cb_v_break != vnode->volume->cb_v_break) {
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
- vnode->cb_v_break = vnode->volume->cb_v_break;
- valid = false;
- } else if (vnode->status.type == AFS_FTYPE_DIR &&
- (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
- vnode->cb_expires_at - 10 <= now)) {
- valid = false;
- } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
- vnode->cb_expires_at - 10 <= now) {
- valid = false;
- } else {
- valid = true;
- }
- } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- valid = true;
- } else {
- vnode->cb_v_break = vnode->volume->cb_v_break;
- valid = false;
- }
-
- read_sequnlock_excl(&vnode->cb_lock);
+ rcu_read_lock();
+ valid = afs_check_validity(vnode);
+ rcu_read_unlock();
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
clear_nlink(&vnode->vfs_inode);
@@ -463,7 +675,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
* access */
if (!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
_debug("not promised");
- ret = afs_fetch_status(vnode, key, false);
+ ret = afs_fetch_status(vnode, key, false, NULL);
if (ret < 0) {
if (ret == -ENOENT) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
@@ -534,6 +746,7 @@ int afs_drop_inode(struct inode *inode)
*/
void afs_evict_inode(struct inode *inode)
{
+ struct afs_cb_interest *cbi;
struct afs_vnode *vnode;
vnode = AFS_FS_I(inode);
@@ -550,10 +763,14 @@ void afs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- if (vnode->cb_interest) {
- afs_put_cb_interest(afs_i2net(inode), vnode->cb_interest);
- vnode->cb_interest = NULL;
+ write_seqlock(&vnode->cb_lock);
+ cbi = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->cb_lock.lock));
+ if (cbi) {
+ afs_put_cb_interest(afs_i2net(inode), cbi);
+ rcu_assign_pointer(vnode->cb_interest, NULL);
}
+ write_sequnlock(&vnode->cb_lock);
while (!list_empty(&vnode->wb_keys)) {
struct afs_wb_key *wbk = list_entry(vnode->wb_keys.next,
@@ -573,6 +790,7 @@ void afs_evict_inode(struct inode *inode)
}
#endif
+ afs_prune_wb_keys(vnode);
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
key_put(vnode->silly_key);
vnode->silly_key = NULL;
@@ -587,9 +805,10 @@ void afs_evict_inode(struct inode *inode)
int afs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
struct key *key;
- int ret;
+ int ret = -ENOMEM;
_enter("{%llx:%llu},{n=%pd},%x",
vnode->fid.vid, vnode->fid.vnode, dentry,
@@ -601,6 +820,10 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
+ if (!scb)
+ goto error;
+
/* flush any dirty data outstanding on a regular file */
if (S_ISREG(vnode->vfs_inode.i_mode))
filemap_write_and_wait(vnode->vfs_inode.i_mapping);
@@ -611,25 +834,33 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
- goto error;
+ goto error_scb;
}
}
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
+ if (attr->ia_valid & ATTR_SIZE)
+ data_version++;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_setattr(&fc, attr);
+ afs_fs_setattr(&fc, attr, scb);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_check_for_remote_deletion(&fc, vnode);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
if (!(attr->ia_valid & ATTR_FILE))
key_put(key);
+error_scb:
+ kfree(scb);
error:
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index b3cd6e8ad59d..2073c1a3ab4b 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -66,6 +66,8 @@ struct afs_fs_context {
struct afs_iget_data {
struct afs_fid fid;
struct afs_volume *volume; /* volume on which resides */
+ unsigned int cb_v_break; /* Pre-fetch volume break count */
+ unsigned int cb_s_break; /* Pre-fetch server break count */
};
enum afs_call_state {
@@ -111,8 +113,12 @@ struct afs_call {
struct rxrpc_call *rxcall; /* RxRPC call handle */
struct key *key; /* security for this call */
struct afs_net *net; /* The network namespace */
- struct afs_server *cm_server; /* Server affected by incoming CM call */
+ union {
+ struct afs_server *server;
+ struct afs_vlserver *vlserver;
+ };
struct afs_cb_interest *cbi; /* Callback interest for server used */
+ struct afs_vnode *lvnode; /* vnode being locked */
void *request; /* request data (first part) */
struct address_space *mapping; /* Pages being written from */
struct iov_iter iter; /* Buffer iterator */
@@ -122,7 +128,20 @@ struct afs_call {
struct bio_vec bvec[1];
};
void *buffer; /* reply receive buffer */
- void *reply[4]; /* Where to put the reply */
+ union {
+ long ret0; /* Value to reply with instead of 0 */
+ struct afs_addr_list *ret_alist;
+ struct afs_vldb_entry *ret_vldb;
+ struct afs_acl *ret_acl;
+ };
+ struct afs_fid *out_fid;
+ struct afs_status_cb *out_dir_scb;
+ struct afs_status_cb *out_scb;
+ struct yfs_acl *out_yacl;
+ struct afs_volsync *out_volsync;
+ struct afs_volume_status *out_volstatus;
+ struct afs_read *read_request;
+ unsigned int server_index;
pgoff_t first; /* first page in mapping to deal with */
pgoff_t last; /* last page in mapping to deal with */
atomic_t usage;
@@ -131,10 +150,10 @@ struct afs_call {
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
u32 epoch;
+ unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
unsigned first_offset; /* offset into mapping[first] */
- unsigned int cb_break; /* cb_break + cb_s_break before the call */
union {
unsigned last_to; /* amount of mapping[last] */
unsigned count2; /* count used in unmarshalling */
@@ -145,9 +164,9 @@ struct afs_call {
bool send_pages; /* T if data from mapping should be sent */
bool need_attention; /* T if RxRPC poked us */
bool async; /* T if asynchronous */
- bool ret_reply0; /* T if should return reply[0] on success */
bool upgrade; /* T to request service upgrade */
- bool want_reply_time; /* T if want reply_time */
+ bool have_reply_time; /* T if have got reply_time */
+ bool intr; /* T if interruptible */
u16 service_id; /* Actual service ID (after upgrade) */
unsigned int debug_id; /* Trace ID */
u32 operation_ID; /* operation ID for an incoming call */
@@ -159,8 +178,6 @@ struct afs_call {
} __attribute__((packed));
__be64 tmp64;
};
- afs_dataversion_t expected_version; /* Updated version expected from store */
- afs_dataversion_t expected_version_2; /* 2nd updated version expected from store */
ktime_t reply_time; /* Time of first reply packet */
};
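/*
 * Editor's note -- an illustration (plain C, hypothetical names) of what
 * the struct change above buys: replacing the indexed void *reply[4] with
 * typed, named slots turns mismatched producers and consumers into
 * compile errors instead of silent type confusion.
 */
struct demo_scb { int dummy; };
struct demo_volsync { int dummy; };

struct demo_call_old { void *reply[4]; };	/* which index held what? */
struct demo_call_new {
	struct demo_scb *out_scb;		/* self-describing */
	struct demo_volsync *out_volsync;
};

static void demo_set(struct demo_call_new *call, struct demo_scb *scb)
{
	call->out_scb = scb;	/* a volsync pointer here won't compile */
}

int main(void)
{
	struct demo_call_new call;
	struct demo_scb scb;

	demo_set(&call, &scb);
	return 0;
}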
@@ -221,7 +238,8 @@ struct afs_read {
unsigned int index; /* Which page we're reading into */
unsigned int nr_pages;
unsigned int offset; /* offset into current page */
- void (*page_done)(struct afs_call *, struct afs_read *);
+ struct afs_vnode *vnode;
+ void (*page_done)(struct afs_read *);
struct page **pages;
struct page *array[];
};
@@ -367,13 +385,13 @@ struct afs_cell {
time64_t last_inactive; /* Time of last drop of usage count */
atomic_t usage;
unsigned long flags;
-#define AFS_CELL_FL_NOT_READY 0 /* The cell record is not ready for use */
-#define AFS_CELL_FL_NO_GC 1 /* The cell was added manually, don't auto-gc */
-#define AFS_CELL_FL_NOT_FOUND 2 /* Permanent DNS error */
-#define AFS_CELL_FL_DNS_FAIL 3 /* Failed to access DNS */
-#define AFS_CELL_FL_NO_LOOKUP_YET 4 /* Not completed first DNS lookup yet */
+#define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
+#define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */
enum afs_cell_state state;
short error;
+ enum dns_record_source dns_source:8; /* Latest source of data from lookup */
+ enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
+ unsigned int dns_lookup_count; /* Counter of DNS lookups */
/* Active fileserver interaction state. */
struct list_head proc_volumes; /* procfs volume list */
@@ -538,7 +556,10 @@ struct afs_server {
struct afs_vol_interest {
struct hlist_node srv_link; /* Link in server->cb_volumes */
struct hlist_head cb_interests; /* List of callback interests on the server */
- afs_volid_t vid; /* Volume ID to match */
+ union {
+ struct rcu_head rcu;
+ afs_volid_t vid; /* Volume ID to match */
+ };
unsigned int usage;
};
@@ -550,7 +571,10 @@ struct afs_cb_interest {
struct afs_vol_interest *vol_interest;
struct afs_server *server; /* Server on which this interest resides */
struct super_block *sb; /* Superblock on which inodes reside */
- afs_volid_t vid; /* Volume ID to match */
+ union {
+ struct rcu_head rcu;
+ afs_volid_t vid; /* Volume ID to match */
+ };
refcount_t usage;
};
@@ -660,15 +684,13 @@ struct afs_vnode {
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
- struct afs_cb_interest *cb_interest; /* Server on which this resides */
+ struct afs_cb_interest __rcu *cb_interest; /* Server on which this resides */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */
time64_t cb_expires_at; /* time at which callback expires */
- unsigned cb_version; /* callback version */
- afs_callback_type_t cb_type; /* type of callback */
};
static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
@@ -755,6 +777,7 @@ struct afs_vl_cursor {
* Cursor for iterating over a set of fileservers.
*/
struct afs_fs_cursor {
+ const struct afs_call_type *type; /* Type of call done */
struct afs_addr_cursor ac;
struct afs_vnode *vnode;
struct afs_server_list *server_list; /* Current server list (pins ref) */
@@ -772,6 +795,7 @@ struct afs_fs_cursor {
#define AFS_FS_CURSOR_VNOVOL 0x0008 /* Set if seen VNOVOL */
#define AFS_FS_CURSOR_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */
#define AFS_FS_CURSOR_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
+#define AFS_FS_CURSOR_INTR 0x0040 /* Set if op is interruptible */
unsigned short nr_iterations; /* Number of server iterations */
};
@@ -882,7 +906,6 @@ extern const struct address_space_operations afs_dir_aops;
extern const struct dentry_operations afs_fs_dentry_operations;
extern void afs_d_release(struct dentry *);
-extern int afs_dir_remove_link(struct dentry *, struct key *, unsigned long, unsigned long);
/*
* dir_edit.c
@@ -940,50 +963,48 @@ extern int afs_flock(struct file *, int, struct file_lock *);
/*
* fsclient.c
*/
-#define AFS_VNODE_NOT_YET_SET 0x01
-#define AFS_VNODE_META_CHANGED 0x02
-#define AFS_VNODE_DATA_CHANGED 0x04
-extern void afs_update_inode_from_status(struct afs_vnode *, struct afs_file_status *,
- const afs_dataversion_t *, u8);
-
-extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *, bool);
+extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *,
+ struct afs_volsync *);
extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *);
-extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
-extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t, u64,
- struct afs_fid *, struct afs_file_status *, struct afs_callback *);
-extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64);
-extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
-extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
- struct afs_fid *, struct afs_file_status *);
+extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *);
+extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t,
+ struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
+extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool,
+ struct afs_status_cb *);
+extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *,
+ struct afs_status_cb *, struct afs_status_cb *);
+extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
+ struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
extern int afs_fs_rename(struct afs_fs_cursor *, const char *,
- struct afs_vnode *, const char *, u64, u64);
+ struct afs_vnode *, const char *,
+ struct afs_status_cb *, struct afs_status_cb *);
extern int afs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
- pgoff_t, pgoff_t, unsigned, unsigned);
-extern int afs_fs_setattr(struct afs_fs_cursor *, struct iattr *);
+ pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *);
+extern int afs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *);
extern int afs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
-extern int afs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t);
-extern int afs_fs_extend_lock(struct afs_fs_cursor *);
-extern int afs_fs_release_lock(struct afs_fs_cursor *);
+extern int afs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *);
+extern int afs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *);
+extern int afs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *);
extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *);
extern struct afs_call *afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *,
unsigned int);
extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_file_status *,
- struct afs_callback *, unsigned int,
- struct afs_volsync *);
+ struct afs_fid *, struct afs_status_cb *,
+ unsigned int, struct afs_volsync *);
extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_file_status *,
- struct afs_callback *, struct afs_volsync *);
+ struct afs_fid *, struct afs_status_cb *,
+ struct afs_volsync *);
struct afs_acl {
u32 size;
u8 data[];
};
-extern struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *);
-extern int afs_fs_store_acl(struct afs_fs_cursor *, const struct afs_acl *);
+extern struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *, struct afs_status_cb *);
+extern int afs_fs_store_acl(struct afs_fs_cursor *, const struct afs_acl *,
+ struct afs_status_cb *);
/*
* fs_probe.c
@@ -995,15 +1016,20 @@ extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
/*
* inode.c
*/
-extern int afs_fetch_status(struct afs_vnode *, struct key *, bool);
+extern void afs_vnode_commit_status(struct afs_fs_cursor *,
+ struct afs_vnode *,
+ unsigned int,
+ const afs_dataversion_t *,
+ struct afs_status_cb *);
+extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_iget5_test(struct inode *, void *);
extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct super_block *, struct key *,
- struct afs_fid *, struct afs_file_status *,
- struct afs_callback *,
+ struct afs_iget_data *, struct afs_status_cb *,
struct afs_cb_interest *,
struct afs_vnode *);
extern void afs_zap_data(struct afs_vnode *);
+extern bool afs_check_validity(struct afs_vnode *);
extern int afs_validate(struct afs_vnode *, struct key *);
extern int afs_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern int afs_setattr(struct dentry *, struct iattr *);
@@ -1096,7 +1122,7 @@ static inline void afs_put_sysnames(struct afs_sysnames *sysnames) {}
* rotate.c
*/
extern bool afs_begin_vnode_operation(struct afs_fs_cursor *, struct afs_vnode *,
- struct key *);
+ struct key *, bool);
extern bool afs_select_fileserver(struct afs_fs_cursor *);
extern bool afs_select_current_fileserver(struct afs_fs_cursor *);
extern int afs_end_vnode_operation(struct afs_fs_cursor *);
@@ -1121,6 +1147,12 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, bool);
extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause);
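/* Editor's note: the helper below ties a call to its cursor -- it copies
 * the cursor's interruptibility flag onto the call and records the call
 * type in the cursor so later debug output can name the operation.
 */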
+static inline void afs_set_fc_call(struct afs_call *call, struct afs_fs_cursor *fc)
+{
+ call->intr = fc->flags & AFS_FS_CURSOR_INTR;
+ fc->type = call->type;
+}
+
static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
{
call->kvec[0].iov_base = buf;
@@ -1201,7 +1233,8 @@ static inline void afs_set_call_complete(struct afs_call *call,
*/
extern void afs_put_permits(struct afs_permits *);
extern void afs_clear_permits(struct afs_vnode *);
-extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int);
+extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int,
+ struct afs_status_cb *);
extern void afs_zap_permits(struct rcu_head *);
extern struct key *afs_request_key(struct afs_cell *);
extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
@@ -1327,7 +1360,6 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata);
extern int afs_writepage(struct page *, struct writeback_control *);
extern int afs_writepages(struct address_space *, struct writeback_control *);
-extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
@@ -1343,33 +1375,36 @@ extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
/*
* yfsclient.c
*/
-extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *, bool);
-extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
-extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, u64,
- struct afs_fid *, struct afs_file_status *, struct afs_callback *);
-extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, u64,
- struct afs_fid *, struct afs_file_status *, struct afs_callback *);
-extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
-extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64);
-extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
-extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
- struct afs_fid *, struct afs_file_status *);
-extern int yfs_fs_rename(struct afs_fs_cursor *, const char *,
- struct afs_vnode *, const char *, u64, u64);
+extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *,
+ struct afs_volsync *);
+extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *);
+extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *,
+ struct afs_fid *, struct afs_status_cb *);
+extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *,
+ struct afs_fid *, struct afs_status_cb *);
+extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *,
+ struct afs_status_cb *, struct afs_status_cb *);
+extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool,
+ struct afs_status_cb *);
+extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *,
+ struct afs_status_cb *, struct afs_status_cb *);
+extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
+ struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
+extern int yfs_fs_rename(struct afs_fs_cursor *, const char *, struct afs_vnode *, const char *,
+ struct afs_status_cb *, struct afs_status_cb *);
extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
- pgoff_t, pgoff_t, unsigned, unsigned);
-extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *);
+ pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *);
+extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *);
extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
-extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t);
-extern int yfs_fs_extend_lock(struct afs_fs_cursor *);
-extern int yfs_fs_release_lock(struct afs_fs_cursor *);
+extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *);
+extern int yfs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *);
+extern int yfs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *);
extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_file_status *,
- struct afs_callback *, struct afs_volsync *);
+ struct afs_fid *, struct afs_status_cb *,
+ struct afs_volsync *);
extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_file_status *,
- struct afs_callback *, unsigned int,
- struct afs_volsync *);
+ struct afs_fid *, struct afs_status_cb *,
+ unsigned int, struct afs_volsync *);
struct yfs_acl {
struct afs_acl *acl; /* Dir/file/symlink ACL */
@@ -1382,8 +1417,10 @@ struct yfs_acl {
};
extern void yfs_free_opaque_acl(struct yfs_acl *);
-extern struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *, unsigned int);
-extern int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *, const struct afs_acl *);
+extern struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *, struct yfs_acl *,
+ struct afs_status_cb *);
+extern int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *, const struct afs_acl *,
+ struct afs_status_cb *);
/*
* Miscellaneous inline functions.
@@ -1398,14 +1435,6 @@ static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
return &vnode->vfs_inode;
}
-static inline void afs_vnode_commit_status(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- unsigned int cb_break)
-{
- if (fc->ac.error == 0)
- afs_cache_permit(vnode, fc->key, cb_break);
-}
-
static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc,
struct afs_vnode *vnode)
{
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index be2ee3bbd0a9..371501d28e08 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -53,7 +53,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
seq_printf(m, "%3u %6lld %2u %s\n",
atomic_read(&cell->usage),
cell->dns_expiry - ktime_get_real_seconds(),
- vllist ? vllist->nr_servers : 0,
+ vllist->nr_servers,
cell->name);
return 0;
}
@@ -296,8 +296,8 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
if (v == SEQ_START_TOKEN) {
seq_printf(m, "# source %s, status %s\n",
- dns_record_sources[vllist->source],
- dns_lookup_statuses[vllist->status]);
+ dns_record_sources[vllist ? vllist->source : 0],
+ dns_lookup_statuses[vllist ? vllist->status : 0]);
return 0;
}
@@ -336,7 +336,7 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
if (pos == 0)
return SEQ_START_TOKEN;
- if (!vllist || pos - 1 >= vllist->nr_servers)
+ if (pos - 1 >= vllist->nr_servers)
return NULL;
return &vllist->servers[pos - 1];
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index c3ae324781f8..b00c739e0e63 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -25,7 +25,7 @@
* them here also using the io_lock.
*/
bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- struct key *key)
+ struct key *key, bool intr)
{
memset(fc, 0, sizeof(*fc));
fc->vnode = vnode;
@@ -33,10 +33,15 @@ bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode
fc->ac.error = SHRT_MAX;
fc->error = -EDESTADDRREQ;
- if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
- fc->error = -EINTR;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
+ if (intr) {
+ fc->flags |= AFS_FS_CURSOR_INTR;
+ if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
+ fc->error = -EINTR;
+ fc->flags |= AFS_FS_CURSOR_STOP;
+ return false;
+ }
+ } else {
+ mutex_lock(&vnode->io_lock);
}
if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
@@ -61,7 +66,8 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
fc->untried = (1UL << fc->server_list->nr_servers) - 1;
fc->index = READ_ONCE(fc->server_list->preferred);
- cbi = vnode->cb_interest;
+ cbi = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->io_lock));
if (cbi) {
/* See if the vnode's preferred record is still available */
for (i = 0; i < fc->server_list->nr_servers; i++) {
@@ -82,8 +88,8 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
/* Note that the callback promise is effectively broken */
write_seqlock(&vnode->cb_lock);
- ASSERTCMP(cbi, ==, vnode->cb_interest);
- vnode->cb_interest = NULL;
+ ASSERTCMP(cbi, ==, rcu_access_pointer(vnode->cb_interest));
+ rcu_assign_pointer(vnode->cb_interest, NULL);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
vnode->cb_break++;
write_sequnlock(&vnode->cb_lock);
@@ -118,10 +124,14 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
*/
static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
{
- msleep_interruptible(1000);
- if (signal_pending(current)) {
- fc->error = -ERESTARTSYS;
- return false;
+ if (fc->flags & AFS_FS_CURSOR_INTR) {
+ msleep_interruptible(1000);
+ if (signal_pending(current)) {
+ fc->error = -ERESTARTSYS;
+ return false;
+ }
+ } else {
+ msleep(1000);
}
return true;
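/*
 * Editor's note -- a user-space sketch (hypothetical names) of the two
 * sleep flavours above: an interruptible wait lets a pending signal abort
 * the retry loop, while an uninterruptible wait always sleeps the full
 * period before retrying.
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static bool demo_sleep_and_retry(bool interruptible)
{
	struct timespec t = { .tv_sec = 1, .tv_nsec = 0 };

	if (interruptible) {
		if (nanosleep(&t, NULL) == -1 && errno == EINTR)
			return false;	/* caller reports -ERESTARTSYS */
	} else {
		while (nanosleep(&t, &t) == -1 && errno == EINTR)
			;		/* restart until the second is up */
	}
	return true;
}

int main(void)
{
	return demo_sleep_and_retry(true) ? 0 : 1;
}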
@@ -408,7 +418,9 @@ selected_server:
if (error < 0)
goto failed_set_error;
- fc->cbi = afs_get_cb_interest(vnode->cb_interest);
+ fc->cbi = afs_get_cb_interest(
+ rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->io_lock)));
read_lock(&server->fs_lock);
alist = rcu_dereference_protected(server->addresses,
@@ -459,6 +471,8 @@ no_more_servers:
s->probe.abort_code);
}
+ error = e.error;
+
failed_set_error:
fc->error = error;
failed:
@@ -476,12 +490,15 @@ failed:
bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
{
struct afs_vnode *vnode = fc->vnode;
- struct afs_cb_interest *cbi = vnode->cb_interest;
+ struct afs_cb_interest *cbi;
struct afs_addr_list *alist;
int error = fc->ac.error;
_enter("");
+ cbi = rcu_dereference_protected(vnode->cb_interest,
+ lockdep_is_held(&vnode->io_lock));
+
switch (error) {
case SHRT_MAX:
if (!cbi) {
@@ -490,7 +507,7 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
return false;
}
- fc->cbi = afs_get_cb_interest(vnode->cb_interest);
+ fc->cbi = afs_get_cb_interest(cbi);
read_lock(&cbi->server->fs_lock);
alist = rcu_dereference_protected(cbi->server->addresses,
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a34a89c75c6a..4fa5ce92b9b9 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -188,7 +188,7 @@ void afs_put_call(struct afs_call *call)
if (call->type->destructor)
call->type->destructor(call);
- afs_put_server(call->net, call->cm_server);
+ afs_put_server(call->net, call->server);
afs_put_cb_interest(call->net, call->cbi);
afs_put_addrlist(call->alist);
kfree(call->request);
@@ -417,6 +417,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
afs_wake_up_async_call :
afs_wake_up_call_waiter),
call->upgrade,
+ call->intr,
call->debug_id);
if (IS_ERR(rxcall)) {
ret = PTR_ERR(rxcall);
@@ -426,6 +427,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
call->rxcall = rxcall;
+ if (call->max_lifespan)
+ rxrpc_kernel_set_max_life(call->net->socket, rxcall,
+ call->max_lifespan);
+
/* send the request */
iov[0].iov_base = call->request;
iov[0].iov_len = call->request_size;
@@ -529,11 +534,11 @@ static void afs_deliver_to_call(struct afs_call *call)
return;
}
- if (call->want_reply_time &&
+ if (!call->have_reply_time &&
rxrpc_kernel_get_reply_time(call->net->socket,
call->rxcall,
&call->reply_time))
- call->want_reply_time = false;
+ call->have_reply_time = true;
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
@@ -648,7 +653,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
break;
}
- if (timeout == 0 &&
+ if (call->intr && timeout == 0 &&
life == last_life && signal_pending(current)) {
if (stalled)
break;
@@ -691,10 +696,9 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
ret = ac->error;
switch (ret) {
case 0:
- if (call->ret_reply0) {
- ret = (long)call->reply[0];
- call->reply[0] = NULL;
- }
+ ret = call->ret0;
+ call->ret0 = 0;
+
/* Fall through */
case -ECONNABORTED:
ac->responded = true;
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 5f58a9a17e69..5d8ece98561e 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -87,11 +87,9 @@ void afs_clear_permits(struct afs_vnode *vnode)
permits = rcu_dereference_protected(vnode->permit_cache,
lockdep_is_held(&vnode->lock));
RCU_INIT_POINTER(vnode->permit_cache, NULL);
- vnode->cb_break++;
spin_unlock(&vnode->lock);
- if (permits)
- afs_put_permits(permits);
+ afs_put_permits(permits);
}
/*
@@ -118,10 +116,10 @@ static void afs_hash_permits(struct afs_permits *permits)
* as the ACL *may* have changed.
*/
void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
- unsigned int cb_break)
+ unsigned int cb_break, struct afs_status_cb *scb)
{
struct afs_permits *permits, *xpermits, *replacement, *zap, *new = NULL;
- afs_access_t caller_access = READ_ONCE(vnode->status.caller_access);
+ afs_access_t caller_access = scb->status.caller_access;
size_t size = 0;
bool changed = false;
int i, j;
@@ -148,7 +146,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
if (afs_cb_is_broken(cb_break, vnode,
- vnode->cb_interest)) {
+ rcu_dereference(vnode->cb_interest))) {
changed = true;
break;
}
@@ -178,7 +176,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
}
- if (afs_cb_is_broken(cb_break, vnode, vnode->cb_interest))
+ if (afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)))
goto someone_else_changed_it;
/* We need a ref on any permits list we want to copy as we'll have to
@@ -255,14 +253,16 @@ found:
kfree(new);
+ rcu_read_lock();
spin_lock(&vnode->lock);
zap = rcu_access_pointer(vnode->permit_cache);
- if (!afs_cb_is_broken(cb_break, vnode, vnode->cb_interest) &&
+ if (!afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)) &&
zap == permits)
rcu_assign_pointer(vnode->permit_cache, replacement);
else
zap = replacement;
spin_unlock(&vnode->lock);
+ rcu_read_unlock();
afs_put_permits(zap);
out_put:
afs_put_permits(permits);
@@ -322,13 +322,12 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
*/
_debug("no valid permit");
- ret = afs_fetch_status(vnode, key, false);
+ ret = afs_fetch_status(vnode, key, false, _access);
if (ret < 0) {
*_access = 0;
_leave(" = %d", ret);
return ret;
}
- *_access = vnode->status.caller_access;
}
_leave(" = 0 [access %x]", *_access);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 65b33b6da48b..52c170b59cfd 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -521,8 +521,15 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a
alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
&server->uuid);
if (IS_ERR(alist)) {
- fc->ac.error = PTR_ERR(alist);
- _leave(" = f [%d]", fc->ac.error);
+ if ((PTR_ERR(alist) == -ERESTARTSYS ||
+ PTR_ERR(alist) == -EINTR) &&
+ !(fc->flags & AFS_FS_CURSOR_INTR) &&
+ server->addresses) {
+ _leave(" = t [intr]");
+ return true;
+ }
+ fc->error = PTR_ERR(alist);
+ _leave(" = f [%d]", fc->error);
return false;
}
@@ -574,7 +581,11 @@ retry:
ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
TASK_INTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
- fc->ac.error = ret;
+ if (!(fc->flags & AFS_FS_CURSOR_INTR) && server->addresses) {
+ _leave(" = t [intr]");
+ return true;
+ }
+ fc->error = ret;
_leave(" = f [intr]");
return false;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 783c68cd1a35..f18911e8d770 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -426,7 +426,7 @@ static int afs_set_super(struct super_block *sb, struct fs_context *fc)
static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
{
struct afs_super_info *as = AFS_FS_S(sb);
- struct afs_fid fid;
+ struct afs_iget_data iget_data;
struct inode *inode = NULL;
int ret;
@@ -451,11 +451,13 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
- fid.vid = as->volume->vid;
- fid.vnode = 1;
- fid.vnode_hi = 0;
- fid.unique = 1;
- inode = afs_iget(sb, ctx->key, &fid, NULL, NULL, NULL, NULL);
+ iget_data.fid.vid = as->volume->vid;
+ iget_data.fid.vnode = 1;
+ iget_data.fid.vnode_hi = 0;
+ iget_data.fid.unique = 1;
+ iget_data.cb_v_break = as->volume->cb_v_break;
+ iget_data.cb_s_break = 0;
+ inode = afs_iget(sb, ctx->key, &iget_data, NULL, NULL, NULL);
}
if (IS_ERR(inode))
@@ -677,13 +679,12 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->volume = NULL;
vnode->lock_key = NULL;
vnode->permit_cache = NULL;
- vnode->cb_interest = NULL;
+ RCU_INIT_POINTER(vnode->cb_interest, NULL);
#ifdef CONFIG_AFS_FSCACHE
vnode->cache = NULL;
#endif
vnode->flags = 1 << AFS_VNODE_UNSET;
- vnode->cb_type = 0;
vnode->lock_state = AFS_VNODE_LOCK_NONE;
init_rwsem(&vnode->rmdir_lock);
@@ -708,7 +709,7 @@ static void afs_destroy_inode(struct inode *inode)
_debug("DESTROY INODE %p", inode);
- ASSERTCMP(vnode->cb_interest, ==, NULL);
+ ASSERTCMP(rcu_access_pointer(vnode->cb_interest), ==, NULL);
atomic_dec(&afs_count_active_inodes);
}
@@ -741,7 +742,7 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
return PTR_ERR(key);
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
fc.flags |= AFS_FS_CURSOR_NO_VSLEEP;
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
@@ -749,7 +750,6 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
ret = afs_end_vnode_operation(&fc);
}
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
index b4f1a84519b9..61e25010ff33 100644
--- a/fs/afs/vl_list.c
+++ b/fs/afs/vl_list.c
@@ -232,18 +232,16 @@ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
if (bs.status > NR__dns_lookup_status)
bs.status = NR__dns_lookup_status;
+ /* See if we can update an old server record */
server = NULL;
- if (previous) {
- /* See if we can update an old server record */
- for (i = 0; i < previous->nr_servers; i++) {
- struct afs_vlserver *p = previous->servers[i].server;
-
- if (p->name_len == bs.name_len &&
- p->port == bs.port &&
- strncasecmp(b, p->name, bs.name_len) == 0) {
- server = afs_get_vlserver(p);
- break;
- }
+ for (i = 0; i < previous->nr_servers; i++) {
+ struct afs_vlserver *p = previous->servers[i].server;
+
+ if (p->name_len == bs.name_len &&
+ p->port == bs.port &&
+ strncasecmp(b, p->name, bs.name_len) == 0) {
+ server = afs_get_vlserver(p);
+ break;
}
}
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index b05e0de04f42..beb991563939 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -33,8 +33,8 @@ static bool afs_vl_probe_done(struct afs_vlserver *server)
void afs_vlserver_probe_result(struct afs_call *call)
{
struct afs_addr_list *alist = call->alist;
- struct afs_vlserver *server = call->reply[0];
- unsigned int server_index = (long)call->reply[1];
+ struct afs_vlserver *server = call->vlserver;
+ unsigned int server_index = call->server_index;
unsigned int index = call->addr_ix;
unsigned int rtt = UINT_MAX;
bool have_result = false;
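
This hunk is one instance of a conversion applied throughout the series: the untyped call->reply[] slot array gives way to named, typed members on struct afs_call, so the compiler catches what a stray cast to the wrong slot previously hid. The before/after shape in miniature (members other than the ones in the hunk are illustrative):

	struct vlserver;			/* opaque here */

	struct call_before {
		void *reply[4];			/* which index holds what? */
	};

	struct call_after {
		struct vlserver *vlserver;	/* typed; misuse won't compile */
		unsigned int server_index;	/* no more (long) casts */
	};
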
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index 7adde83a0648..3f845489a9f0 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -43,11 +43,29 @@ bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cel
static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
{
struct afs_cell *cell = vc->cell;
+ unsigned int dns_lookup_count;
+
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE ||
+ cell->dns_expiry <= ktime_get_real_seconds()) {
+ dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
+ set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
+ queue_work(afs_wq, &cell->manager);
+
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ if (wait_var_event_interruptible(
+ &cell->dns_lookup_count,
+ smp_load_acquire(&cell->dns_lookup_count)
+ != dns_lookup_count) < 0) {
+ vc->error = -ERESTARTSYS;
+ return false;
+ }
+ }
- if (wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET,
- TASK_INTERRUPTIBLE)) {
- vc->error = -ERESTARTSYS;
- return false;
+ /* Status load is ordered after lookup counter load */
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ vc->error = -EDESTADDRREQ;
+ return false;
+ }
}
read_lock(&cell->vl_servers_lock);
@@ -55,7 +73,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
rcu_dereference_protected(cell->vl_servers,
lockdep_is_held(&cell->vl_servers_lock)));
read_unlock(&cell->vl_servers_lock);
- if (!vc->server_list || !vc->server_list->nr_servers)
+ if (!vc->server_list->nr_servers)
return false;
vc->untried = (1UL << vc->server_list->nr_servers) - 1;
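
The waiting scheme above pairs an acquire load of cell->dns_lookup_count with a release increment on the lookup side (not shown in this hunk): the cursor samples the counter, kicks the cell manager, sleeps until the counter moves, and only then trusts cell->dns_source, since the acquire orders the status read after the counter read. A userspace approximation with C11 atomics standing in for smp_load_acquire() and wait_var_event_interruptible() (all names illustrative; the real code sleeps interruptibly rather than yielding):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <sched.h>

	struct cell_state {
		atomic_uint lookup_count;	/* bumped with release ordering
						 * when a DNS lookup completes */
		atomic_bool unavailable;	/* cleared once a record exists */
	};

	static bool wait_for_fresh_lookup(struct cell_state *cell)
	{
		unsigned int seen = atomic_load_explicit(&cell->lookup_count,
							 memory_order_acquire);

		/* ...queue a lookup here, as the hunk queues cell->manager... */

		while (atomic_load_explicit(&cell->lookup_count,
					    memory_order_acquire) == seen)
			sched_yield();

		/* Ordered after the counter load by the acquire above. */
		return !atomic_load_explicit(&cell->unavailable,
					     memory_order_relaxed);
	}
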
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index dd9ba4e96fb3..3d4b9836a2e2 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -34,7 +34,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
uvldb = call->buffer;
- entry = call->reply[0];
+ entry = call->ret_vldb;
nr_servers = ntohl(uvldb->nServers);
if (nr_servers > AFS_NMAXNSERVERS)
@@ -110,7 +110,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call)
{
- kfree(call->reply[0]);
+ kfree(call->ret_vldb);
afs_flat_call_destructor(call);
}
@@ -155,8 +155,8 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc,
}
call->key = vc->key;
- call->reply[0] = entry;
- call->ret_reply0 = true;
+ call->ret_vldb = entry;
+ call->max_lifespan = AFS_VL_MAX_LIFESPAN;
/* Marshall the parameters */
bp = call->request;
@@ -214,7 +214,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
if (!alist)
return -ENOMEM;
alist->version = uniquifier;
- call->reply[0] = alist;
+ call->ret_alist = alist;
call->count = count;
call->count2 = nentries;
call->unmarshall++;
@@ -229,7 +229,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
if (ret < 0)
return ret;
- alist = call->reply[0];
+ alist = call->ret_alist;
bp = call->buffer;
count = min(call->count, 4U);
for (i = 0; i < count; i++)
@@ -249,8 +249,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
static void afs_vl_get_addrs_u_destructor(struct afs_call *call)
{
- afs_put_server(call->net, (struct afs_server *)call->reply[0]);
- kfree(call->reply[1]);
+ afs_put_addrlist(call->ret_alist);
return afs_flat_call_destructor(call);
}
@@ -287,8 +286,8 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
return ERR_PTR(-ENOMEM);
call->key = vc->key;
- call->reply[0] = NULL;
- call->ret_reply0 = true;
+ call->ret_alist = NULL;
+ call->max_lifespan = AFS_VL_MAX_LIFESPAN;
/* Marshall the parameters */
bp = call->request;
@@ -358,9 +357,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call)
static void afs_destroy_vl_get_capabilities(struct afs_call *call)
{
- struct afs_vlserver *server = call->reply[0];
-
- afs_put_vlserver(call->net, server);
+ afs_put_vlserver(call->net, call->vlserver);
afs_flat_call_destructor(call);
}
@@ -398,11 +395,11 @@ struct afs_call *afs_vl_get_capabilities(struct afs_net *net,
return ERR_PTR(-ENOMEM);
call->key = key;
- call->reply[0] = afs_get_vlserver(server);
- call->reply[1] = (void *)(long)server_index;
+ call->vlserver = afs_get_vlserver(server);
+ call->server_index = server_index;
call->upgrade = true;
- call->want_reply_time = true;
call->async = true;
+ call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
/* marshall the parameters */
bp = call->request;
@@ -460,7 +457,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (!alist)
return -ENOMEM;
alist->version = uniquifier;
- call->reply[0] = alist;
+ call->ret_alist = alist;
if (call->count == 0)
goto extract_volendpoints;
@@ -488,7 +485,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (ret < 0)
return ret;
- alist = call->reply[0];
+ alist = call->ret_alist;
bp = call->buffer;
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
@@ -609,7 +606,6 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
break;
}
- alist = call->reply[0];
_leave(" = 0 [done]");
return 0;
}
@@ -644,8 +640,8 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
return ERR_PTR(-ENOMEM);
call->key = vc->key;
- call->reply[0] = NULL;
- call->ret_reply0 = true;
+ call->ret_alist = NULL;
+ call->max_lifespan = AFS_VL_MAX_LIFESPAN;
/* Marshall the parameters */
bp = call->request;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 0122d7445fba..8bcab95f1127 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -314,6 +314,46 @@ static void afs_redirty_pages(struct writeback_control *wbc,
}
/*
+ * completion of write to server
+ */
+static void afs_pages_written_back(struct afs_vnode *vnode,
+ pgoff_t first, pgoff_t last)
+{
+ struct pagevec pv;
+ unsigned long priv;
+ unsigned count, loop;
+
+ _enter("{%llx:%llu},{%lx-%lx}",
+ vnode->fid.vid, vnode->fid.vnode, first, last);
+
+ pagevec_init(&pv);
+
+ do {
+ _debug("done %lx-%lx", first, last);
+
+ count = last - first + 1;
+ if (count > PAGEVEC_SIZE)
+ count = PAGEVEC_SIZE;
+ pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
+ first, count, pv.pages);
+ ASSERTCMP(pv.nr, ==, count);
+
+ for (loop = 0; loop < count; loop++) {
+ priv = page_private(pv.pages[loop]);
+ trace_afs_page_dirty(vnode, tracepoint_string("clear"),
+ pv.pages[loop]->index, priv);
+ set_page_private(pv.pages[loop], 0);
+ end_page_writeback(pv.pages[loop]);
+ }
+ first += count;
+ __pagevec_release(&pv);
+ } while (first <= last);
+
+ afs_prune_wb_keys(vnode);
+ _leave("");
+}
+
+/*
* write to a file
*/
static int afs_store_data(struct address_space *mapping,
@@ -322,6 +362,7 @@ static int afs_store_data(struct address_space *mapping,
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_wb_key *wbk = NULL;
struct list_head *p;
int ret = -ENOKEY, ret2;
@@ -333,6 +374,10 @@ static int afs_store_data(struct address_space *mapping,
vnode->fid.unique,
first, last, offset, to);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
+ if (!scb)
+ return -ENOMEM;
+
spin_lock(&vnode->wb_lock);
p = vnode->wb_keys.next;
@@ -351,6 +396,7 @@ try_next_key:
spin_unlock(&vnode->wb_lock);
afs_put_wb_key(wbk);
+ kfree(scb);
_leave(" = %d [no keys]", ret);
return ret;
@@ -361,14 +407,19 @@ found_key:
_debug("USE WB KEY %u", key_serial(wbk->key));
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
+ afs_dataversion_t data_version = vnode->status.data_version + 1;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_store_data(&fc, mapping, first, last, offset, to);
+ afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_check_for_remote_deletion(&fc, vnode);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
+ if (fc.ac.error == 0)
+ afs_pages_written_back(vnode, first, last);
ret = afs_end_vnode_operation(&fc);
}
@@ -393,6 +444,7 @@ found_key:
}
afs_put_wb_key(wbk);
+ kfree(scb);
_leave(" = %d", ret);
return ret;
}
@@ -679,46 +731,6 @@ int afs_writepages(struct address_space *mapping,
}
/*
- * completion of write to server
- */
-void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
-{
- struct pagevec pv;
- unsigned long priv;
- unsigned count, loop;
- pgoff_t first = call->first, last = call->last;
-
- _enter("{%llx:%llu},{%lx-%lx}",
- vnode->fid.vid, vnode->fid.vnode, first, last);
-
- pagevec_init(&pv);
-
- do {
- _debug("done %lx-%lx", first, last);
-
- count = last - first + 1;
- if (count > PAGEVEC_SIZE)
- count = PAGEVEC_SIZE;
- pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
- first, count, pv.pages);
- ASSERTCMP(pv.nr, ==, count);
-
- for (loop = 0; loop < count; loop++) {
- priv = page_private(pv.pages[loop]);
- trace_afs_page_dirty(vnode, tracepoint_string("clear"),
- pv.pages[loop]->index, priv);
- set_page_private(pv.pages[loop], 0);
- end_page_writeback(pv.pages[loop]);
- }
- first += count;
- __pagevec_release(&pv);
- } while (first <= last);
-
- afs_prune_wb_keys(vnode);
- _leave("");
-}
-
-/*
* write to an AFS file
*/
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
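
afs_pages_written_back(), moved above its caller in this file, walks the written range in PAGEVEC_SIZE batches: look up a run of contiguous pages, clear each one's writeback state, release the batch, advance. The batching skeleton on its own, divorced from the page cache (the batch size and names are illustrative):

	#include <stdio.h>

	#define BATCH_SIZE 15	/* stand-in for PAGEVEC_SIZE */

	static void process_range(unsigned long first, unsigned long last)
	{
		while (first <= last) {
			unsigned long count = last - first + 1;

			if (count > BATCH_SIZE)
				count = BATCH_SIZE;
			/* fetch 'count' contiguous items at 'first',
			 * handle each, then release the whole batch */
			printf("batch %lu-%lu\n", first, first + count - 1);
			first += count;
		}
	}
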
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index c81f85003fc7..17f58fea7ec1 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -47,40 +47,52 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
void *buffer, size_t size)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_acl *acl = NULL;
struct key *key;
- int ret;
+ int ret = -ENOMEM;
+
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
+ if (!scb)
+ goto error;
key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key))
- return PTR_ERR(key);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error_scb;
+ }
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- acl = afs_fs_fetch_acl(&fc);
+ acl = afs_fs_fetch_acl(&fc, scb);
}
afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
if (ret == 0) {
ret = acl->size;
if (size > 0) {
- ret = -ERANGE;
- if (acl->size > size)
- return -ERANGE;
- memcpy(buffer, acl->data, acl->size);
- ret = acl->size;
+ if (acl->size <= size)
+ memcpy(buffer, acl->data, acl->size);
+ else
+ ret = -ERANGE;
}
kfree(acl);
}
key_put(key);
+error_scb:
+ kfree(scb);
+error:
return ret;
}
@@ -93,41 +105,53 @@ static int afs_xattr_set_acl(const struct xattr_handler *handler,
const void *buffer, size_t size, int flags)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_acl *acl = NULL;
struct key *key;
- int ret;
+ int ret = -ENOMEM;
if (flags == XATTR_CREATE)
return -EINVAL;
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key))
- return PTR_ERR(key);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
+ if (!scb)
+ goto error;
acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
- if (!acl) {
- key_put(key);
- return -ENOMEM;
+ if (!acl)
+ goto error_scb;
+
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error_acl;
}
acl->size = size;
memcpy(acl->data, buffer, size);
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_store_acl(&fc, acl);
+ afs_fs_store_acl(&fc, acl, scb);
}
afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
- kfree(acl);
key_put(key);
+error_acl:
+ kfree(acl);
+error_scb:
+ kfree(scb);
+error:
return ret;
}
@@ -146,12 +170,12 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
void *buffer, size_t size)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct yfs_acl *yacl = NULL;
struct key *key;
- unsigned int flags = 0;
char buf[16], *data;
- int which = 0, dsize, ret;
+ int which = 0, dsize, ret = -ENOMEM;
if (strcmp(name, "acl") == 0)
which = 0;
@@ -164,65 +188,81 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
else
return -EOPNOTSUPP;
+ yacl = kzalloc(sizeof(struct yfs_acl), GFP_KERNEL);
+ if (!yacl)
+ goto error;
+
if (which == 0)
- flags |= YFS_ACL_WANT_ACL;
+ yacl->flags |= YFS_ACL_WANT_ACL;
else if (which == 3)
- flags |= YFS_ACL_WANT_VOL_ACL;
+ yacl->flags |= YFS_ACL_WANT_VOL_ACL;
+
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
+ if (!scb)
+ goto error_yacl;
key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key))
- return PTR_ERR(key);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error_scb;
+ }
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- yacl = yfs_fs_fetch_opaque_acl(&fc, flags);
+ yfs_fs_fetch_opaque_acl(&fc, yacl, scb);
}
afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
- if (ret == 0) {
- switch (which) {
- case 0:
- data = yacl->acl->data;
- dsize = yacl->acl->size;
- break;
- case 1:
- data = buf;
- dsize = snprintf(buf, sizeof(buf), "%u",
- yacl->inherit_flag);
- break;
- case 2:
- data = buf;
- dsize = snprintf(buf, sizeof(buf), "%u",
- yacl->num_cleaned);
- break;
- case 3:
- data = yacl->vol_acl->data;
- dsize = yacl->vol_acl->size;
- break;
- default:
- ret = -EOPNOTSUPP;
- goto out;
- }
+ if (ret < 0)
+ goto error_key;
+
+ switch (which) {
+ case 0:
+ data = yacl->acl->data;
+ dsize = yacl->acl->size;
+ break;
+ case 1:
+ data = buf;
+ dsize = snprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
+ break;
+ case 2:
+ data = buf;
+ dsize = snprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
+ break;
+ case 3:
+ data = yacl->vol_acl->data;
+ dsize = yacl->vol_acl->size;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto error_key;
+ }
- ret = dsize;
- if (size > 0) {
- if (dsize > size) {
- ret = -ERANGE;
- goto out;
- }
- memcpy(buffer, data, dsize);
+ ret = dsize;
+ if (size > 0) {
+ if (dsize > size) {
+ ret = -ERANGE;
+ goto error_key;
}
+ memcpy(buffer, data, dsize);
}
-out:
- yfs_free_opaque_acl(yacl);
+error_key:
key_put(key);
+error_scb:
+ kfree(scb);
+error_yacl:
+ yfs_free_opaque_acl(yacl);
+error:
return ret;
}
@@ -235,42 +275,54 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
const void *buffer, size_t size, int flags)
{
struct afs_fs_cursor fc;
+ struct afs_status_cb *scb;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_acl *acl = NULL;
struct key *key;
- int ret;
+ int ret = -ENOMEM;
if (flags == XATTR_CREATE ||
strcmp(name, "acl") != 0)
return -EINVAL;
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key))
- return PTR_ERR(key);
+ scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
+ if (!scb)
+ goto error;
acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
- if (!acl) {
- key_put(key);
- return -ENOMEM;
- }
+ if (!acl)
+ goto error_scb;
acl->size = size;
memcpy(acl->data, buffer, size);
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error_acl;
+ }
+
ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key)) {
+ if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
+ afs_dataversion_t data_version = vnode->status.data_version;
+
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- yfs_fs_store_opaque_acl2(&fc, acl);
+ yfs_fs_store_opaque_acl2(&fc, acl, scb);
}
afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break,
+ &data_version, scb);
ret = afs_end_vnode_operation(&fc);
}
+error_acl:
kfree(acl);
key_put(key);
+error_scb:
+ kfree(scb);
+error:
return ret;
}
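
All four xattr handlers above now follow the same unwind discipline: acquire resources in a fixed order, jump to the label matching how far you got, and let the labels free in reverse order on the way out, so success and failure share one exit path. The shape as a self-contained sketch (resource types and the key stand-in are illustrative):

	#include <errno.h>
	#include <stdlib.h>

	struct scb { char pad[64]; };
	struct acl { size_t size; char data[]; };

	static int op_with_unwind(size_t size)
	{
		struct scb *scb;
		struct acl *acl;
		void *key;
		int ret = -ENOMEM;

		scb = calloc(1, sizeof(*scb));		/* like kzalloc() */
		if (!scb)
			goto error;

		acl = malloc(sizeof(*acl) + size);
		if (!acl)
			goto error_scb;

		key = malloc(16);			/* afs_request_key() stand-in */
		if (!key)
			goto error_acl;

		ret = 0;				/* ...the operation itself... */

		free(key);
	error_acl:
		free(acl);				/* skipped if acl never existed */
	error_scb:
		free(scb);
	error:
		return ret;
	}
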
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 6cf7d161baa1..10de675dc6fc 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -183,24 +183,19 @@ static void xdr_dump_bad(const __be32 *bp)
/*
* Decode a YFSFetchStatus block
*/
-static int xdr_decode_YFSFetchStatus(struct afs_call *call,
- const __be32 **_bp,
- struct afs_file_status *status,
- struct afs_vnode *vnode,
- const afs_dataversion_t *expected_version,
- struct afs_read *read_req)
+static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
+ struct afs_file_status *status = &scb->status;
u32 type;
- u8 flags = 0;
status->abort_code = ntohl(xdr->abort_code);
if (status->abort_code != 0) {
- if (vnode && status->abort_code == VNOVNODE) {
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ if (status->abort_code == VNOVNODE)
status->nlink = 0;
- __afs_break_callback(vnode);
- }
+ scb->have_error = true;
return 0;
}
@@ -209,77 +204,28 @@ static int xdr_decode_YFSFetchStatus(struct afs_call *call,
case AFS_FTYPE_FILE:
case AFS_FTYPE_DIR:
case AFS_FTYPE_SYMLINK:
- if (type != status->type &&
- vnode &&
- !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
- pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- status->type, type);
- goto bad;
- }
status->type = type;
break;
default:
goto bad;
}
-#define EXTRACT_M4(FIELD) \
- do { \
- u32 x = ntohl(xdr->FIELD); \
- if (status->FIELD != x) { \
- flags |= AFS_VNODE_META_CHANGED; \
- status->FIELD = x; \
- } \
- } while (0)
-
-#define EXTRACT_M8(FIELD) \
- do { \
- u64 x = xdr_to_u64(xdr->FIELD); \
- if (status->FIELD != x) { \
- flags |= AFS_VNODE_META_CHANGED; \
- status->FIELD = x; \
- } \
- } while (0)
-
-#define EXTRACT_D8(FIELD) \
- do { \
- u64 x = xdr_to_u64(xdr->FIELD); \
- if (status->FIELD != x) { \
- flags |= AFS_VNODE_DATA_CHANGED; \
- status->FIELD = x; \
- } \
- } while (0)
-
- EXTRACT_M4(nlink);
- EXTRACT_D8(size);
- EXTRACT_D8(data_version);
- EXTRACT_M8(author);
- EXTRACT_M8(owner);
- EXTRACT_M8(group);
- EXTRACT_M4(mode);
- EXTRACT_M4(caller_access); /* call ticket dependent */
- EXTRACT_M4(anon_access);
-
- status->mtime_client = xdr_to_time(xdr->mtime_client);
- status->mtime_server = xdr_to_time(xdr->mtime_server);
- status->lock_count = ntohl(xdr->lock_count);
-
- if (read_req) {
- read_req->data_version = status->data_version;
- read_req->file_size = status->size;
- }
+ status->nlink = ntohl(xdr->nlink);
+ status->author = xdr_to_u64(xdr->author);
+ status->owner = xdr_to_u64(xdr->owner);
+ status->caller_access = ntohl(xdr->caller_access); /* Ticket dependent */
+ status->anon_access = ntohl(xdr->anon_access);
+ status->mode = ntohl(xdr->mode) & S_IALLUGO;
+ status->group = xdr_to_u64(xdr->group);
+ status->lock_count = ntohl(xdr->lock_count);
+
+ status->mtime_client = xdr_to_time(xdr->mtime_client);
+ status->mtime_server = xdr_to_time(xdr->mtime_server);
+ status->size = xdr_to_u64(xdr->size);
+ status->data_version = xdr_to_u64(xdr->data_version);
+ scb->have_status = true;
*_bp += xdr_size(xdr);
-
- if (vnode) {
- if (test_bit(AFS_VNODE_UNSET, &vnode->flags))
- flags |= AFS_VNODE_NOT_YET_SET;
- afs_update_inode_from_status(vnode, status, expected_version,
- flags);
- }
-
return 0;
bad:
@@ -288,73 +234,20 @@ bad:
}
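
The rewritten decoder no longer writes into the vnode, and no longer needs the EXTRACT_* change-tracking macros: it fills a detached afs_status_cb that the caller later applies to the vnode in one place, under one lock, via afs_vnode_commit_status(). A sketch of that decode-then-commit split, with the types pared down and the names illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	struct status { uint64_t size, data_version; };

	struct status_cb {		/* cf. struct afs_status_cb */
		struct status status;
		bool have_status;
	};

	struct vnode { struct status status; /* + lock in the real code */ };

	/* Phase 1: decode off the wire into scratch storage only. */
	static void decode_status(const uint64_t *wire, struct status_cb *scb)
	{
		scb->status.size = wire[0];
		scb->status.data_version = wire[1];
		scb->have_status = true;
	}

	/* Phase 2: apply to the vnode in one place (under a lock there). */
	static void commit_status(struct vnode *v, const struct status_cb *scb)
	{
		if (scb->have_status)
			v->status = scb->status;
	}
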
/*
- * Decode the file status. We need to lock the target vnode if we're going to
- * update its status so that stat() sees the attributes update atomically.
- */
-static int yfs_decode_status(struct afs_call *call,
- const __be32 **_bp,
- struct afs_file_status *status,
- struct afs_vnode *vnode,
- const afs_dataversion_t *expected_version,
- struct afs_read *read_req)
-{
- int ret;
-
- if (!vnode)
- return xdr_decode_YFSFetchStatus(call, _bp, status, vnode,
- expected_version, read_req);
-
- write_seqlock(&vnode->cb_lock);
- ret = xdr_decode_YFSFetchStatus(call, _bp, status, vnode,
- expected_version, read_req);
- write_sequnlock(&vnode->cb_lock);
- return ret;
-}
-
-/*
* Decode a YFSCallBack block
*/
-static void xdr_decode_YFSCallBack(struct afs_call *call,
- struct afs_vnode *vnode,
- const __be32 **_bp)
-{
- struct yfs_xdr_YFSCallBack *xdr = (void *)*_bp;
- struct afs_cb_interest *old, *cbi = call->cbi;
- u64 cb_expiry;
-
- write_seqlock(&vnode->cb_lock);
-
- if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) {
- cb_expiry = xdr_to_u64(xdr->expiration_time);
- do_div(cb_expiry, 10 * 1000 * 1000);
- vnode->cb_version = ntohl(xdr->version);
- vnode->cb_type = ntohl(xdr->type);
- vnode->cb_expires_at = cb_expiry + ktime_get_real_seconds();
- old = vnode->cb_interest;
- if (old != call->cbi) {
- vnode->cb_interest = cbi;
- cbi = old;
- }
- set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- }
-
- write_sequnlock(&vnode->cb_lock);
- call->cbi = cbi;
- *_bp += xdr_size(xdr);
-}
-
-static void xdr_decode_YFSCallBack_raw(const __be32 **_bp,
- struct afs_callback *cb)
+static void xdr_decode_YFSCallBack(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
struct yfs_xdr_YFSCallBack *x = (void *)*_bp;
- u64 cb_expiry;
-
- cb_expiry = xdr_to_u64(x->expiration_time);
- do_div(cb_expiry, 10 * 1000 * 1000);
- cb->version = ntohl(x->version);
- cb->type = ntohl(x->type);
- cb->expires_at = cb_expiry + ktime_get_real_seconds();
+ struct afs_callback *cb = &scb->callback;
+ ktime_t cb_expiry;
+ cb_expiry = call->reply_time;
+ cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
+ cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
+ scb->have_cb = true;
*_bp += xdr_size(x);
}
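
The rewritten callback decoder changes what the 100 ns expiration_time units are anchored to: instead of dividing the raw value down to seconds and adding the wall clock at decode time, it adds the duration to the time the RPC reply actually arrived, then converts to seconds, removing the skew from any delay between reply and decode. The arithmetic on its own:

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000LL

	/* reply_time_ns: when the reply arrived, in ns (cf. call->reply_time);
	 * wire_expiry: raw XDR value in 100 ns units. */
	static int64_t callback_expires_at(int64_t reply_time_ns,
					   uint64_t wire_expiry)
	{
		int64_t expiry_ns = reply_time_ns + (int64_t)wire_expiry * 100;

		return expiry_ns / NSEC_PER_SEC;	/* as ktime_divns() does */
	}
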
@@ -442,11 +335,10 @@ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp,
}
/*
- * deliver reply data to an FS.FetchStatus
+ * Deliver a reply that's a status, callback and volsync.
*/
-static int yfs_deliver_fs_fetch_status_vnode(struct afs_call *call)
+static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -454,16 +346,36 @@ static int yfs_deliver_fs_fetch_status_vnode(struct afs_call *call)
if (ret < 0)
return ret;
- _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
-
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSCallBack(call, vnode, &bp);
- xdr_decode_YFSVolSync(&bp, call->reply[1]);
+ xdr_decode_YFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * Deliver reply data to operations that just return a file status and a volume
+ * sync record.
+ */
+static int yfs_deliver_status_and_volsync(struct afs_call *call)
+{
+ const __be32 *bp;
+ int ret;
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -475,15 +387,15 @@ static int yfs_deliver_fs_fetch_status_vnode(struct afs_call *call)
static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = {
.name = "YFS.FetchStatus(vnode)",
.op = yfs_FS_FetchStatus,
- .deliver = yfs_deliver_fs_fetch_status_vnode,
+ .deliver = yfs_deliver_fs_status_cb_and_volsync,
.destructor = afs_flat_call_destructor,
};
/*
* Fetch the status information for a file.
*/
-int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync,
- bool new_inode)
+int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
+ struct afs_volsync *volsync)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -505,9 +417,8 @@ int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
}
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = volsync;
- call->expected_version = new_inode ? 1 : vnode->status.data_version;
+ call->out_scb = scb;
+ call->out_volsync = volsync;
/* marshall the parameters */
bp = call->request;
@@ -516,9 +427,9 @@ int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
bp = xdr_encode_YFSFid(bp, &vnode->fid);
yfs_check_req(call, bp);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -528,8 +439,7 @@ int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
*/
static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
- struct afs_read *req = call->reply[2];
+ struct afs_read *req = call->read_request;
const __be32 *bp;
unsigned int size;
int ret;
@@ -586,7 +496,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
if (req->offset == PAGE_SIZE) {
req->offset = 0;
if (req->page_done)
- req->page_done(call, req);
+ req->page_done(req);
req->index++;
if (req->remain > 0)
goto begin_page;
@@ -623,12 +533,14 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &vnode->status.data_version, req);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSCallBack(call, vnode, &bp);
- xdr_decode_YFSVolSync(&bp, call->reply[1]);
+ xdr_decode_YFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
+
+ req->data_version = call->out_scb->status.data_version;
+ req->file_size = call->out_scb->status.size;
call->unmarshall++;
@@ -642,7 +554,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
zero_user_segment(req->pages[req->index],
req->offset, PAGE_SIZE);
if (req->page_done)
- req->page_done(call, req);
+ req->page_done(req);
req->offset = 0;
}
@@ -652,9 +564,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
static void yfs_fetch_data_destructor(struct afs_call *call)
{
- struct afs_read *req = call->reply[2];
-
- afs_put_read(req);
+ afs_put_read(call->read_request);
afs_flat_call_destructor(call);
}
@@ -671,7 +581,8 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = {
/*
* Fetch data from a file.
*/
-int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
+int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
+ struct afs_read *req)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -693,11 +604,9 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = NULL; /* volsync */
- call->reply[2] = req;
- call->expected_version = vnode->status.data_version;
- call->want_reply_time = true;
+ call->out_scb = scb;
+ call->out_volsync = NULL;
+ call->read_request = req;
/* marshall the parameters */
bp = call->request;
@@ -709,9 +618,9 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
yfs_check_req(call, bp);
refcount_inc(&req->usage);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -721,7 +630,6 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
*/
static int yfs_deliver_fs_create_vnode(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -733,16 +641,15 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_YFSFid(&bp, call->reply[1]);
- ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ xdr_decode_YFSFid(&bp, call->out_fid);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSCallBack_raw(&bp, call->reply[3]);
- xdr_decode_YFSVolSync(&bp, NULL);
+ xdr_decode_YFSCallBack(&bp, call, call->out_scb);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -764,14 +671,13 @@ static const struct afs_call_type afs_RXFSCreateFile = {
int yfs_fs_create_file(struct afs_fs_cursor *fc,
const char *name,
umode_t mode,
- u64 current_data_version,
+ struct afs_status_cb *dvnode_scb,
struct afs_fid *newfid,
- struct afs_file_status *newstatus,
- struct afs_callback *newcb)
+ struct afs_status_cb *new_scb)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, rplsz;
__be32 *bp;
@@ -795,24 +701,23 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = newfid;
- call->reply[2] = newstatus;
- call->reply[3] = newcb;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_fid = newfid;
+ call->out_scb = new_scb;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSCREATEFILE);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &dvnode->fid);
bp = xdr_encode_string(bp, name, namesz);
bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
+ trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -830,14 +735,13 @@ static const struct afs_call_type yfs_RXFSMakeDir = {
int yfs_fs_make_dir(struct afs_fs_cursor *fc,
const char *name,
umode_t mode,
- u64 current_data_version,
+ struct afs_status_cb *dvnode_scb,
struct afs_fid *newfid,
- struct afs_file_status *newstatus,
- struct afs_callback *newcb)
+ struct afs_status_cb *new_scb)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, rplsz;
__be32 *bp;
@@ -860,23 +764,22 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = newfid;
- call->reply[2] = newstatus;
- call->reply[3] = newcb;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_fid = newfid;
+ call->out_scb = new_scb;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSMAKEDIR);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &dvnode->fid);
bp = xdr_encode_string(bp, name, namesz);
bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
yfs_check_req(call, bp);
afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
+ trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -886,8 +789,6 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_remove_file2(struct afs_call *call)
{
- struct afs_vnode *dvnode = call->reply[0];
- struct afs_vnode *vnode = call->reply[1];
struct afs_fid fid;
const __be32 *bp;
int ret;
@@ -898,20 +799,18 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
if (ret < 0)
return ret;
- /* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
xdr_decode_YFSFid(&bp, &fid);
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
/* Was deleted if vnode->status.abort_code == VNOVNODE. */
- xdr_decode_YFSVolSync(&bp, NULL);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
return 0;
}
@@ -929,7 +828,8 @@ static const struct afs_call_type yfs_RXYFSRemoveFile2 = {
* Remove a file and retrieve new file status.
*/
int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, u64 current_data_version)
+ const char *name, struct afs_status_cb *dvnode_scb,
+ struct afs_status_cb *vnode_scb)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -954,9 +854,8 @@ int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = dvnode;
- call->reply[1] = vnode;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_scb = vnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -968,6 +867,7 @@ int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -977,7 +877,6 @@ int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_remove(struct afs_call *call)
{
- struct afs_vnode *dvnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -987,14 +886,12 @@ static int yfs_deliver_fs_remove(struct afs_call *call)
if (ret < 0)
return ret;
- /* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSVolSync(&bp, NULL);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
return 0;
}
@@ -1019,7 +916,8 @@ static const struct afs_call_type yfs_RXYFSRemoveDir = {
* remove a file or directory
*/
int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, bool isdir, u64 current_data_version)
+ const char *name, bool isdir,
+ struct afs_status_cb *dvnode_scb)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -1042,9 +940,7 @@ int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = dvnode;
- call->reply[1] = vnode;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -1056,6 +952,7 @@ int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1065,7 +962,6 @@ int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_link(struct afs_call *call)
{
- struct afs_vnode *dvnode = call->reply[0], *vnode = call->reply[1];
const __be32 *bp;
int ret;
@@ -1075,16 +971,14 @@ static int yfs_deliver_fs_link(struct afs_call *call)
if (ret < 0)
return ret;
- /* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSVolSync(&bp, NULL);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
}
@@ -1103,7 +997,9 @@ static const struct afs_call_type yfs_RXYFSLink = {
* Make a hard link.
*/
int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, u64 current_data_version)
+ const char *name,
+ struct afs_status_cb *dvnode_scb,
+ struct afs_status_cb *vnode_scb)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -1127,9 +1023,8 @@ int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = dvnode;
- call->reply[1] = vnode;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_scb = vnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -1142,6 +1037,7 @@ int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call1(call, &vnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1151,7 +1047,6 @@ int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_symlink(struct afs_call *call)
{
- struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -1163,15 +1058,14 @@ static int yfs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_YFSFid(&bp, call->reply[1]);
- ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ xdr_decode_YFSFid(&bp, call->out_fid);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSVolSync(&bp, NULL);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
@@ -1193,9 +1087,9 @@ static const struct afs_call_type yfs_RXYFSSymlink = {
int yfs_fs_symlink(struct afs_fs_cursor *fc,
const char *name,
const char *contents,
- u64 current_data_version,
+ struct afs_status_cb *dvnode_scb,
struct afs_fid *newfid,
- struct afs_file_status *newstatus)
+ struct afs_status_cb *vnode_scb)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -1222,10 +1116,9 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = dvnode;
- call->reply[1] = newfid;
- call->reply[2] = newstatus;
- call->expected_version = current_data_version + 1;
+ call->out_dir_scb = dvnode_scb;
+ call->out_fid = newfid;
+ call->out_scb = vnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -1239,6 +1132,7 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call1(call, &dvnode->fid, name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1248,8 +1142,6 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_rename(struct afs_call *call)
{
- struct afs_vnode *orig_dvnode = call->reply[0];
- struct afs_vnode *new_dvnode = call->reply[1];
const __be32 *bp;
int ret;
@@ -1259,20 +1151,17 @@ static int yfs_deliver_fs_rename(struct afs_call *call)
if (ret < 0)
return ret;
- /* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
if (ret < 0)
return ret;
- if (new_dvnode != orig_dvnode) {
- ret = yfs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
- &call->expected_version_2, NULL);
+ if (call->out_dir_scb != call->out_scb) {
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
}
- xdr_decode_YFSVolSync(&bp, NULL);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
_leave(" = 0 [done]");
return 0;
}
@@ -1294,8 +1183,8 @@ int yfs_fs_rename(struct afs_fs_cursor *fc,
const char *orig_name,
struct afs_vnode *new_dvnode,
const char *new_name,
- u64 current_orig_data_version,
- u64 current_new_data_version)
+ struct afs_status_cb *orig_dvnode_scb,
+ struct afs_status_cb *new_dvnode_scb)
{
struct afs_vnode *orig_dvnode = fc->vnode;
struct afs_call *call;
@@ -1321,10 +1210,8 @@ int yfs_fs_rename(struct afs_fs_cursor *fc,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = orig_dvnode;
- call->reply[1] = new_dvnode;
- call->expected_version = current_orig_data_version + 1;
- call->expected_version_2 = current_new_data_version + 1;
+ call->out_dir_scb = orig_dvnode_scb;
+ call->out_scb = new_dvnode_scb;
/* marshall the parameters */
bp = call->request;
@@ -1338,46 +1225,18 @@ int yfs_fs_rename(struct afs_fs_cursor *fc,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
/*
- * Deliver reply data to a YFS.StoreData64 operation.
- */
-static int yfs_deliver_fs_store_data(struct afs_call *call)
-{
- struct afs_vnode *vnode = call->reply[0];
- const __be32 *bp;
- int ret;
-
- _enter("");
-
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, NULL);
-
- afs_pages_written_back(vnode, call);
-
- _leave(" = 0 [done]");
- return 0;
-}
-
-/*
* YFS.StoreData64 operation type.
*/
static const struct afs_call_type yfs_RXYFSStoreData64 = {
.name = "YFS.StoreData64",
.op = yfs_FS_StoreData64,
- .deliver = yfs_deliver_fs_store_data,
+ .deliver = yfs_deliver_status_and_volsync,
.destructor = afs_flat_call_destructor,
};
@@ -1386,7 +1245,8 @@ static const struct afs_call_type yfs_RXYFSStoreData64 = {
*/
int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to)
+ unsigned offset, unsigned to,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1424,13 +1284,12 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
call->key = fc->key;
call->mapping = mapping;
- call->reply[0] = vnode;
call->first = first;
call->last = last;
call->first_offset = offset;
call->last_to = to;
call->send_pages = true;
- call->expected_version = vnode->status.data_version + 1;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1445,51 +1304,25 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
/*
- * deliver reply data to an FS.StoreStatus
- */
-static int yfs_deliver_fs_store_status(struct afs_call *call)
-{
- struct afs_vnode *vnode = call->reply[0];
- const __be32 *bp;
- int ret;
-
- _enter("");
-
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, NULL);
-
- _leave(" = 0 [done]");
- return 0;
-}
-
-/*
* YFS.StoreStatus operation type
*/
static const struct afs_call_type yfs_RXYFSStoreStatus = {
.name = "YFS.StoreStatus",
.op = yfs_FS_StoreStatus,
- .deliver = yfs_deliver_fs_store_status,
+ .deliver = yfs_deliver_status_and_volsync,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = {
.name = "YFS.StoreData64",
.op = yfs_FS_StoreData64,
- .deliver = yfs_deliver_fs_store_status,
+ .deliver = yfs_deliver_status_and_volsync,
.destructor = afs_flat_call_destructor,
};
@@ -1497,7 +1330,8 @@ static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = {
* Set the attributes on a file, using YFS.StoreData64 rather than
* YFS.StoreStatus so as to alter the file size also.
*/
-static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
+static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1518,8 +1352,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->expected_version = vnode->status.data_version + 1;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1534,6 +1367,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1542,7 +1376,8 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
* Set the attributes on a file, using YFS.StoreData64 if there's a change in
* file size, and YFS.StoreStatus otherwise.
*/
-int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
+int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1550,7 +1385,7 @@ int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
__be32 *bp;
if (attr->ia_valid & ATTR_SIZE)
- return yfs_fs_setattr_size(fc, attr);
+ return yfs_fs_setattr_size(fc, attr, scb);
_enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
@@ -1565,8 +1400,7 @@ int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->expected_version = vnode->status.data_version;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1578,6 +1412,7 @@ int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1607,7 +1442,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_YFSFetchVolumeStatus(&bp, call->reply[1]);
+ xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
call->unmarshall++;
afs_extract_to_tmp(call);
@@ -1623,7 +1458,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_volname_len);
size = (call->count + 3) & ~3; /* It's padded */
- afs_extract_begin(call, call->reply[2], size);
+ afs_extract_to_buf(call, size);
call->unmarshall++;
/* Fall through - and extract the volume name */
@@ -1633,7 +1468,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
if (ret < 0)
return ret;
- p = call->reply[2];
+ p = call->buffer;
p[call->count] = 0;
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
@@ -1651,7 +1486,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_offline_msg_len);
size = (call->count + 3) & ~3; /* It's padded */
- afs_extract_begin(call, call->reply[2], size);
+ afs_extract_to_buf(call, size);
call->unmarshall++;
/* Fall through - and extract the offline message */
@@ -1661,7 +1496,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
if (ret < 0)
return ret;
- p = call->reply[2];
+ p = call->buffer;
p[call->count] = 0;
_debug("offline '%s'", p);
@@ -1680,7 +1515,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_motd_len);
size = (call->count + 3) & ~3; /* It's padded */
- afs_extract_begin(call, call->reply[2], size);
+ afs_extract_to_buf(call, size);
call->unmarshall++;
/* Fall through - and extract the message of the day */
@@ -1690,7 +1525,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
if (ret < 0)
return ret;
- p = call->reply[2];
+ p = call->buffer;
p[call->count] = 0;
_debug("motd '%s'", p);
@@ -1706,23 +1541,13 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
}
/*
- * Destroy a YFS.GetVolumeStatus call.
- */
-static void yfs_get_volume_status_call_destructor(struct afs_call *call)
-{
- kfree(call->reply[2]);
- call->reply[2] = NULL;
- afs_flat_call_destructor(call);
-}
-
-/*
* YFS.GetVolumeStatus operation type
*/
static const struct afs_call_type yfs_RXYFSGetVolumeStatus = {
.name = "YFS.GetVolumeStatus",
.op = yfs_FS_GetVolumeStatus,
.deliver = yfs_deliver_fs_get_volume_status,
- .destructor = yfs_get_volume_status_call_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
@@ -1735,28 +1560,21 @@ int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
struct afs_call *call;
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- void *tmpbuf;
_enter("");
- tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_u64),
- sizeof(struct yfs_xdr_YFSFetchVolumeStatus) +
- sizeof(__be32));
- if (!call) {
- kfree(tmpbuf);
+ max_t(size_t,
+ sizeof(struct yfs_xdr_YFSFetchVolumeStatus) +
+ sizeof(__be32),
+ AFSOPAQUEMAX + 1));
+ if (!call)
return -ENOMEM;
- }
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[1] = vs;
- call->reply[2] = tmpbuf;
+ call->out_volstatus = vs;
/* marshall the parameters */
bp = call->request;
@@ -1767,39 +1585,12 @@ int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
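
The GetVolumeStatus change above drops the separately kmalloc'd AFSOPAQUEMAX scratch buffer and its bespoke destructor by making the flat call's single reply buffer big enough for whichever is larger: the fixed-size reply structures, or the worst-case padded string plus a NUL. The sizing idea in miniature (the macro stands in for the kernel's max_t()):

	#include <stdlib.h>

	#define MAX_SZ(a, b) ((a) > (b) ? (a) : (b))	/* max_t() stand-in */

	struct flat_call {
		size_t bufsz;
		unsigned char buf[];
	};

	static struct flat_call *alloc_flat_call(size_t fixed_reply_sz,
						 size_t opaque_max)
	{
		size_t bufsz = MAX_SZ(fixed_reply_sz, opaque_max + 1);
		struct flat_call *call = malloc(sizeof(*call) + bufsz);

		if (call)
			call->bufsz = bufsz;	/* one buffer serves both uses */
		return call;
	}
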
/*
- * Deliver reply data to operations that just return a file status and a volume
- * sync record.
- */
-static int yfs_deliver_status_and_volsync(struct afs_call *call)
-{
- struct afs_vnode *vnode = call->reply[0];
- const __be32 *bp;
- int ret;
-
- _enter("{%u}", call->unmarshall);
-
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, NULL);
-
- _leave(" = 0 [done]");
- return 0;
-}
-
-/*
* YFS.SetLock operation type
*/
static const struct afs_call_type yfs_RXYFSSetLock = {
@@ -1834,7 +1625,8 @@ static const struct afs_call_type yfs_RXYFSReleaseLock = {
/*
* Set a lock on a file
*/
-int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
+int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1853,8 +1645,8 @@ int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->want_reply_time = true;
+ call->lvnode = vnode;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1866,6 +1658,7 @@ int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_calli(call, &vnode->fid, type);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1873,7 +1666,7 @@ int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
/*
* extend a lock on a file
*/
-int yfs_fs_extend_lock(struct afs_fs_cursor *fc)
+int yfs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1891,8 +1684,8 @@ int yfs_fs_extend_lock(struct afs_fs_cursor *fc)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
- call->want_reply_time = true;
+ call->lvnode = vnode;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1903,6 +1696,7 @@ int yfs_fs_extend_lock(struct afs_fs_cursor *fc)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -1910,7 +1704,7 @@ int yfs_fs_extend_lock(struct afs_fs_cursor *fc)
/*
* release a lock on a file
*/
-int yfs_fs_release_lock(struct afs_fs_cursor *fc)
+int yfs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -1928,7 +1722,8 @@ int yfs_fs_release_lock(struct afs_fs_cursor *fc)
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
+ call->lvnode = vnode;
+ call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
@@ -1939,48 +1734,18 @@ int yfs_fs_release_lock(struct afs_fs_cursor *fc)
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
/*
- * Deliver reply data to an FS.FetchStatus with no vnode.
- */
-static int yfs_deliver_fs_fetch_status(struct afs_call *call)
-{
- struct afs_file_status *status = call->reply[1];
- struct afs_callback *callback = call->reply[2];
- struct afs_volsync *volsync = call->reply[3];
- struct afs_vnode *vnode = call->reply[0];
- const __be32 *bp;
- int ret;
-
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = yfs_decode_status(call, &bp, status, vnode,
- &call->expected_version, NULL);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack_raw(&bp, callback);
- xdr_decode_YFSVolSync(&bp, volsync);
-
- _leave(" = 0 [done]");
- return 0;
-}
-
-/*
* YFS.FetchStatus operation type
*/
static const struct afs_call_type yfs_RXYFSFetchStatus = {
.name = "YFS.FetchStatus",
.op = yfs_FS_FetchStatus,
- .deliver = yfs_deliver_fs_fetch_status,
+ .deliver = yfs_deliver_fs_status_cb_and_volsync,
.destructor = afs_flat_call_destructor,
};
@@ -1990,8 +1755,7 @@ static const struct afs_call_type yfs_RXYFSFetchStatus = {
int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
struct afs_net *net,
struct afs_fid *fid,
- struct afs_file_status *status,
- struct afs_callback *callback,
+ struct afs_status_cb *scb,
struct afs_volsync *volsync)
{
struct afs_call *call;
@@ -2012,11 +1776,8 @@ int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
}
call->key = fc->key;
- call->reply[0] = NULL; /* vnode for fid[0] */
- call->reply[1] = status;
- call->reply[2] = callback;
- call->reply[3] = volsync;
- call->expected_version = 1; /* vnode->status.data_version */
+ call->out_scb = scb;
+ call->out_volsync = volsync;
/* marshall the parameters */
bp = call->request;
@@ -2025,9 +1786,9 @@ int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
bp = xdr_encode_YFSFid(bp, fid);
yfs_check_req(call, bp);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, fid);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -2037,9 +1798,7 @@ int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
{
- struct afs_file_status *statuses;
- struct afs_callback *callbacks;
- struct afs_vnode *vnode = call->reply[0];
+ struct afs_status_cb *scb;
const __be32 *bp;
u32 tmp;
int ret;
@@ -2078,10 +1837,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- statuses = call->reply[1];
- ret = yfs_decode_status(call, &bp, &statuses[call->count],
- call->count == 0 ? vnode : NULL,
- NULL, NULL);
+ scb = &call->out_scb[call->count];
+ ret = xdr_decode_YFSFetchStatus(&bp, call, scb);
if (ret < 0)
return ret;
@@ -2120,13 +1877,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
_debug("unmarshall CB array");
bp = call->buffer;
- callbacks = call->reply[2];
- xdr_decode_YFSCallBack_raw(&bp, &callbacks[call->count]);
- statuses = call->reply[1];
- if (call->count == 0 && vnode && statuses[0].abort_code == 0) {
- bp = call->buffer;
- xdr_decode_YFSCallBack(call, vnode, &bp);
- }
+ scb = &call->out_scb[call->count];
+ xdr_decode_YFSCallBack(&bp, call, scb);
call->count++;
if (call->count < call->count2)
goto more_cbs;
@@ -2141,7 +1893,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_YFSVolSync(&bp, call->reply[3]);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
@@ -2170,8 +1922,7 @@ static const struct afs_call_type yfs_RXYFSInlineBulkStatus = {
int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
struct afs_net *net,
struct afs_fid *fids,
- struct afs_file_status *statuses,
- struct afs_callback *callbacks,
+ struct afs_status_cb *statuses,
unsigned int nr_fids,
struct afs_volsync *volsync)
{
@@ -2194,10 +1945,8 @@ int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
}
call->key = fc->key;
- call->reply[0] = NULL; /* vnode for fid[0] */
- call->reply[1] = statuses;
- call->reply[2] = callbacks;
- call->reply[3] = volsync;
+ call->out_scb = statuses;
+ call->out_volsync = volsync;
call->count2 = nr_fids;
/* marshall the parameters */
@@ -2209,9 +1958,9 @@ int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
bp = xdr_encode_YFSFid(bp, &fids[i]);
yfs_check_req(call, bp);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &fids[0]);
+ afs_set_fc_call(call, fc);
afs_make_call(&fc->ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, &fc->ac);
}
@@ -2221,9 +1970,7 @@ int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
{
- struct afs_volsync *volsync = call->reply[2];
- struct afs_vnode *vnode = call->reply[1];
- struct yfs_acl *yacl = call->reply[0];
+ struct yfs_acl *yacl = call->out_yacl;
struct afs_acl *acl;
const __be32 *bp;
unsigned int size;
@@ -2308,11 +2055,10 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
bp = call->buffer;
yacl->inherit_flag = ntohl(*bp++);
yacl->num_cleaned = ntohl(*bp++);
- ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL);
+ ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
return ret;
- xdr_decode_YFSVolSync(&bp, volsync);
+ xdr_decode_YFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
@@ -2333,12 +2079,6 @@ void yfs_free_opaque_acl(struct yfs_acl *yacl)
}
}
-static void yfs_destroy_fs_fetch_opaque_acl(struct afs_call *call)
-{
- yfs_free_opaque_acl(call->reply[0]);
- afs_flat_call_destructor(call);
-}
-
/*
* YFS.FetchOpaqueACL operation type
*/
@@ -2346,18 +2086,18 @@ static const struct afs_call_type yfs_RXYFSFetchOpaqueACL = {
.name = "YFS.FetchOpaqueACL",
.op = yfs_FS_FetchOpaqueACL,
.deliver = yfs_deliver_fs_fetch_opaque_acl,
- .destructor = yfs_destroy_fs_fetch_opaque_acl,
+ .destructor = afs_flat_call_destructor,
};
/*
* Fetch the YFS advanced ACLs for a file.
*/
struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *fc,
- unsigned int flags)
+ struct yfs_acl *yacl,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
- struct yfs_acl *yacl;
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
@@ -2370,19 +2110,15 @@ struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *fc,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call)
- goto nomem;
-
- yacl = kzalloc(sizeof(struct yfs_acl), GFP_KERNEL);
- if (!yacl)
- goto nomem_call;
+ if (!call) {
+ fc->ac.error = -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+ }
- yacl->flags = flags;
call->key = fc->key;
- call->reply[0] = yacl;
- call->reply[1] = vnode;
- call->reply[2] = NULL; /* volsync */
- call->ret_reply0 = true;
+ call->out_yacl = yacl;
+ call->out_scb = scb;
+ call->out_volsync = NULL;
/* marshall the parameters */
bp = call->request;
@@ -2391,17 +2127,10 @@ struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *fc,
bp = xdr_encode_YFSFid(bp, &vnode->fid);
yfs_check_req(call, bp);
- call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_make_call(&fc->ac, call, GFP_KERNEL);
return (struct yfs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
-
-nomem_call:
- afs_put_call(call);
-nomem:
- fc->ac.error = -ENOMEM;
- return ERR_PTR(-ENOMEM);
}
/*
@@ -2417,7 +2146,8 @@ static const struct afs_call_type yfs_RXYFSStoreOpaqueACL2 = {
/*
* Fetch the YFS ACL for a file.
*/
-int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl)
+int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl,
+ struct afs_status_cb *scb)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -2441,8 +2171,8 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
}
call->key = fc->key;
- call->reply[0] = vnode;
- call->reply[2] = NULL; /* volsync */
+ call->out_scb = scb;
+ call->out_volsync = NULL;
/* marshall the parameters */
bp = call->request;
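
The fs/afs/yfsclient.c hunks above all apply one refactor: the untyped call->reply[n] slots become typed output pointers (lvnode, out_scb, out_volsync, out_yacl), which is what lets the per-operation decoders collapse into shared ones such as yfs_deliver_fs_status_cb_and_volsync. A minimal sketch of that pattern, with stand-in types rather than the kernel's structures:

#include <stdio.h>

struct status_cb { int status; long long cb_expiry; };
struct volsync { long long creation; };

struct call {
	struct status_cb *out_scb;	/* where decoded status+callback go */
	struct volsync *out_volsync;	/* optional; may be NULL */
};

/* One shared delivery routine replaces several per-RPC decoders: it only
 * consults the typed pointers, never an operation-specific reply[] index. */
static int deliver_status_and_volsync(struct call *c,
				      const struct status_cb *wire_scb,
				      const struct volsync *wire_vs)
{
	if (c->out_scb)
		*c->out_scb = *wire_scb;
	if (c->out_volsync && wire_vs)
		*c->out_volsync = *wire_vs;
	return 0;
}

int main(void)
{
	struct status_cb scb;
	struct status_cb wire = { 0644, 42 };
	struct call call = { .out_scb = &scb, .out_volsync = NULL };

	deliver_status_and_volsync(&call, &wire, NULL);
	printf("status=%d\n", scb.status);
	return 0;
}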
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 36a8dc699448..72f8e1311392 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -892,8 +892,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
int have = ci->i_snap_caps;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask %p snap issued %s"
- " (mask %s)\n", &ci->vfs_inode,
+ dout("__ceph_caps_issued_mask ino 0x%lx snap issued %s"
+ " (mask %s)\n", ci->vfs_inode.i_ino,
ceph_cap_string(have),
ceph_cap_string(mask));
return 1;
@@ -904,8 +904,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
if (!__cap_is_valid(cap))
continue;
if ((cap->issued & mask) == mask) {
- dout("__ceph_caps_issued_mask %p cap %p issued %s"
- " (mask %s)\n", &ci->vfs_inode, cap,
+ dout("__ceph_caps_issued_mask ino 0x%lx cap %p issued %s"
+ " (mask %s)\n", ci->vfs_inode.i_ino, cap,
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
if (touch)
@@ -916,8 +916,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
/* does a combination of caps satisfy mask? */
have |= cap->issued;
if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask %p combo issued %s"
- " (mask %s)\n", &ci->vfs_inode,
+ dout("__ceph_caps_issued_mask ino 0x%lx combo issued %s"
+ " (mask %s)\n", ci->vfs_inode.i_ino,
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
if (touch) {
@@ -2257,8 +2257,6 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (datasync)
goto out;
- inode_lock(inode);
-
dirty = try_flush_caps(inode, &flush_tid);
dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
@@ -2273,7 +2271,6 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = wait_event_interruptible(ci->i_cap_wq,
caps_are_flushed(inode, flush_tid));
}
- inode_unlock(inode);
out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret;
@@ -2528,9 +2525,14 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got,
* to (when applicable), and check against max_size here as well.
* Note that caller is responsible for ensuring max_size increases are
* requested from the MDS.
+ *
+ * Returns 0 if caps were not able to be acquired (yet), 1 if they were,
+ * or a negative error code.
+ *
+ * FIXME: how does a 0 return differ from -EAGAIN?
*/
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
- loff_t endoff, bool nonblock, int *got, int *err)
+ loff_t endoff, bool nonblock, int *got)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
@@ -2550,8 +2552,7 @@ again:
if ((file_wanted & need) != need) {
dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
ceph_cap_string(need), ceph_cap_string(file_wanted));
- *err = -EBADF;
- ret = 1;
+ ret = -EBADF;
goto out_unlock;
}
@@ -2572,10 +2573,8 @@ again:
if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
inode, endoff, ci->i_max_size);
- if (endoff > ci->i_requested_max_size) {
- *err = -EAGAIN;
- ret = 1;
- }
+ if (endoff > ci->i_requested_max_size)
+ ret = -EAGAIN;
goto out_unlock;
}
/*
@@ -2610,8 +2609,7 @@ again:
* task isn't in TASK_RUNNING state
*/
if (nonblock) {
- *err = -EAGAIN;
- ret = 1;
+ ret = -EAGAIN;
goto out_unlock;
}
@@ -2640,8 +2638,7 @@ again:
if (session_readonly) {
dout("get_cap_refs %p needed %s but mds%d readonly\n",
inode, ceph_cap_string(need), ci->i_auth_cap->mds);
- *err = -EROFS;
- ret = 1;
+ ret = -EROFS;
goto out_unlock;
}
@@ -2650,16 +2647,14 @@ again:
if (READ_ONCE(mdsc->fsc->mount_state) ==
CEPH_MOUNT_SHUTDOWN) {
dout("get_cap_refs %p forced umount\n", inode);
- *err = -EIO;
- ret = 1;
+ ret = -EIO;
goto out_unlock;
}
mds_wanted = __ceph_caps_mds_wanted(ci, false);
if (need & ~(mds_wanted & need)) {
dout("get_cap_refs %p caps were dropped"
" (session killed?)\n", inode);
- *err = -ESTALE;
- ret = 1;
+ ret = -ESTALE;
goto out_unlock;
}
if (!(file_wanted & ~mds_wanted))
@@ -2710,7 +2705,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
bool nonblock, int *got)
{
- int ret, err = 0;
+ int ret;
BUG_ON(need & ~CEPH_CAP_FILE_RD);
BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED));
@@ -2718,15 +2713,8 @@ int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
if (ret < 0)
return ret;
- ret = try_get_cap_refs(ci, need, want, 0, nonblock, got, &err);
- if (ret) {
- if (err == -EAGAIN) {
- ret = 0;
- } else if (err < 0) {
- ret = err;
- }
- }
- return ret;
+ ret = try_get_cap_refs(ci, need, want, 0, nonblock, got);
+ return ret == -EAGAIN ? 0 : ret;
}
/*
@@ -2737,7 +2725,7 @@ int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
loff_t endoff, int *got, struct page **pinned_page)
{
- int _got, ret, err = 0;
+ int _got, ret;
ret = ceph_pool_perm_check(ci, need);
if (ret < 0)
@@ -2747,21 +2735,19 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
if (endoff > 0)
check_max_size(&ci->vfs_inode, endoff);
- err = 0;
_got = 0;
ret = try_get_cap_refs(ci, need, want, endoff,
- false, &_got, &err);
- if (ret) {
- if (err == -EAGAIN)
- continue;
- if (err < 0)
- ret = err;
- } else {
+ false, &_got);
+ if (ret == -EAGAIN) {
+ continue;
+ } else if (!ret) {
+ int err;
+
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&ci->i_cap_wq, &wait);
- while (!try_get_cap_refs(ci, need, want, endoff,
- true, &_got, &err)) {
+ while (!(err = try_get_cap_refs(ci, need, want, endoff,
+ true, &_got))) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -2770,19 +2756,14 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
}
remove_wait_queue(&ci->i_cap_wq, &wait);
-
if (err == -EAGAIN)
continue;
- if (err < 0)
- ret = err;
}
- if (ret < 0) {
- if (err == -ESTALE) {
- /* session was killed, try renew caps */
- ret = ceph_renew_caps(&ci->vfs_inode);
- if (ret == 0)
- continue;
- }
+ if (ret == -ESTALE) {
+ /* session was killed, try renew caps */
+ ret = ceph_renew_caps(&ci->vfs_inode);
+ if (ret == 0)
+ continue;
return ret;
}
@@ -4099,7 +4080,7 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
}
/*
- * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
+ * For a soon-to-be unlinked file, drop the LINK caps. If it
* looks like the link count will hit 0, drop any other caps (other
* than PIN) we don't specifically want (due to the file still being
* open).
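
The try_get_cap_refs() rework above folds the old *err out-parameter into a single return value: 0 means the caps are not available yet, 1 means they were acquired, and a negative value is an error, with -EAGAIN meaning the whole attempt should be retried. A hedged sketch of that calling convention (try_get() is a stand-in, not the kernel function):

#include <errno.h>
#include <stdio.h>

static int try_get(int attempt)
{
	if (attempt == 0)
		return -EAGAIN;	/* transient: restart the attempt */
	if (attempt == 1)
		return 0;	/* not yet: caller may block and wait */
	return 1;		/* acquired */
}

int main(void)
{
	for (int attempt = 0; ; attempt++) {
		int ret = try_get(attempt);

		if (ret == -EAGAIN)
			continue;	/* mirrors ceph_get_caps' retry */
		if (ret < 0)
			return 1;	/* hard error: propagate */
		if (ret == 1)
			break;		/* got the refs */
		/* ret == 0: the kernel would wait on i_cap_wq here */
	}
	printf("acquired\n");
	return 0;
}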
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 98365e74cb4a..b3fc5fe26a1a 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -37,7 +37,7 @@ static int mdsmap_show(struct seq_file *s, void *p)
struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr;
int state = mdsmap->m_info[i].state;
seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
- ceph_pr_addr(&addr->in_addr),
+ ceph_pr_addr(addr),
ceph_mds_state_name(state));
}
return 0;
@@ -88,7 +88,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
- kfree(path);
+ ceph_mdsc_free_path(path, pathlen);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
@@ -108,7 +108,7 @@ static int mdsc_show(struct seq_file *s, void *p)
req->r_old_dentry,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
- kfree(path);
+ ceph_mdsc_free_path(path, pathlen);
} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
@@ -124,18 +124,48 @@ static int mdsc_show(struct seq_file *s, void *p)
return 0;
}
+static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
+{
+ struct seq_file *s = p;
+
+ seq_printf(s, "0x%-17lx%-17s%-17s\n", inode->i_ino,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->implemented));
+ return 0;
+}
+
static int caps_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
- int total, avail, used, reserved, min;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ int total, avail, used, reserved, min, i;
ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
seq_printf(s, "total\t\t%d\n"
"avail\t\t%d\n"
"used\t\t%d\n"
"reserved\t%d\n"
- "min\t%d\n",
+ "min\t\t%d\n\n",
total, avail, used, reserved, min);
+ seq_printf(s, "ino issued implemented\n");
+ seq_printf(s, "-----------------------------------------------\n");
+
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *session;
+
+ session = __ceph_lookup_mds_session(mdsc, i);
+ if (!session)
+ continue;
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&session->s_mutex);
+ ceph_iterate_session_caps(session, caps_show_cb, s);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ mutex_lock(&mdsc->mutex);
+ }
+ mutex_unlock(&mdsc->mutex);
+
return 0;
}
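
caps_show() above walks the sessions with a lock-juggling pattern: hold mdsc->mutex only long enough to look up and reference a session, drop it while iterating that session's caps under s_mutex, then retake it to advance the index. A minimal pthread sketch of the same shape (all types are stand-ins, not the kernel's):

#include <pthread.h>

struct session { int refs; };

#define MAX_SESSIONS 4
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *sessions[MAX_SESSIONS];

static struct session *lookup_get(int i)
{
	/* caller holds table_lock */
	struct session *s = sessions[i];

	if (s)
		s->refs++;
	return s;
}

static void for_each_session(void (*work)(struct session *))
{
	pthread_mutex_lock(&table_lock);
	for (int i = 0; i < MAX_SESSIONS; i++) {
		struct session *s = lookup_get(i);

		if (!s)
			continue;
		pthread_mutex_unlock(&table_lock); /* work may sleep */
		work(s);
		s->refs--;			   /* put the reference */
		pthread_mutex_lock(&table_lock);
	}
	pthread_mutex_unlock(&table_lock);
}

static void show(struct session *s) { (void)s; }

int main(void)
{
	static struct session s0;

	sessions[0] = &s0;
	for_each_session(show);
	return 0;
}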
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 3c59ad180ef0..d3ef7ee429ec 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -22,18 +22,77 @@ struct ceph_nfs_confh {
u64 ino, parent_ino;
} __attribute__ ((packed));
+/*
+ * fh for snapped inode
+ */
+struct ceph_nfs_snapfh {
+ u64 ino;
+ u64 snapid;
+ u64 parent_ino;
+ u32 hash;
+} __attribute__ ((packed));
+
+static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
+ struct inode *parent_inode)
+{
+ static const int snap_handle_length =
+ sizeof(struct ceph_nfs_snapfh) >> 2;
+ struct ceph_nfs_snapfh *sfh = (void *)rawfh;
+ u64 snapid = ceph_snap(inode);
+ int ret;
+ bool no_parent = true;
+
+ if (*max_len < snap_handle_length) {
+ *max_len = snap_handle_length;
+ ret = FILEID_INVALID;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (snapid != CEPH_SNAPDIR) {
+ struct inode *dir;
+ struct dentry *dentry = d_find_alias(inode);
+ if (!dentry)
+ goto out;
+
+ rcu_read_lock();
+ dir = d_inode_rcu(dentry->d_parent);
+ if (ceph_snap(dir) != CEPH_SNAPDIR) {
+ sfh->parent_ino = ceph_ino(dir);
+ sfh->hash = ceph_dentry_hash(dir, dentry);
+ no_parent = false;
+ }
+ rcu_read_unlock();
+ dput(dentry);
+ }
+
+ if (no_parent) {
+ if (!S_ISDIR(inode->i_mode))
+ goto out;
+ sfh->parent_ino = sfh->ino;
+ sfh->hash = 0;
+ }
+ sfh->ino = ceph_ino(inode);
+ sfh->snapid = snapid;
+
+ *max_len = snap_handle_length;
+ ret = FILEID_BTRFS_WITH_PARENT;
+out:
+ dout("encode_snapfh %llx.%llx ret=%d\n", ceph_vinop(inode), ret);
+ return ret;
+}
+
static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
+ static const int handle_length =
+ sizeof(struct ceph_nfs_fh) >> 2;
+ static const int connected_handle_length =
+ sizeof(struct ceph_nfs_confh) >> 2;
int type;
- struct ceph_nfs_fh *fh = (void *)rawfh;
- struct ceph_nfs_confh *cfh = (void *)rawfh;
- int connected_handle_length = sizeof(*cfh)/4;
- int handle_length = sizeof(*fh)/4;
- /* don't re-export snaps */
if (ceph_snap(inode) != CEPH_NOSNAP)
- return -EINVAL;
+ return ceph_encode_snapfh(inode, rawfh, max_len, parent_inode);
if (parent_inode && (*max_len < connected_handle_length)) {
*max_len = connected_handle_length;
@@ -44,6 +103,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
}
if (parent_inode) {
+ struct ceph_nfs_confh *cfh = (void *)rawfh;
dout("encode_fh %llx with parent %llx\n",
ceph_ino(inode), ceph_ino(parent_inode));
cfh->ino = ceph_ino(inode);
@@ -51,6 +111,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
*max_len = connected_handle_length;
type = FILEID_INO32_GEN_PARENT;
} else {
+ struct ceph_nfs_fh *fh = (void *)rawfh;
dout("encode_fh %llx\n", ceph_ino(inode));
fh->ino = ceph_ino(inode);
*max_len = handle_length;
@@ -59,7 +120,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
return type;
}
-static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
+static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct inode *inode;
@@ -81,7 +142,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
mask = CEPH_STAT_CAP_INODE;
if (ceph_security_xattr_wanted(d_inode(sb->s_root)))
mask |= CEPH_CAP_XATTR_SHARED;
- req->r_args.getattr.mask = cpu_to_le32(mask);
+ req->r_args.lookupino.mask = cpu_to_le32(mask);
req->r_ino1 = vino;
req->r_num_caps = 1;
@@ -91,16 +152,114 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
ihold(inode);
ceph_mdsc_put_request(req);
if (!inode)
- return ERR_PTR(-ESTALE);
- if (inode->i_nlink == 0) {
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
+ return err < 0 ? ERR_PTR(err) : ERR_PTR(-ESTALE);
}
+ return inode;
+}
+
+struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino)
+{
+ struct inode *inode = __lookup_inode(sb, ino);
+ if (IS_ERR(inode))
+ return inode;
+ if (inode->i_nlink == 0) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ return inode;
+}
+static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
+{
+ struct inode *inode = __lookup_inode(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (inode->i_nlink == 0) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return d_obtain_alias(inode);
}
+static struct dentry *__snapfh_to_dentry(struct super_block *sb,
+ struct ceph_nfs_snapfh *sfh,
+ bool want_parent)
+{
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *inode;
+ struct ceph_vino vino;
+ int mask;
+ int err;
+ bool unlinked = false;
+
+ if (want_parent) {
+ vino.ino = sfh->parent_ino;
+ if (sfh->snapid == CEPH_SNAPDIR)
+ vino.snap = CEPH_NOSNAP;
+ else if (sfh->ino == sfh->parent_ino)
+ vino.snap = CEPH_SNAPDIR;
+ else
+ vino.snap = sfh->snapid;
+ } else {
+ vino.ino = sfh->ino;
+ vino.snap = sfh->snapid;
+ }
+ inode = ceph_find_inode(sb, vino);
+ if (inode)
+ return d_obtain_alias(inode);
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
+ USE_ANY_MDS);
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ mask = CEPH_STAT_CAP_INODE;
+ if (ceph_security_xattr_wanted(d_inode(sb->s_root)))
+ mask |= CEPH_CAP_XATTR_SHARED;
+ req->r_args.lookupino.mask = cpu_to_le32(mask);
+ if (vino.snap < CEPH_NOSNAP) {
+ req->r_args.lookupino.snapid = cpu_to_le64(vino.snap);
+ if (!want_parent && sfh->ino != sfh->parent_ino) {
+ req->r_args.lookupino.parent =
+ cpu_to_le64(sfh->parent_ino);
+ req->r_args.lookupino.hash =
+ cpu_to_le32(sfh->hash);
+ }
+ }
+
+ req->r_ino1 = vino;
+ req->r_num_caps = 1;
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ inode = req->r_target_inode;
+ if (inode) {
+ if (vino.snap == CEPH_SNAPDIR) {
+ if (inode->i_nlink == 0)
+ unlinked = true;
+ inode = ceph_get_snapdir(inode);
+ } else if (ceph_snap(inode) == vino.snap) {
+ ihold(inode);
+ } else {
+ /* mds does not support looking up a snapped inode */
+ err = -EOPNOTSUPP;
+ inode = NULL;
+ }
+ }
+ ceph_mdsc_put_request(req);
+
+ if (want_parent) {
+ dout("snapfh_to_parent %llx.%llx\n err=%d\n",
+ vino.ino, vino.snap, err);
+ } else {
+ dout("snapfh_to_dentry %llx.%llx parent %llx hash %x err=%d",
+ vino.ino, vino.snap, sfh->parent_ino, sfh->hash, err);
+ }
+ if (!inode)
+ return ERR_PTR(-ESTALE);
+ /* see comments in ceph_get_parent() */
+ return unlinked ? d_obtain_root(inode) : d_obtain_alias(inode);
+}
+
/*
* convert regular fh to dentry
*/
@@ -110,6 +269,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
{
struct ceph_nfs_fh *fh = (void *)fid->raw;
+ if (fh_type == FILEID_BTRFS_WITH_PARENT) {
+ struct ceph_nfs_snapfh *sfh = (void *)fid->raw;
+ return __snapfh_to_dentry(sb, sfh, false);
+ }
+
if (fh_type != FILEID_INO32_GEN &&
fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
@@ -163,13 +327,49 @@ static struct dentry *__get_parent(struct super_block *sb,
static struct dentry *ceph_get_parent(struct dentry *child)
{
- /* don't re-export snaps */
- if (ceph_snap(d_inode(child)) != CEPH_NOSNAP)
- return ERR_PTR(-EINVAL);
-
- dout("get_parent %p ino %llx.%llx\n",
- child, ceph_vinop(d_inode(child)));
- return __get_parent(child->d_sb, child, 0);
+ struct inode *inode = d_inode(child);
+ struct dentry *dn;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP) {
+ struct inode* dir;
+ bool unlinked = false;
+ /* do not support non-directory */
+ if (!d_is_dir(child)) {
+ dn = ERR_PTR(-EINVAL);
+ goto out;
+ }
+ dir = __lookup_inode(inode->i_sb, ceph_ino(inode));
+ if (IS_ERR(dir)) {
+ dn = ERR_CAST(dir);
+ goto out;
+ }
+ /* There can be multiple paths to access a snapped inode.
+ * For simplicity, treat the snapdir of the head inode as the parent. */
+ if (ceph_snap(inode) != CEPH_SNAPDIR) {
+ struct inode *snapdir = ceph_get_snapdir(dir);
+ if (dir->i_nlink == 0)
+ unlinked = true;
+ iput(dir);
+ if (IS_ERR(snapdir)) {
+ dn = ERR_CAST(snapdir);
+ goto out;
+ }
+ dir = snapdir;
+ }
+ /* If the directory has already been deleted, further get_parent
+ * calls will fail. Do not mark the snapdir dentry as disconnected;
+ * this prevents exportfs from doing further get_parent calls. */
+ if (unlinked)
+ dn = d_obtain_root(dir);
+ else
+ dn = d_obtain_alias(dir);
+ } else {
+ dn = __get_parent(child->d_sb, child, 0);
+ }
+out:
+ dout("get_parent %p ino %llx.%llx err=%ld\n",
+ child, ceph_vinop(inode), (IS_ERR(dn) ? PTR_ERR(dn) : 0));
+ return dn;
}
/*
@@ -182,6 +382,11 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
struct ceph_nfs_confh *cfh = (void *)fid->raw;
struct dentry *dentry;
+ if (fh_type == FILEID_BTRFS_WITH_PARENT) {
+ struct ceph_nfs_snapfh *sfh = (void *)fid->raw;
+ return __snapfh_to_dentry(sb, sfh, true);
+ }
+
if (fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
if (fh_len < sizeof(*cfh) / 4)
@@ -194,14 +399,115 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
return dentry;
}
+static int __get_snap_name(struct dentry *parent, char *name,
+ struct dentry *child)
+{
+ struct inode *inode = d_inode(child);
+ struct inode *dir = d_inode(parent);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_request *req = NULL;
+ char *last_name = NULL;
+ unsigned next_offset = 2;
+ int err = -EINVAL;
+
+ if (ceph_ino(inode) != ceph_ino(dir))
+ goto out;
+ if (ceph_snap(inode) == CEPH_SNAPDIR) {
+ if (ceph_snap(dir) == CEPH_NOSNAP) {
+ strcpy(name, fsc->mount_options->snapdir_name);
+ err = 0;
+ }
+ goto out;
+ }
+ if (ceph_snap(dir) != CEPH_SNAPDIR)
+ goto out;
+
+ while (1) {
+ struct ceph_mds_reply_info_parsed *rinfo;
+ struct ceph_mds_reply_dir_entry *rde;
+ int i;
+
+ req = ceph_mdsc_create_request(fsc->mdsc, CEPH_MDS_OP_LSSNAP,
+ USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ req = NULL;
+ goto out;
+ }
+ err = ceph_alloc_readdir_reply_buffer(req, inode);
+ if (err)
+ goto out;
+
+ req->r_direct_mode = USE_AUTH_MDS;
+ req->r_readdir_offset = next_offset;
+ req->r_args.readdir.flags =
+ cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
+ if (last_name) {
+ req->r_path2 = last_name;
+ last_name = NULL;
+ }
+
+ req->r_inode = dir;
+ ihold(dir);
+ req->r_dentry = dget(parent);
+
+ inode_lock(dir);
+ err = ceph_mdsc_do_request(fsc->mdsc, NULL, req);
+ inode_unlock(dir);
+
+ if (err < 0)
+ goto out;
+
+ rinfo = &req->r_reply_info;
+ for (i = 0; i < rinfo->dir_nr; i++) {
+ rde = rinfo->dir_entries + i;
+ BUG_ON(!rde->inode.in);
+ if (ceph_snap(inode) ==
+ le64_to_cpu(rde->inode.in->snapid)) {
+ memcpy(name, rde->name, rde->name_len);
+ name[rde->name_len] = '\0';
+ err = 0;
+ goto out;
+ }
+ }
+
+ if (rinfo->dir_end)
+ break;
+
+ BUG_ON(rinfo->dir_nr <= 0);
+ rde = rinfo->dir_entries + (rinfo->dir_nr - 1);
+ next_offset += rinfo->dir_nr;
+ last_name = kstrndup(rde->name, rde->name_len, GFP_KERNEL);
+ if (!last_name) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ceph_mdsc_put_request(req);
+ req = NULL;
+ }
+ err = -ENOENT;
+out:
+ if (req)
+ ceph_mdsc_put_request(req);
+ kfree(last_name);
+ dout("get_snap_name %p ino %llx.%llx err=%d\n",
+ child, ceph_vinop(inode), err);
+ return err;
+}
+
static int ceph_get_name(struct dentry *parent, char *name,
struct dentry *child)
{
struct ceph_mds_client *mdsc;
struct ceph_mds_request *req;
+ struct inode *inode = d_inode(child);
int err;
- mdsc = ceph_inode_to_client(d_inode(child))->mdsc;
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return __get_snap_name(parent, name, child);
+
+ mdsc = ceph_inode_to_client(inode)->mdsc;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
USE_ANY_MDS);
if (IS_ERR(req))
@@ -209,8 +515,8 @@ static int ceph_get_name(struct dentry *parent, char *name,
inode_lock(d_inode(parent));
- req->r_inode = d_inode(child);
- ihold(d_inode(child));
+ req->r_inode = inode;
+ ihold(inode);
req->r_ino2 = ceph_vino(d_inode(parent));
req->r_parent = d_inode(parent);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
@@ -224,10 +530,10 @@ static int ceph_get_name(struct dentry *parent, char *name,
memcpy(name, rinfo->dname, rinfo->dname_len);
name[rinfo->dname_len] = 0;
dout("get_name %p ino %llx.%llx name %s\n",
- child, ceph_vinop(d_inode(child)), name);
+ child, ceph_vinop(inode), name);
} else {
dout("get_name %p ino %llx.%llx err %d\n",
- child, ceph_vinop(d_inode(child)), err);
+ child, ceph_vinop(inode), err);
}
ceph_mdsc_put_request(req);
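
The snapshot file handles introduced above are measured in 32-bit words, which is why ceph_encode_snapfh compares *max_len against sizeof(struct ceph_nfs_snapfh) >> 2. A small sketch of that arithmetic (the struct mirrors the patch's layout but is illustrative only):

#include <stdint.h>
#include <stdio.h>

struct snapfh {
	uint64_t ino;
	uint64_t snapid;
	uint64_t parent_ino;
	uint32_t hash;
} __attribute__((packed));

int main(void)
{
	int handle_words = sizeof(struct snapfh) >> 2;

	/* 8 + 8 + 8 + 4 = 28 bytes -> 7 32-bit words */
	printf("snapfh: %zu bytes = %d words\n",
	       sizeof(struct snapfh), handle_words);
	return 0;
}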
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 84725b53ac21..305daf043eb0 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -929,7 +929,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
(write ? "write" : "read"), file, pos, (unsigned)count,
- snapc, snapc->seq);
+ snapc, snapc ? snapc->seq : 0);
ret = filemap_write_and_wait_range(inode->i_mapping,
pos, pos + count - 1);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 35dae6d5493a..f85355bf49c4 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2266,43 +2266,72 @@ int ceph_permission(struct inode *inode, int mask)
return err;
}
+/* Craft a mask of needed caps given a set of requested statx attrs. */
+static int statx_to_caps(u32 want)
+{
+ int mask = 0;
+
+ if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME))
+ mask |= CEPH_CAP_AUTH_SHARED;
+
+ if (want & (STATX_NLINK|STATX_CTIME))
+ mask |= CEPH_CAP_LINK_SHARED;
+
+ if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
+ STATX_BLOCKS))
+ mask |= CEPH_CAP_FILE_SHARED;
+
+ if (want & (STATX_CTIME))
+ mask |= CEPH_CAP_XATTR_SHARED;
+
+ return mask;
+}
+
/*
- * Get all attributes. Hopefully somedata we'll have a statlite()
- * and can limit the fields we require to be accurate.
+ * Get all the attributes. If we have sufficient caps for the requested attrs,
+ * then we can avoid talking to the MDS at all.
*/
int ceph_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
- int err;
+ int err = 0;
- err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
- if (!err) {
- generic_fillattr(inode, stat);
- stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
- if (ceph_snap(inode) == CEPH_NOSNAP)
- stat->dev = inode->i_sb->s_dev;
+ /* Skip the getattr altogether if we're asked not to sync */
+ if (!(flags & AT_STATX_DONT_SYNC)) {
+ err = ceph_do_getattr(inode, statx_to_caps(request_mask),
+ flags & AT_STATX_FORCE_SYNC);
+ if (err)
+ return err;
+ }
+
+ generic_fillattr(inode, stat);
+ stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
+ if (ceph_snap(inode) == CEPH_NOSNAP)
+ stat->dev = inode->i_sb->s_dev;
+ else
+ stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
+
+ if (S_ISDIR(inode->i_mode)) {
+ if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
+ RBYTES))
+ stat->size = ci->i_rbytes;
else
- stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
-
- if (S_ISDIR(inode->i_mode)) {
- if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
- RBYTES))
- stat->size = ci->i_rbytes;
- else
- stat->size = ci->i_files + ci->i_subdirs;
- stat->blocks = 0;
- stat->blksize = 65536;
- /*
- * Some applications rely on the number of st_nlink
- * value on directories to be either 0 (if unlinked)
- * or 2 + number of subdirectories.
- */
- if (stat->nlink == 1)
- /* '.' + '..' + subdirs */
- stat->nlink = 1 + 1 + ci->i_subdirs;
- }
+ stat->size = ci->i_files + ci->i_subdirs;
+ stat->blocks = 0;
+ stat->blksize = 65536;
+ /*
+ * Some applications rely on the number of st_nlink
+ * value on directories to be either 0 (if unlinked)
+ * or 2 + number of subdirectories.
+ */
+ if (stat->nlink == 1)
+ /* '.' + '..' + subdirs */
+ stat->nlink = 1 + 1 + ci->i_subdirs;
}
+
+ /* Mask off any higher bits (e.g. btime) until we have support */
+ stat->result_mask = request_mask & STATX_BASIC_STATS;
return err;
}
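
With statx_to_caps() above, ceph_getattr only syncs with the MDS when the requested statx fields actually need it, and AT_STATX_DONT_SYNC skips the getattr round trip entirely. A hedged userspace example exercising that path via the standard statx(2) call (nothing here is CephFS-specific):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;
	const char *path = argc > 1 ? argv[1] : ".";

	/* Ask only for the size and allow a cached answer: on CephFS this
	 * now needs at most CEPH_CAP_FILE_SHARED and possibly no MDS call. */
	if (statx(AT_FDCWD, path, AT_STATX_DONT_SYNC, STATX_SIZE, &stx)) {
		perror("statx");
		return 1;
	}
	printf("size=%llu (result mask=0x%x)\n",
	       (unsigned long long)stx.stx_size, (unsigned)stx.stx_mask);
	return 0;
}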
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 9dae2ec7e1fa..ac9b53b89365 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -237,15 +237,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
err = -EIO;
- } else if (op == CEPH_MDS_OP_SETFILELOCK) {
- /*
- * increasing i_filelock_ref closes race window between
- * handling request reply and adding file_lock struct to
- * inode. Otherwise, i_auth_cap may get trimmed in the
- * window. Caller function will decrease the counter.
- */
- fl->fl_ops = &ceph_fl_lock_ops;
- atomic_inc(&ci->i_filelock_ref);
}
spin_unlock(&ci->i_ceph_lock);
if (err < 0) {
@@ -299,10 +290,6 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
err = -EIO;
- } else {
- /* see comment in ceph_lock */
- fl->fl_ops = &ceph_fl_lock_ops;
- atomic_inc(&ci->i_filelock_ref);
}
spin_unlock(&ci->i_ceph_lock);
if (err < 0) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 9049c2a3e972..959b1bf7c327 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -550,15 +550,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
int mds)
{
- struct ceph_mds_session *session;
-
if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
return NULL;
- session = mdsc->sessions[mds];
- dout("lookup_mds_session %p %d\n", session,
- refcount_read(&session->s_ref));
- get_session(session);
- return session;
+ return get_session(mdsc->sessions[mds]);
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
@@ -1284,9 +1278,9 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
*
* Caller must hold session s_mutex.
*/
-static int iterate_session_caps(struct ceph_mds_session *session,
- int (*cb)(struct inode *, struct ceph_cap *,
- void *), void *arg)
+int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ int (*cb)(struct inode *, struct ceph_cap *,
+ void *), void *arg)
{
struct list_head *p;
struct ceph_cap *cap;
@@ -1451,7 +1445,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
LIST_HEAD(dispose);
dout("remove_session_caps on %p\n", session);
- iterate_session_caps(session, remove_session_caps_cb, fsc);
+ ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
wake_up_all(&fsc->mdsc->cap_flushing_wq);
@@ -1534,8 +1528,8 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
- iterate_session_caps(session, wake_up_session_cb,
- (void *)(unsigned long)ev);
+ ceph_iterate_session_caps(session, wake_up_session_cb,
+ (void *)(unsigned long)ev);
}
/*
@@ -1768,7 +1762,7 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
session->s_mds, session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
session->s_trim_caps = trim_caps;
- iterate_session_caps(session, trim_caps_cb, session);
+ ceph_iterate_session_caps(session, trim_caps_cb, session);
dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
session->s_mds, session->s_nr_caps, max_caps,
trim_caps - session->s_trim_caps);
@@ -1861,7 +1855,8 @@ again:
num_cap_releases--;
head = msg->front.iov_base;
- le32_add_cpu(&head->num, 1);
+ put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
+ &head->num);
item = msg->front.iov_base + msg->front.iov_len;
item->ino = cpu_to_le64(cap->cap_ino);
item->cap_id = cpu_to_le64(cap->cap_id);
@@ -2089,43 +2084,29 @@ static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
-char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
+char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
int stop_on_nosnap)
{
struct dentry *temp;
char *path;
- int len, pos;
+ int pos;
unsigned seq;
+ u64 base;
if (!dentry)
return ERR_PTR(-EINVAL);
-retry:
- len = 0;
- seq = read_seqbegin(&rename_lock);
- rcu_read_lock();
- for (temp = dentry; !IS_ROOT(temp);) {
- struct inode *inode = d_inode(temp);
- if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
- len++; /* slash only */
- else if (stop_on_nosnap && inode &&
- ceph_snap(inode) == CEPH_NOSNAP)
- break;
- else
- len += 1 + temp->d_name.len;
- temp = temp->d_parent;
- }
- rcu_read_unlock();
- if (len)
- len--; /* no leading '/' */
-
- path = kmalloc(len+1, GFP_NOFS);
+ path = __getname();
if (!path)
return ERR_PTR(-ENOMEM);
- pos = len;
- path[pos] = 0; /* trailing null */
+retry:
+ pos = PATH_MAX - 1;
+ path[pos] = '\0';
+
+ seq = read_seqbegin(&rename_lock);
rcu_read_lock();
- for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
+ temp = dentry;
+ for (;;) {
struct inode *inode;
spin_lock(&temp->d_lock);
@@ -2143,83 +2124,54 @@ retry:
spin_unlock(&temp->d_lock);
break;
}
- strncpy(path + pos, temp->d_name.name,
- temp->d_name.len);
+ memcpy(path + pos, temp->d_name.name, temp->d_name.len);
}
spin_unlock(&temp->d_lock);
- if (pos)
- path[--pos] = '/';
temp = temp->d_parent;
+
+ /* Are we at the root? */
+ if (IS_ROOT(temp))
+ break;
+
+ /* Are we out of buffer? */
+ if (--pos < 0)
+ break;
+
+ path[pos] = '/';
}
+ base = ceph_ino(d_inode(temp));
rcu_read_unlock();
- if (pos != 0 || read_seqretry(&rename_lock, seq)) {
+ if (pos < 0 || read_seqretry(&rename_lock, seq)) {
pr_err("build_path did not end path lookup where "
- "expected, namelen is %d, pos is %d\n", len, pos);
+ "expected, pos is %d\n", pos);
/* presumably this is only possible if racing with a
rename of one of the parent directories (we can not
lock the dentries above us to prevent this, but
retrying should be harmless) */
- kfree(path);
goto retry;
}
- *base = ceph_ino(d_inode(temp));
- *plen = len;
+ *pbase = base;
+ *plen = PATH_MAX - 1 - pos;
dout("build_path on %p %d built %llx '%.*s'\n",
- dentry, d_count(dentry), *base, len, path);
- return path;
-}
-
-/* Duplicate the dentry->d_name.name safely */
-static int clone_dentry_name(struct dentry *dentry, const char **ppath,
- int *ppathlen)
-{
- u32 len;
- char *name;
-
-retry:
- len = READ_ONCE(dentry->d_name.len);
- name = kmalloc(len + 1, GFP_NOFS);
- if (!name)
- return -ENOMEM;
-
- spin_lock(&dentry->d_lock);
- if (dentry->d_name.len != len) {
- spin_unlock(&dentry->d_lock);
- kfree(name);
- goto retry;
- }
- memcpy(name, dentry->d_name.name, len);
- spin_unlock(&dentry->d_lock);
-
- name[len] = '\0';
- *ppath = name;
- *ppathlen = len;
- return 0;
+ dentry, d_count(dentry), base, *plen, path + pos);
+ return path + pos;
}
static int build_dentry_path(struct dentry *dentry, struct inode *dir,
const char **ppath, int *ppathlen, u64 *pino,
bool *pfreepath, bool parent_locked)
{
- int ret;
char *path;
rcu_read_lock();
if (!dir)
dir = d_inode_rcu(dentry->d_parent);
- if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
+ if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
*pino = ceph_ino(dir);
rcu_read_unlock();
- if (parent_locked) {
- *ppath = dentry->d_name.name;
- *ppathlen = dentry->d_name.len;
- } else {
- ret = clone_dentry_name(dentry, ppath, ppathlen);
- if (ret)
- return ret;
- *pfreepath = true;
- }
+ *ppath = dentry->d_name.name;
+ *ppathlen = dentry->d_name.len;
return 0;
}
rcu_read_unlock();
@@ -2331,9 +2283,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
(!!req->r_inode_drop + !!req->r_dentry_drop +
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
- len += req->r_dentry->d_name.len;
+ len += pathlen1;
if (req->r_old_dentry_drop)
- len += req->r_old_dentry->d_name.len;
+ len += pathlen2;
msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
if (!msg) {
@@ -2410,10 +2362,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
out_free2:
if (freepath2)
- kfree((char *)path2);
+ ceph_mdsc_free_path((char *)path2, pathlen2);
out_free1:
if (freepath1)
- kfree((char *)path1);
+ ceph_mdsc_free_path((char *)path1, pathlen1);
out:
return msg;
}
@@ -2427,8 +2379,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
{
if (req->r_callback)
req->r_callback(mdsc, req);
- else
- complete_all(&req->r_completion);
+ complete_all(&req->r_completion);
}
/*
@@ -2670,28 +2621,11 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
}
}
-void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
+int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
struct ceph_mds_request *req)
{
- dout("submit_request on %p\n", req);
- mutex_lock(&mdsc->mutex);
- __register_request(mdsc, req, NULL);
- __do_request(mdsc, req);
- mutex_unlock(&mdsc->mutex);
-}
-
-/*
- * Synchrously perform an mds request. Take care of all of the
- * session setup, forwarding, retry details.
- */
-int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
- struct inode *dir,
- struct ceph_mds_request *req)
-{
int err;
- dout("do_request on %p\n", req);
-
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
if (req->r_inode)
ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
@@ -2701,18 +2635,21 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
- /* issue */
+ dout("submit_request on %p for inode %p\n", req, dir);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
__do_request(mdsc, req);
+ err = req->r_err;
+ mutex_unlock(&mdsc->mutex);
+ return err;
+}
- if (req->r_err) {
- err = req->r_err;
- goto out;
- }
+static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ int err;
/* wait */
- mutex_unlock(&mdsc->mutex);
dout("do_request waiting\n");
if (!req->r_timeout && req->r_wait_for_completion) {
err = req->r_wait_for_completion(mdsc, req);
@@ -2753,8 +2690,26 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
err = req->r_err;
}
-out:
mutex_unlock(&mdsc->mutex);
+ return err;
+}
+
+/*
+ * Synchronously perform an mds request. Take care of all of the
+ * session setup, forwarding, retry details.
+ */
+int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
+ struct inode *dir,
+ struct ceph_mds_request *req)
+{
+ int err;
+
+ dout("do_request on %p\n", req);
+
+ /* issue */
+ err = ceph_mdsc_submit_request(mdsc, dir, req);
+ if (!err)
+ err = ceph_mdsc_wait_request(mdsc, req);
dout("do_request %p done, result %d\n", req, err);
return err;
}
@@ -3485,7 +3440,7 @@ out_freeflocks:
ceph_pagelist_encode_string(pagelist, path, pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
out_freepath:
- kfree(path);
+ ceph_mdsc_free_path(path, pathlen);
}
out_err:
@@ -3642,7 +3597,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
recon_state.msg_version = 2;
}
/* traverse this session's caps */
- err = iterate_session_caps(session, encode_caps_cb, &recon_state);
+ err = ceph_iterate_session_caps(session, encode_caps_cb, &recon_state);
spin_lock(&session->s_cap_lock);
session->s_cap_reconnect = 0;
@@ -4125,6 +4080,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
mdsc->max_sessions = 0;
mdsc->stopping = 0;
atomic64_set(&mdsc->quotarealms_count, 0);
+ mdsc->quotarealms_inodes = RB_ROOT;
+ mutex_init(&mdsc->quotarealms_inodes_mutex);
mdsc->last_snap_seq = 0;
init_rwsem(&mdsc->snap_rwsem);
mdsc->snap_realms = RB_ROOT;
@@ -4216,6 +4173,8 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
* their inode/dcache refs
*/
ceph_msgr_flush();
+
+ ceph_cleanup_quotarealms_inodes(mdsc);
}
/*
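
The ceph_mdsc_build_path() rewrite above drops the two-pass length computation: it fills a single PATH_MAX buffer from the tail, walking from the leaf towards the root, and returns an interior pointer plus the length. A minimal userspace sketch of that scheme, with an array of name components standing in for the dentry walk:

#include <stdio.h>
#include <string.h>

#define PATH_MAX 4096

static char *build_path(const char *const comps[], int n,
			char *buf, int *plen)
{
	int pos = PATH_MAX - 1;

	buf[pos] = '\0';
	for (int i = n - 1; i >= 0; i--) {	/* leaf towards root */
		int len = strlen(comps[i]);

		pos -= len;
		if (pos < 0)
			return NULL;		/* the kernel code retries */
		memcpy(buf + pos, comps[i], len);
		if (i > 0 && --pos >= 0)
			buf[pos] = '/';
	}
	*plen = PATH_MAX - 1 - pos;
	return buf + pos;			/* note: not buf itself */
}

int main(void)
{
	static char buf[PATH_MAX];
	const char *comps[] = { "a", "b", "c" };
	int len;
	char *p = build_path(comps, 3, buf, &len);

	if (p)
		printf("%s (%d)\n", p, len);	/* prints: a/b/c (5) */
	return 0;
}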
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 50385a481fdb..a83f28bc2387 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -326,6 +326,18 @@ struct ceph_snapid_map {
};
/*
+ * Node for the list of quotarealm inodes that are not visible from the
+ * filesystem mountpoint, but which are required to handle, e.g., quotas.
+ */
+struct ceph_quotarealm_inode {
+ struct rb_node node;
+ u64 ino;
+ unsigned long timeout; /* last time a lookup failed for this inode */
+ struct mutex mutex;
+ struct inode *inode;
+};
+
+/*
* mds client state
*/
struct ceph_mds_client {
@@ -344,6 +356,12 @@ struct ceph_mds_client {
int stopping; /* true if shutting down */
atomic64_t quotarealms_count; /* # realms with quota */
+ /*
+ * We keep a list of inodes we don't see in the mountpoint but that we
+ * need in order to track quota realms.
+ */
+ struct rb_root quotarealms_inodes;
+ struct mutex quotarealms_inodes_mutex;
/*
* snap_rwsem will cover cap linkage into snaprealms, and
@@ -447,8 +465,9 @@ extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct inode *dir);
extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
-extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
- struct ceph_mds_request *req);
+extern int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
+ struct inode *dir,
+ struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req);
@@ -468,8 +487,18 @@ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ int (*cb)(struct inode *,
+ struct ceph_cap *, void *),
+ void *arg);
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
+static inline void ceph_mdsc_free_path(char *path, int len)
+{
+ if (path)
+ __putname(path - (PATH_MAX - 1 - len));
+}
+
extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
int stop_on_nosnap);
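
Because build_path now returns a pointer into the middle of a __getname() buffer, the new ceph_mdsc_free_path() helper above has to recover the allocation base from the path length before it can call __putname(). A sketch of the pointer arithmetic, with malloc/free standing in for the names cache:

#include <stdlib.h>
#include <string.h>

#define PATH_MAX 4096

static char *fake_build_path(const char *s, int *plen)
{
	char *base = malloc(PATH_MAX);	/* kernel: __getname() */
	int len = strlen(s);
	int pos = PATH_MAX - 1 - len;

	if (!base)
		return NULL;
	memcpy(base + pos, s, len + 1);
	*plen = len;
	return base + pos;		/* interior pointer */
}

static void free_path(char *path, int len)
{
	if (path)
		free(path - (PATH_MAX - 1 - len));	/* back to base */
}

int main(void)
{
	int len = 0;
	char *p = fake_build_path("a/b/c", &len);

	free_path(p, len);	/* free_path(NULL, ...) is a no-op */
	return 0;
}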
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 1a2c5d390f7f..701b4fb0fb5a 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -205,7 +205,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
i+1, n, global_id, mds, inc,
- ceph_pr_addr(&addr.in_addr),
+ ceph_pr_addr(&addr),
ceph_mds_state_name(state));
if (mds < 0 || state <= 0)
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 9455d3aef0c3..c4522212872c 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -22,7 +22,16 @@ void ceph_adjust_quota_realms_count(struct inode *inode, bool inc)
static inline bool ceph_has_realms_with_quotas(struct inode *inode)
{
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- return atomic64_read(&mdsc->quotarealms_count) > 0;
+ struct super_block *sb = mdsc->fsc->sb;
+
+ if (atomic64_read(&mdsc->quotarealms_count) > 0)
+ return true;
+ /* if root is the real CephFS root, we don't have quota realms */
+ if (sb->s_root->d_inode &&
+ (sb->s_root->d_inode->i_ino == CEPH_INO_ROOT))
+ return false;
+ /* otherwise, we can't know for sure */
+ return true;
}
void ceph_handle_quota(struct ceph_mds_client *mdsc,
@@ -68,6 +77,108 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
iput(inode);
}
+static struct ceph_quotarealm_inode *
+find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
+{
+ struct ceph_quotarealm_inode *qri = NULL;
+ struct rb_node **node, *parent = NULL;
+
+ mutex_lock(&mdsc->quotarealms_inodes_mutex);
+ node = &(mdsc->quotarealms_inodes.rb_node);
+ while (*node) {
+ parent = *node;
+ qri = container_of(*node, struct ceph_quotarealm_inode, node);
+
+ if (ino < qri->ino)
+ node = &((*node)->rb_left);
+ else if (ino > qri->ino)
+ node = &((*node)->rb_right);
+ else
+ break;
+ }
+ if (!qri || (qri->ino != ino)) {
+ /* Not found, create a new one and insert it */
+ qri = kmalloc(sizeof(*qri), GFP_KERNEL);
+ if (qri) {
+ qri->ino = ino;
+ qri->inode = NULL;
+ qri->timeout = 0;
+ mutex_init(&qri->mutex);
+ rb_link_node(&qri->node, parent, node);
+ rb_insert_color(&qri->node, &mdsc->quotarealms_inodes);
+ } else
+ pr_warn("Failed to alloc quotarealms_inode\n");
+ }
+ mutex_unlock(&mdsc->quotarealms_inodes_mutex);
+
+ return qri;
+}
+
+/*
+ * This function will try to look up a realm inode which isn't visible in the
+ * filesystem mountpoint. A list of this kind of inode (not visible) is
+ * maintained in the mdsc and freed only when the filesystem is umounted.
+ *
+ * Note that these inodes are kept in this list even if the lookup fails,
+ * which avoids repeating useless lookup requests.
+ */
+static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
+ struct super_block *sb,
+ struct ceph_snap_realm *realm)
+{
+ struct ceph_quotarealm_inode *qri;
+ struct inode *in;
+
+ qri = find_quotarealm_inode(mdsc, realm->ino);
+ if (!qri)
+ return NULL;
+
+ mutex_lock(&qri->mutex);
+ if (qri->inode) {
+ /* A request has already returned the inode */
+ mutex_unlock(&qri->mutex);
+ return qri->inode;
+ }
+ /* Check if this inode lookup has failed recently */
+ if (qri->timeout &&
+ time_before_eq(jiffies, qri->timeout)) {
+ mutex_unlock(&qri->mutex);
+ return NULL;
+ }
+ in = ceph_lookup_inode(sb, realm->ino);
+ if (IS_ERR(in)) {
+ pr_warn("Can't lookup inode %llx (err: %ld)\n",
+ realm->ino, PTR_ERR(in));
+ qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
+ } else {
+ qri->timeout = 0;
+ qri->inode = in;
+ }
+ mutex_unlock(&qri->mutex);
+
+ return in;
+}
+
+void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
+{
+ struct ceph_quotarealm_inode *qri;
+ struct rb_node *node;
+
+ /*
+ * It should now be safe to clean the quotarealms_inodes tree without holding
+ * mdsc->quotarealms_inodes_mutex...
+ */
+ mutex_lock(&mdsc->quotarealms_inodes_mutex);
+ while (!RB_EMPTY_ROOT(&mdsc->quotarealms_inodes)) {
+ node = rb_first(&mdsc->quotarealms_inodes);
+ qri = rb_entry(node, struct ceph_quotarealm_inode, node);
+ rb_erase(node, &mdsc->quotarealms_inodes);
+ iput(qri->inode);
+ kfree(qri);
+ }
+ mutex_unlock(&mdsc->quotarealms_inodes_mutex);
+}
+
/*
* This function walks through the snaprealm for an inode and returns the
* ceph_snap_realm for the first snaprealm that has quotas set (either max_files
@@ -76,9 +187,15 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
*
* Note that the caller is responsible for calling ceph_put_snap_realm() on the
* returned realm.
+ *
+ * Callers of this function need to hold mdsc->snap_rwsem. However, if there's
+ * a need to do an inode lookup, this rwsem will be temporarily dropped. Hence
+ * the 'retry' argument: if the rwsem needs to be dropped and 'retry' is
+ * 'false', this function will return -EAGAIN; otherwise, the snaprealms
+ * walk-through will be restarted.
*/
static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
- struct inode *inode)
+ struct inode *inode, bool retry)
{
struct ceph_inode_info *ci = NULL;
struct ceph_snap_realm *realm, *next;
@@ -88,6 +205,7 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
if (ceph_snap(inode) != CEPH_NOSNAP)
return NULL;
+restart:
realm = ceph_inode(inode)->i_snap_realm;
if (realm)
ceph_get_snap_realm(mdsc, realm);
@@ -95,11 +213,25 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
pr_err_ratelimited("get_quota_realm: ino (%llx.%llx) "
"null i_snap_realm\n", ceph_vinop(inode));
while (realm) {
+ bool has_inode;
+
spin_lock(&realm->inodes_with_caps_lock);
- in = realm->inode ? igrab(realm->inode) : NULL;
+ has_inode = realm->inode;
+ in = has_inode ? igrab(realm->inode) : NULL;
spin_unlock(&realm->inodes_with_caps_lock);
- if (!in)
+ if (has_inode && !in)
break;
+ if (!in) {
+ up_read(&mdsc->snap_rwsem);
+ in = lookup_quotarealm_inode(mdsc, inode->i_sb, realm);
+ down_read(&mdsc->snap_rwsem);
+ if (IS_ERR_OR_NULL(in))
+ break;
+ ceph_put_snap_realm(mdsc, realm);
+ if (!retry)
+ return ERR_PTR(-EAGAIN);
+ goto restart;
+ }
ci = ceph_inode(in);
has_quota = __ceph_has_any_quota(ci);
@@ -125,9 +257,22 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
struct ceph_snap_realm *old_realm, *new_realm;
bool is_same;
+restart:
+ /*
+ * We need to lookup 2 quota realms atomically, i.e. with snap_rwsem.
+ * However, get_quota_realm may drop it temporarily. By setting the
+ * 'retry' parameter to 'false', we'll get -EAGAIN if the rwsem was
+ * dropped and we can then restart the whole operation.
+ */
down_read(&mdsc->snap_rwsem);
- old_realm = get_quota_realm(mdsc, old);
- new_realm = get_quota_realm(mdsc, new);
+ old_realm = get_quota_realm(mdsc, old, true);
+ new_realm = get_quota_realm(mdsc, new, false);
+ if (PTR_ERR(new_realm) == -EAGAIN) {
+ up_read(&mdsc->snap_rwsem);
+ if (old_realm)
+ ceph_put_snap_realm(mdsc, old_realm);
+ goto restart;
+ }
is_same = (old_realm == new_realm);
up_read(&mdsc->snap_rwsem);
@@ -166,6 +311,7 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
return false;
down_read(&mdsc->snap_rwsem);
+restart:
realm = ceph_inode(inode)->i_snap_realm;
if (realm)
ceph_get_snap_realm(mdsc, realm);
@@ -173,12 +319,23 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
pr_err_ratelimited("check_quota_exceeded: ino (%llx.%llx) "
"null i_snap_realm\n", ceph_vinop(inode));
while (realm) {
+ bool has_inode;
+
spin_lock(&realm->inodes_with_caps_lock);
- in = realm->inode ? igrab(realm->inode) : NULL;
+ has_inode = realm->inode;
+ in = has_inode ? igrab(realm->inode) : NULL;
spin_unlock(&realm->inodes_with_caps_lock);
- if (!in)
+ if (has_inode && !in)
break;
-
+ if (!in) {
+ up_read(&mdsc->snap_rwsem);
+ in = lookup_quotarealm_inode(mdsc, inode->i_sb, realm);
+ down_read(&mdsc->snap_rwsem);
+ if (IS_ERR_OR_NULL(in))
+ break;
+ ceph_put_snap_realm(mdsc, realm);
+ goto restart;
+ }
ci = ceph_inode(in);
spin_lock(&ci->i_ceph_lock);
if (op == QUOTA_CHECK_MAX_FILES_OP) {
@@ -314,7 +471,7 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
bool is_updated = false;
down_read(&mdsc->snap_rwsem);
- realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root));
+ realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root), true);
up_read(&mdsc->snap_rwsem);
if (!realm)
return false;
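
find_quotarealm_inode() above is a classic find-or-insert descent: it remembers the parent and the link slot while walking, so rb_link_node()/rb_insert_color() can attach a new node without a second pass. The same shape with a plain binary search tree (sketch only; the kernel uses struct rb_node and kmalloc):

#include <stdint.h>
#include <stdlib.h>

struct qri {
	uint64_t ino;
	struct qri *left, *right;
};

static struct qri *find_or_insert(struct qri **root, uint64_t ino)
{
	struct qri **node = root;
	struct qri *qri;

	while (*node) {			/* descend, remembering the link */
		qri = *node;
		if (ino < qri->ino)
			node = &qri->left;
		else if (ino > qri->ino)
			node = &qri->right;
		else
			return qri;	/* already present */
	}
	qri = calloc(1, sizeof(*qri));	/* kernel: kmalloc + field init */
	if (qri) {
		qri->ino = ino;
		*node = qri;		/* kernel: rb_link_node() +
					 * rb_insert_color() */
	}
	return qri;
}

int main(void)
{
	struct qri *root = NULL;
	struct qri *q = find_or_insert(&root, 42);

	/* second call must find, not insert */
	return q && find_or_insert(&root, 42) == q ? 0 : 1;
}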
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 285edda4fc3b..c864b44c8341 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -845,6 +845,12 @@ static void ceph_umount_begin(struct super_block *sb)
return;
}
+static int ceph_remount(struct super_block *sb, int *flags, char *data)
+{
+ sync_filesystem(sb);
+ return 0;
+}
+
static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.destroy_inode = ceph_destroy_inode,
@@ -853,6 +859,7 @@ static const struct super_operations ceph_super_ops = {
.drop_inode = ceph_drop_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
+ .remount_fs = ceph_remount,
.show_options = ceph_show_options,
.statfs = ceph_statfs,
.umount_begin = ceph_umount_begin,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c5b4a05905c0..6edab9a750f8 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1083,6 +1083,7 @@ extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* export.c */
extern const struct export_operations ceph_export_ops;
+struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino);
/* locks.c */
extern __init void ceph_flock_init(void);
@@ -1133,5 +1134,6 @@ extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
loff_t newlen);
extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
struct kstatfs *buf);
+extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 7ede7306599f..1e21b2528cfb 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -77,7 +77,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
goto name_is_IP_address;
/* Perform the upcall */
- rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL);
+ rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL, false);
if (rc < 0)
cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
__func__, len, len, hostname);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 591e82ba443c..5e7932d668ab 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1757,12 +1757,19 @@ int configfs_register_group(struct config_group *parent_group,
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
ret = create_default_group(parent_group, group);
- if (!ret) {
- spin_lock(&configfs_dirent_lock);
- configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
- spin_unlock(&configfs_dirent_lock);
- }
+ if (ret)
+ goto err_out;
+
+ spin_lock(&configfs_dirent_lock);
+ configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
+ spin_unlock(&configfs_dirent_lock);
+ inode_unlock(d_inode(parent));
+ return 0;
+err_out:
inode_unlock(d_inode(parent));
+ mutex_lock(&subsys->su_mutex);
+ unlink_group(group);
+ mutex_unlock(&subsys->su_mutex);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
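
The configfs_register_group() hunk above turns the success-only branch into a goto unwind: on failure after create_default_group(), the parent inode lock is released first, and only then is the half-registered group unlinked under the subsystem mutex. A hedged pthread sketch of that lock ordering (every function here is a stand-in):

#include <pthread.h>

static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t su_mutex = PTHREAD_MUTEX_INITIALIZER;

static int create_default_group(void) { return -1; /* force failure */ }
static void mark_ready(void) { }
static void unlink_group(void) { }

static int register_group(void)
{
	int ret;

	pthread_mutex_lock(&parent_lock);
	ret = create_default_group();
	if (ret)
		goto err_out;
	mark_ready();
	pthread_mutex_unlock(&parent_lock);
	return 0;
err_out:
	pthread_mutex_unlock(&parent_lock);	/* drop before unwinding */
	pthread_mutex_lock(&su_mutex);		/* different lock for undo */
	unlink_group();
	pthread_mutex_unlock(&su_mutex);
	return ret;
}

int main(void)
{
	return register_group() ? 0 : 1;	/* expect the failure path */
}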
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 3bb9c0c8cbcc..c2891e933ef1 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -92,7 +92,7 @@ static int fscontext_create_fd(struct fs_context *fc, unsigned int o_flags)
{
int fd;
- fd = anon_inode_getfd("fscontext", &fscontext_fops, fc,
+ fd = anon_inode_getfd("[fscontext]", &fscontext_fops, fc,
O_RDWR | o_flags);
if (fd < 0)
put_fs_context(fc);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fdc18321d70c..310f8d17c53e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -231,7 +231,6 @@ struct io_ring_ctx {
struct task_struct *sqo_thread; /* if using sq thread polling */
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
- unsigned sqo_stop;
struct {
/* CQ ring */
@@ -329,9 +328,8 @@ struct io_kiocb {
#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
#define REQ_F_FIXED_FILE 4 /* ctx owns file */
#define REQ_F_SEQ_PREV 8 /* sequential with previous */
-#define REQ_F_PREPPED 16 /* prep already done */
-#define REQ_F_IO_DRAIN 32 /* drain existing IO first */
-#define REQ_F_IO_DRAINED 64 /* drain done */
+#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
+#define REQ_F_IO_DRAINED 32 /* drain done */
u64 user_data;
u32 error; /* iopoll result from callback */
u32 sequence;
@@ -490,7 +488,7 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
}
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
- long res, unsigned ev_flags)
+ long res)
{
struct io_uring_cqe *cqe;
@@ -503,7 +501,7 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
if (cqe) {
WRITE_ONCE(cqe->user_data, ki_user_data);
WRITE_ONCE(cqe->res, res);
- WRITE_ONCE(cqe->flags, ev_flags);
+ WRITE_ONCE(cqe->flags, 0);
} else {
unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
@@ -522,12 +520,12 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
}
static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
- long res, unsigned ev_flags)
+ long res)
{
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
- io_cqring_fill_event(ctx, user_data, res, ev_flags);
+ io_cqring_fill_event(ctx, user_data, res);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -629,7 +627,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
- io_cqring_fill_event(ctx, req->user_data, req->error, 0);
+ io_cqring_fill_event(ctx, req->user_data, req->error);
(*nr_events)++;
if (refcount_dec_and_test(&req->refs)) {
@@ -777,7 +775,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
kiocb_end_write(kiocb);
- io_cqring_add_event(req->ctx, req->user_data, res, 0);
+ io_cqring_add_event(req->ctx, req->user_data, res);
io_put_req(req);
}
@@ -896,9 +894,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
if (!req->file)
return -EBADF;
- /* For -EAGAIN retry, everything is already prepped */
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (force_nonblock && !io_file_supports_async(req->file))
force_nonblock = false;
@@ -941,7 +936,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
return -EINVAL;
kiocb->ki_complete = io_complete_rw;
}
- req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -1216,7 +1210,7 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- io_cqring_add_event(ctx, user_data, err, 0);
+ io_cqring_add_event(ctx, user_data, err);
io_put_req(req);
return 0;
}
@@ -1227,16 +1221,12 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!req->file)
return -EBADF;
- /* Prep already done (EAGAIN retry) */
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
return -EINVAL;
- req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -1265,7 +1255,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
end > 0 ? end : LLONG_MAX,
fsync_flags & IORING_FSYNC_DATASYNC);
- io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
+ io_cqring_add_event(req->ctx, sqe->user_data, ret);
io_put_req(req);
return 0;
}
@@ -1277,16 +1267,12 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!req->file)
return -EBADF;
- /* Prep already done (EAGAIN retry) */
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
return -EINVAL;
- req->flags |= REQ_F_PREPPED;
return ret;
}
@@ -1313,7 +1299,7 @@ static int io_sync_file_range(struct io_kiocb *req,
ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
- io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
+ io_cqring_add_event(req->ctx, sqe->user_data, ret);
io_put_req(req);
return 0;
}
@@ -1371,7 +1357,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
spin_unlock_irq(&ctx->completion_lock);
- io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
+ io_cqring_add_event(req->ctx, sqe->user_data, ret);
io_put_req(req);
return 0;
}
@@ -1380,7 +1366,7 @@ static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
__poll_t mask)
{
req->poll.done = true;
- io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
+ io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
io_commit_cqring(ctx);
}
@@ -1700,7 +1686,7 @@ restart:
io_put_req(req);
if (ret) {
- io_cqring_add_event(ctx, sqe->user_data, ret, 0);
+ io_cqring_add_event(ctx, sqe->user_data, ret);
io_put_req(req);
}
@@ -2005,7 +1991,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
continue;
}
- io_cqring_add_event(ctx, sqes[i].sqe->user_data, ret, 0);
+ io_cqring_add_event(ctx, sqes[i].sqe->user_data, ret);
}
if (statep)
@@ -2028,7 +2014,7 @@ static int io_sq_thread(void *data)
set_fs(USER_DS);
timeout = inflight = 0;
- while (!kthread_should_stop() && !ctx->sqo_stop) {
+ while (!kthread_should_park()) {
bool all_fixed, mm_fault = false;
int i;
@@ -2090,7 +2076,7 @@ static int io_sq_thread(void *data)
smp_mb();
if (!io_get_sqring(ctx, &sqes[0])) {
- if (kthread_should_stop()) {
+ if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait);
break;
}
@@ -2140,8 +2126,7 @@ static int io_sq_thread(void *data)
mmput(cur_mm);
}
- if (kthread_should_park())
- kthread_parkme();
+ kthread_parkme();
return 0;
}
@@ -2170,7 +2155,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
ret = io_submit_sqe(ctx, &s, statep);
if (ret)
- io_cqring_add_event(ctx, s.sqe->user_data, ret, 0);
+ io_cqring_add_event(ctx, s.sqe->user_data, ret);
}
io_commit_sqring(ctx);
@@ -2182,6 +2167,8 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
static unsigned io_cqring_events(struct io_cq_ring *ring)
{
+ /* See comment at the top of this file */
+ smp_rmb();
return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
}
@@ -2194,11 +2181,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
{
struct io_cq_ring *ring = ctx->cq_ring;
sigset_t ksigmask, sigsaved;
- DEFINE_WAIT(wait);
int ret;
- /* See comment at the top of this file */
- smp_rmb();
if (io_cqring_events(ring) >= min_events)
return 0;
@@ -2216,23 +2200,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- do {
- prepare_to_wait(&ctx->wait, &wait, TASK_INTERRUPTIBLE);
-
- ret = 0;
- /* See comment at the top of this file */
- smp_rmb();
- if (io_cqring_events(ring) >= min_events)
- break;
-
- schedule();
-
+ ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events);
+ if (ret == -ERESTARTSYS)
ret = -EINTR;
- if (signal_pending(current))
- break;
- } while (1);
-
- finish_wait(&ctx->wait, &wait);
if (sig)
restore_user_sigmask(sig, &sigsaved);
@@ -2273,8 +2243,11 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
if (ctx->sqo_thread) {
- ctx->sqo_stop = 1;
- mb();
+ /*
+ * The park is a bit of a work-around; without it we get
+ * warning spews on shutdown with SQPOLL set and affinity
+ * set to a single CPU.
+ */
kthread_park(ctx->sqo_thread);
kthread_stop(ctx->sqo_thread);
ctx->sqo_thread = NULL;
@@ -2467,10 +2440,11 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
ctx->sq_thread_idle = HZ;
if (p->flags & IORING_SETUP_SQ_AFF) {
- int cpu = array_index_nospec(p->sq_thread_cpu,
- nr_cpu_ids);
+ int cpu = p->sq_thread_cpu;
ret = -EINVAL;
+ if (cpu >= nr_cpu_ids)
+ goto err;
if (!cpu_online(cpu))
goto err;
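
The io_uring changes above drop the homegrown sqo_stop flag in favour of kthread parking: kthread_park() wakes the SQPOLL thread and waits until it has actually reached kthread_parkme(), so the following kthread_stop() can no longer race with a thread that is still inside its submit loop. A minimal sketch of that shutdown pattern (illustrative names, not the io_uring code itself):

#include <linux/kthread.h>
#include <linux/delay.h>

/* Worker loop: run until asked to park, then park before exiting. */
static int worker_fn(void *data)
{
	while (!kthread_should_park()) {
		/* ... poll the submission queue, submit work ... */
		msleep(10);
	}
	kthread_parkme();		/* wait in TASK_PARKED */
	return 0;
}

/* Shutdown: park first so the thread is quiescent, then stop it. */
static void worker_shutdown(struct task_struct *tsk)
{
	kthread_park(tsk);
	kthread_stop(tsk);
}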
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index a7d3df85736d..e6a700f01452 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -22,7 +22,7 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
char *ip_addr = NULL;
int ip_len;
- ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
+ ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL, false);
if (ip_len > 0)
ret = rpc_pton(net, ip_addr, ip_len, sa, salen);
else
diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h
deleted file mode 100644
index 5580eace622c..000000000000
--- a/include/asm-generic/segment.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_SEGMENT_H
-#define __ASM_GENERIC_SEGMENT_H
-/*
- * Only here because we have some old header files that expect it...
- *
- * New architectures probably don't want to have their own version.
- */
-
-#endif /* __ASM_GENERIC_SEGMENT_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b3d2241e03f8..e935318804f8 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -9,7 +9,63 @@
*/
#include <linux/string.h>
-#include <asm/segment.h>
+#ifdef CONFIG_UACCESS_MEMCPY
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ switch(n) {
+ case 1:
+ *(u8 *)to = *(u8 __force *)from;
+ return 0;
+ case 2:
+ *(u16 *)to = *(u16 __force *)from;
+ return 0;
+ case 4:
+ *(u32 *)to = *(u32 __force *)from;
+ return 0;
+#ifdef CONFIG_64BIT
+ case 8:
+ *(u64 *)to = *(u64 __force *)from;
+ return 0;
+#endif
+ }
+ }
+
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ switch(n) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ *(u16 __force *)to = *(u16 *)from;
+ return 0;
+ case 4:
+ *(u32 __force *)to = *(u32 *)from;
+ return 0;
+#ifdef CONFIG_64BIT
+ case 8:
+ *(u64 __force *)to = *(u64 *)from;
+ return 0;
+#endif
+ default:
+ break;
+ }
+ }
+
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
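
The CONFIG_UACCESS_MEMCPY helpers above rely on user and kernel addresses sharing one space (nommu), so a user copy is just a load or store; the __builtin_constant_p() switch lets fixed-size get_user()/put_user() calls compile down to a single sized move instead of a memcpy() call. A plain-C illustration of the same dispatch (hypothetical name, userspace types):

#include <stdint.h>
#include <string.h>

/* Constant sizes of 1/2/4/8 become one fixed-width assignment; any
 * other size falls through to memcpy(), exactly as in the helpers
 * above. */
static inline void copy_demo(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: *(uint8_t *)to  = *(const uint8_t *)from;  return;
		case 2: *(uint16_t *)to = *(const uint16_t *)from; return;
		case 4: *(uint32_t *)to = *(const uint32_t *)from; return;
		case 8: *(uint64_t *)to = *(const uint64_t *)from; return;
		}
	}
	memcpy(to, from, n);
}

Since n is almost always a sizeof() expression at the call site, the switch folds away at compile time.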
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 4903deb0777a..3ac0feaf2b5e 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -436,6 +436,12 @@ union ceph_mds_request_args {
__le64 length; /* num bytes to lock from start */
__u8 wait; /* will caller wait for lock to become available? */
} __attribute__ ((packed)) filelock_change;
+ struct {
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 snapid;
+ __le64 parent;
+ __le32 hash;
+ } __attribute__ ((packed)) lookupino;
} __attribute__ ((packed));
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 800a2128d411..23895d178149 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -323,7 +323,8 @@ struct ceph_connection {
};
-extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
+extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
+
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
int max_count, int *count);
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 5675b1f09bc5..e081b56f1c1d 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -110,17 +110,16 @@ struct ceph_object_id {
int name_len;
};
+#define __CEPH_OID_INITIALIZER(oid) { .name = (oid).inline_name }
+
+#define CEPH_DEFINE_OID_ONSTACK(oid) \
+ struct ceph_object_id oid = __CEPH_OID_INITIALIZER(oid)
+
static inline void ceph_oid_init(struct ceph_object_id *oid)
{
- oid->name = oid->inline_name;
- oid->name_len = 0;
+ *oid = (struct ceph_object_id) __CEPH_OID_INITIALIZER(*oid);
}
-#define CEPH_OID_INIT_ONSTACK(oid) \
- ({ ceph_oid_init(&oid); oid; })
-#define CEPH_DEFINE_OID_ONSTACK(oid) \
- struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid)
-
static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
{
return oid->name == oid->inline_name && !oid->name_len;
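
The reworked macro expands to a designated initializer, so an on-stack oid is fully initialized at its definition instead of going through the old statement-expression that read the variable while initializing it. A hedged usage sketch:

#include <linux/ceph/osdmap.h>

static void oid_demo(void)
{
	CEPH_DEFINE_OID_ONSTACK(oid);	/* name -> inline_name, len 0 */

	ceph_oid_printf(&oid, "rbd_header.%llx", 0x1234ULL);
	/* ... use the oid in an OSD request ... */
	ceph_oid_destroy(&oid);
}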
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 491d992d045d..bb6118f79784 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -6,7 +6,6 @@
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
-#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_clk.h>
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index b0672756d056..e1f51d607cc5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -62,7 +62,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
struct request *rq,
union map_info *map_context,
struct request **clone);
-typedef void (*dm_release_clone_request_fn) (struct request *clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone,
+ union map_info *map_context);
/*
* Returns:
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index 34a744a1bafc..f2b3ae22e6b7 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -27,6 +27,7 @@
#include <uapi/linux/dns_resolver.h>
extern int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time64_t *_expiry);
+ const char *options, char **_result, time64_t *_expiry,
+ bool invalidate);
#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 640a03642766..79fa4426509c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -227,6 +227,32 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
+#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
+
+struct kvm_host_map {
+ /*
+ * Only valid if the 'pfn' is managed by the host kernel (i.e. there
+ * is a 'struct page' for it; when using the mem= kernel parameter,
+ * some memory can be used as guest memory without being managed by
+ * the host kernel).
+ * If 'pfn' is not managed by the host kernel, this field is
+ * initialized to KVM_UNMAPPED_PAGE.
+ */
+ struct page *page;
+ void *hva;
+ kvm_pfn_t pfn;
+ kvm_pfn_t gfn;
+};
+
+/*
+ * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
+ * directly to check for that.
+ */
+static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
+{
+ return !!map->hva;
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -733,7 +759,9 @@ struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -1242,11 +1270,21 @@ struct kvm_device_ops {
*/
void (*destroy)(struct kvm_device *dev);
+ /*
+ * Release is an alternative method to free the device. It is
+ * called when the device file descriptor is closed. Once
+ * release is called, the destroy method will not be called
+ * anymore as the device is removed from the device list of
+ * the VM. kvm->lock is held.
+ */
+ void (*release)(struct kvm_device *dev);
+
int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
unsigned long arg);
+ int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
void kvm_device_get(struct kvm_device *dev);
@@ -1307,6 +1345,16 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+#ifdef CONFIG_HAVE_KVM_NO_POLL
+/* Callback that tells if we must not poll */
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif /* CONFIG_HAVE_KVM_NO_POLL */
+
#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
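
kvm_vcpu_map()/kvm_vcpu_unmap() generalize kvm_vcpu_gfn_to_page() so that guest memory with no struct page (for example, RAM carved out with mem=) can still be mapped into the host; in that case map->page is left as KVM_UNMAPPED_PAGE and only hva/pfn are meaningful. A hedged sketch of the intended calling pattern (hypothetical helper):

#include <linux/kvm_host.h>

/* Read a u32 the guest placed at gpa, via the new mapping API. */
static int read_guest_u32(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *val)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		return -EFAULT;

	*val = *(u32 *)(map.hva + offset_in_page(gpa));
	kvm_vcpu_unmap(vcpu, &map, false);	/* false: nothing dirtied */
	return 0;
}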
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 5d865a5d5cdc..4d0d5655c7b2 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -358,6 +358,7 @@ struct nvm_geo {
u16 csecs; /* sector size */
u16 sos; /* out-of-band area size */
bool ext; /* metadata in extended data buffer */
+ u32 mdts; /* Max data transfer size */
/* device write constraints */
u32 ws_min; /* minimum write size */
@@ -427,6 +428,7 @@ struct nvm_dev {
char name[DISK_NAME_LEN];
void *private_data;
+ struct kref ref;
void *rmap;
struct mutex mlock;
diff --git a/include/linux/list.h b/include/linux/list.h
index d3b4db895340..e951228db4b2 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -789,7 +789,7 @@ static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
- WRITE_ONCE(prev->next, n);
+ prev->next = n;
n->pprev = &prev->next;
if (n->next)
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 3fc2cc57ba1b..ae1b541446c9 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -86,6 +86,32 @@ static inline void hlist_bl_add_head(struct hlist_bl_node *n,
hlist_bl_set_first(h, n);
}
+static inline void hlist_bl_add_before(struct hlist_bl_node *n,
+ struct hlist_bl_node *next)
+{
+ struct hlist_bl_node **pprev = next->pprev;
+
+ n->pprev = pprev;
+ n->next = next;
+ next->pprev = &n->next;
+
+ /* pprev may be `first`, so be careful not to lose the lock bit */
+ WRITE_ONCE(*pprev,
+ (struct hlist_bl_node *)
+ ((uintptr_t)n | ((uintptr_t)*pprev & LIST_BL_LOCKMASK)));
+}
+
+static inline void hlist_bl_add_behind(struct hlist_bl_node *n,
+ struct hlist_bl_node *prev)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ prev->next = n;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
struct hlist_bl_node *next = n->next;
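
hlist_bl heads keep a spinlock in bit 0 of the first pointer, and in hlist_bl_add_before() pprev may point at that head, so the new node's address is OR-ed with the existing LIST_BL_LOCKMASK bits instead of being stored raw. A small standalone demonstration of the masking (assuming LIST_BL_LOCKMASK is 1UL, as on SMP builds):

#include <stdint.h>
#include <assert.h>

#define LIST_BL_LOCKMASK 1UL	/* bit 0 of the head is the lock */

/* Point *pprev at n without disturbing the lock bit. */
static void store_preserving_lock(uintptr_t *pprev, uintptr_t n)
{
	*pprev = n | (*pprev & LIST_BL_LOCKMASK);
}

int main(void)
{
	uintptr_t head = 0x1000 | LIST_BL_LOCKMASK;	/* list is locked */

	store_preserving_lock(&head, 0x2000);
	assert(head == (0x2000 | LIST_BL_LOCKMASK));	/* still locked */
	return 0;
}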
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c40720cb59ac..8028adacaff3 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1246,9 +1246,9 @@ enum {
NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
NVME_SC_FW_NEEDS_RESET = 0x111,
NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
- NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+ NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113,
NVME_SC_OVERLAPPING_RANGE = 0x114,
- NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+ NVME_SC_NS_INSUFFICIENT_CAP = 0x115,
NVME_SC_NS_ID_UNAVAILABLE = 0x116,
NVME_SC_NS_ALREADY_ATTACHED = 0x118,
NVME_SC_NS_IS_PRIVATE = 0x119,
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 15eb85de9226..659045046468 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -284,11 +284,15 @@ static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
return bytes;
}
-static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+/*
+ * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
+ * struct_size() below.
+ */
+static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
{
size_t bytes;
- if (check_mul_overflow(n, size, &bytes))
+ if (check_mul_overflow(a, b, &bytes))
return SIZE_MAX;
if (check_add_overflow(bytes, c, &bytes))
return SIZE_MAX;
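
__ab_c_size() is the worker behind struct_size(), which computes sizeof(struct) + count * sizeof(element) for a flexible-array allocation and saturates to SIZE_MAX on overflow, so the allocator simply fails instead of returning an undersized buffer. A hedged kernel-style usage sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

struct report {
	unsigned int	nr;
	u64		samples[];	/* flexible array member */
};

/* struct_size() saturates to SIZE_MAX on overflow, so a hostile 'nr'
 * makes kmalloc() fail rather than under-allocate. */
static struct report *report_alloc(unsigned int nr)
{
	struct report *r = kmalloc(struct_size(r, samples, nr), GFP_KERNEL);

	if (r)
		r->nr = nr;
	return r;
}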
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 15a82ff0aefe..0ab99c7b652d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -30,6 +30,7 @@ struct perf_guest_info_callbacks {
int (*is_in_guest)(void);
int (*is_user_mode)(void);
unsigned long (*get_guest_ip)(void);
+ void (*handle_intel_pt_intr)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/include/linux/platform_data/xtalk-bridge.h b/include/linux/platform_data/xtalk-bridge.h
new file mode 100644
index 000000000000..51e5001f2c05
--- /dev/null
+++ b/include/linux/platform_data/xtalk-bridge.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SGI PCI Xtalk Bridge
+ */
+
+#ifndef PLATFORM_DATA_XTALK_BRIDGE_H
+#define PLATFORM_DATA_XTALK_BRIDGE_H
+
+#include <asm/sn/types.h>
+
+struct xtalk_bridge_platform_data {
+ struct resource mem;
+ struct resource io;
+ unsigned long bridge_addr;
+ unsigned long intr_addr;
+ unsigned long mem_offset;
+ unsigned long io_offset;
+ nasid_t nasid;
+ int masterwid;
+};
+
+#endif /* PLATFORM_DATA_XTALK_BRIDGE_H */
diff --git a/include/linux/random.h b/include/linux/random.h
index 13aeaf5a4bd4..1f7dced2bba6 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -20,7 +20,7 @@ struct random_ready_callback {
extern void add_device_randomness(const void *, unsigned int);
-#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
add_device_randomness((const void *)&latent_entropy,
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 9a5eafb7145b..abc7de77b988 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -61,9 +61,6 @@ struct kmem_cache {
atomic_t allocmiss;
atomic_t freehit;
atomic_t freemiss;
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- atomic_t store_user_clean;
-#endif
/*
* If debugging is enabled, then the allocator can add additional
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 4a22099ed8c0..15a4ca5d7099 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -442,10 +442,10 @@ void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
void thermal_zone_set_trips(struct thermal_zone_device *);
-struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
- const struct thermal_cooling_device_ops *);
+struct thermal_cooling_device *thermal_cooling_device_register(const char *,
+ void *, const struct thermal_cooling_device_ops *);
struct thermal_cooling_device *
-thermal_of_cooling_device_register(struct device_node *np, char *, void *,
+thermal_of_cooling_device_register(struct device_node *np, const char *, void *,
const struct thermal_cooling_device_ops *);
struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 2b0072fa5e92..7dec36aecbd9 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -305,6 +305,19 @@ do { \
__ret; \
})
+#define __wait_var_event_interruptible(var, condition) \
+ ___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ schedule())
+
+#define wait_var_event_interruptible(var, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_var_event_interruptible(var, condition); \
+ __ret; \
+})
+
/**
* clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
*
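
wait_var_event_interruptible() mirrors wait_var_event() but sleeps in TASK_INTERRUPTIBLE and returns -ERESTARTSYS when a signal arrives; the wake side is still wake_up_var() on the same address. A hedged usage sketch:

#include <linux/wait_bit.h>
#include <linux/atomic.h>

static atomic_t pending_ops;

/* Waiter: 0 once all operations have drained, -ERESTARTSYS on signal. */
static int drain_ops(void)
{
	return wait_var_event_interruptible(&pending_ops,
					    atomic_read(&pending_ops) == 0);
}

/* Completion side: drop the count and kick any waiter. */
static void op_done(void)
{
	if (atomic_dec_and_test(&pending_ops))
		wake_up_var(&pending_ops);
}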
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 5c31a7682492..f76d2f25a824 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -92,7 +92,7 @@ struct vpbe_config {
struct encoder_config_info *ext_encoders;
/* amplifier information goes here */
struct amp_config_info *amp;
- int num_outputs;
+ unsigned int num_outputs;
/* Order is venc outputs followed by LCD and then external encoders */
struct vpbe_output *outputs;
};
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 78c856cba4f5..93358bfc0e1b 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -45,6 +45,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
gfp_t,
rxrpc_notify_rx_t,
bool,
+ bool,
unsigned int);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
@@ -68,5 +69,7 @@ u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
+void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
+ unsigned long);
#endif /* _NET_RXRPC_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 896c3f45503b..e8346784cf3f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -81,6 +81,7 @@ struct hdac_device {
atomic_t in_pm; /* suspend/resume being performed */
/* sysfs */
+ struct mutex widget_lock;
struct hdac_widget_tree *widgets;
/* regmap */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index dee7292e1df6..a87904daf103 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -832,9 +832,21 @@ __SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
#define __NR_io_uring_register 427
__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+#define __NR_open_tree 428
+__SYSCALL(__NR_open_tree, sys_open_tree)
+#define __NR_move_mount 429
+__SYSCALL(__NR_move_mount, sys_move_mount)
+#define __NR_fsopen 430
+__SYSCALL(__NR_fsopen, sys_fsopen)
+#define __NR_fsconfig 431
+__SYSCALL(__NR_fsconfig, sys_fsconfig)
+#define __NR_fsmount 432
+__SYSCALL(__NR_fsmount, sys_fsmount)
+#define __NR_fspick 433
+__SYSCALL(__NR_fspick, sys_fspick)
#undef __NR_syscalls
-#define __NR_syscalls 428
+#define __NR_syscalls 434
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 6d4ea4b6c922..2fe12b40d503 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -986,8 +986,13 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#define KVM_CAP_EXCEPTION_PAYLOAD 164
#define KVM_CAP_ARM_VM_IPA_SIZE 165
-#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 /* Obsolete */
#define KVM_CAP_HYPERV_CPUID 167
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168
+#define KVM_CAP_PPC_IRQ_XIVE 169
+#define KVM_CAP_ARM_SVE 170
+#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
+#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1145,6 +1150,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_U256 0x0050000000000000ULL
#define KVM_REG_SIZE_U512 0x0060000000000000ULL
#define KVM_REG_SIZE_U1024 0x0070000000000000ULL
+#define KVM_REG_SIZE_U2048 0x0080000000000000ULL
struct kvm_reg_list {
__u64 n; /* number of regs */
@@ -1211,6 +1217,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
KVM_DEV_TYPE_ARM_VGIC_ITS,
#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
+ KVM_DEV_TYPE_XIVE,
+#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_MAX,
};
@@ -1434,12 +1442,15 @@ struct kvm_enc_region {
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
-/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 */
#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
/* Available with KVM_CAP_HYPERV_CPUID */
#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+/* Available with KVM_CAP_ARM_SVE */
+#define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 6b3ee9948bf1..0b1f77957240 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -130,6 +130,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
{
struct rwsem_waiter *waiter, *tmp;
long oldcount, woken = 0, adjustment = 0;
+ struct list_head wlist;
/*
* Take a peek at the queue head waiter such that we can determine
@@ -188,18 +189,43 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
* of the queue. We know that woken will be at least 1 as we accounted
* for above. Note we increment the 'active part' of the count by the
* number of readers before waking any processes up.
+ *
+ * We have to do the wakeup in two passes to prevent the reader count
+ * from being decremented before it is incremented. This can happen
+ * because a to-be-woken waiter may not have slept yet: it may see
+ * waiter->task cleared, finish its critical section and unlock before
+ * the reader count is incremented.
+ *
+ * 1) Collect the read-waiters in a separate list, count them and
+ * fully increment the reader count in the rwsem.
+ * 2) For each waiter in the new list, clear waiter->task and put it
+ * on the wake_q to be woken up later.
*/
- list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
- struct task_struct *tsk;
-
+ list_for_each_entry(waiter, &sem->wait_list, list) {
if (waiter->type == RWSEM_WAITING_FOR_WRITE)
break;
woken++;
- tsk = waiter->task;
+ }
+ list_cut_before(&wlist, &sem->wait_list, &waiter->list);
+
+ adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+ lockevent_cond_inc(rwsem_wake_reader, woken);
+ if (list_empty(&sem->wait_list)) {
+ /* hit end of list above */
+ adjustment -= RWSEM_WAITING_BIAS;
+ }
+
+ if (adjustment)
+ atomic_long_add(adjustment, &sem->count);
+
+ /* 2nd pass */
+ list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+ struct task_struct *tsk;
+ tsk = waiter->task;
get_task_struct(tsk);
- list_del(&waiter->list);
+
/*
* Ensure calling get_task_struct() before setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
@@ -213,16 +239,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
*/
wake_q_add_safe(wake_q, tsk);
}
-
- adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
- lockevent_cond_inc(rwsem_wake_reader, woken);
- if (list_empty(&sem->wait_list)) {
- /* hit end of list above */
- adjustment -= RWSEM_WAITING_BIAS;
- }
-
- if (adjustment)
- atomic_long_add(adjustment, &sem->count);
}
/*
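
The two-pass scheme matters because clearing waiter->task is the hand-off point: once it is NULL the woken reader owns the lock and may decrement the count on its unlock path, so the full reader count has to be published before any waiter->task is cleared. A simplified sketch of the ordering (hypothetical types, direct wake_up_process() standing in for the wake_q batching):

#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

struct my_waiter {
	struct list_head	list;
	struct task_struct	*task;
};

static void wake_readers(struct list_head *wait_list, atomic_long_t *count,
			 long read_bias, int woken, struct list_head *cut)
{
	struct my_waiter *w, *tmp;
	struct list_head wlist;

	/* Pass 1: detach the readers and publish the whole reader count
	 * before any of them can observe waiter->task == NULL. */
	list_cut_before(&wlist, wait_list, cut);
	atomic_long_add(woken * read_bias, count);

	/* Pass 2: only now hand the lock over to the woken tasks. */
	list_for_each_entry_safe(w, tmp, &wlist, list) {
		struct task_struct *tsk = w->task;

		get_task_struct(tsk);
		smp_store_release(&w->task, NULL); /* waiter may free itself */
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
}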
diff --git a/kernel/signal.c b/kernel/signal.c
index c4dd66436fc5..a1eb44dc9ff5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2113,6 +2113,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
preempt_enable_no_resched();
cgroup_enter_frozen();
freezable_schedule();
+ cgroup_leave_frozen(true);
} else {
/*
* By the time we got the lock, our tracer went away.
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index ac5555e25733..8de4f789dc1b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -691,7 +691,7 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
time_constant = max(time_constant, 0l);
}
- if (txc->modes & ADJ_TAI && txc->constant > 0)
+ if (txc->modes & ADJ_TAI && txc->constant >= 0)
*time_tai = txc->constant;
if (txc->modes & ADJ_OFFSET)
diff --git a/lib/Kconfig b/lib/Kconfig
index 3577609b61be..8d9239a4156c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -601,6 +601,10 @@ config ARCH_NO_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+# use memcpy to implement user copies for nommu architectures
+config UACCESS_MEMCPY
+ bool
+
config ARCH_HAS_UACCESS_FLUSHCACHE
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fdfa173651eb..eae43952902e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -542,10 +542,6 @@ config DEBUG_SLAB
allocation as well as poisoning memory on free to catch use of freed
memory. This can make kmalloc/kfree-intensive workloads much slower.
-config DEBUG_SLAB_LEAK
- bool "Memory leak debugging"
- depends on DEBUG_SLAB
-
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
depends on SLUB && SLUB_DEBUG
diff --git a/lib/hweight.c b/lib/hweight.c
index 7660d88fd496..c94586b62551 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -10,7 +10,6 @@
* The Hamming Weight of a number is the total number of bits set in it.
*/
-#ifndef __HAVE_ARCH_SW_HWEIGHT
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
@@ -27,7 +26,6 @@ unsigned int __sw_hweight32(unsigned int w)
#endif
}
EXPORT_SYMBOL(__sw_hweight32);
-#endif
unsigned int __sw_hweight16(unsigned int w)
{
@@ -46,7 +44,6 @@ unsigned int __sw_hweight8(unsigned int w)
}
EXPORT_SYMBOL(__sw_hweight8);
-#ifndef __HAVE_ARCH_SW_HWEIGHT
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
@@ -69,4 +66,3 @@ unsigned long __sw_hweight64(__u64 w)
#endif
}
EXPORT_SYMBOL(__sw_hweight64);
-#endif
diff --git a/mm/slab.c b/mm/slab.c
index 2915d912e89a..f7117ad9b3a3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -362,29 +362,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline bool is_store_user_clean(struct kmem_cache *cachep)
-{
- return atomic_read(&cachep->store_user_clean) == 1;
-}
-
-static inline void set_store_user_clean(struct kmem_cache *cachep)
-{
- atomic_set(&cachep->store_user_clean, 1);
-}
-
-static inline void set_store_user_dirty(struct kmem_cache *cachep)
-{
- if (is_store_user_clean(cachep))
- atomic_set(&cachep->store_user_clean, 0);
-}
-
-#else
-static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
-
-#endif
-
/*
* Do not go above this order unless 0 objects fit into the slab or
* overridden on the command line.
@@ -2552,11 +2529,6 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
page->active++;
-#if DEBUG
- if (cachep->flags & SLAB_STORE_USER)
- set_store_user_dirty(cachep);
-#endif
-
return objp;
}
@@ -2762,10 +2734,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
- if (cachep->flags & SLAB_STORE_USER) {
- set_store_user_dirty(cachep);
+ if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = (void *)caller;
- }
objnr = obj_to_index(cachep, page, objp);
@@ -4184,200 +4154,6 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
return res;
}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline int add_caller(unsigned long *n, unsigned long v)
-{
- unsigned long *p;
- int l;
- if (!v)
- return 1;
- l = n[1];
- p = n + 2;
- while (l) {
- int i = l/2;
- unsigned long *q = p + 2 * i;
- if (*q == v) {
- q[1]++;
- return 1;
- }
- if (*q > v) {
- l = i;
- } else {
- p = q + 2;
- l -= i + 1;
- }
- }
- if (++n[1] == n[0])
- return 0;
- memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
- p[0] = v;
- p[1] = 1;
- return 1;
-}
-
-static void handle_slab(unsigned long *n, struct kmem_cache *c,
- struct page *page)
-{
- void *p;
- int i, j;
- unsigned long v;
-
- if (n[0] == n[1])
- return;
- for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
- bool active = true;
-
- for (j = page->active; j < c->num; j++) {
- if (get_free_obj(page, j) == i) {
- active = false;
- break;
- }
- }
-
- if (!active)
- continue;
-
- /*
- * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
- * mapping is established when actual object allocation and
- * we could mistakenly access the unmapped object in the cpu
- * cache.
- */
- if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
- continue;
-
- if (!add_caller(n, v))
- return;
- }
-}
-
-static void show_symbol(struct seq_file *m, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
- unsigned long offset, size;
- char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
-
- if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
- seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
- if (modname[0])
- seq_printf(m, " [%s]", modname);
- return;
- }
-#endif
- seq_printf(m, "%px", (void *)address);
-}
-
-static int leaks_show(struct seq_file *m, void *p)
-{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
- root_caches_node);
- struct page *page;
- struct kmem_cache_node *n;
- const char *name;
- unsigned long *x = m->private;
- int node;
- int i;
-
- if (!(cachep->flags & SLAB_STORE_USER))
- return 0;
- if (!(cachep->flags & SLAB_RED_ZONE))
- return 0;
-
- /*
- * Set store_user_clean and start to grab stored user information
- * for all objects on this cache. If some alloc/free requests comes
- * during the processing, information would be wrong so restart
- * whole processing.
- */
- do {
- drain_cpu_caches(cachep);
- /*
- * drain_cpu_caches() could make kmemleak_object and
- * debug_objects_cache dirty, so reset afterwards.
- */
- set_store_user_clean(cachep);
-
- x[1] = 0;
-
- for_each_kmem_cache_node(cachep, node, n) {
-
- check_irq_on();
- spin_lock_irq(&n->list_lock);
-
- list_for_each_entry(page, &n->slabs_full, slab_list)
- handle_slab(x, cachep, page);
- list_for_each_entry(page, &n->slabs_partial, slab_list)
- handle_slab(x, cachep, page);
- spin_unlock_irq(&n->list_lock);
- }
- } while (!is_store_user_clean(cachep));
-
- name = cachep->name;
- if (x[0] == x[1]) {
- /* Increase the buffer size */
- mutex_unlock(&slab_mutex);
- m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
- GFP_KERNEL);
- if (!m->private) {
- /* Too bad, we are really out */
- m->private = x;
- mutex_lock(&slab_mutex);
- return -ENOMEM;
- }
- *(unsigned long *)m->private = x[0] * 2;
- kfree(x);
- mutex_lock(&slab_mutex);
- /* Now make sure this entry will be retried */
- m->count = m->size;
- return 0;
- }
- for (i = 0; i < x[1]; i++) {
- seq_printf(m, "%s: %lu ", name, x[2*i+3]);
- show_symbol(m, x[2*i+2]);
- seq_putc(m, '\n');
- }
-
- return 0;
-}
-
-static const struct seq_operations slabstats_op = {
- .start = slab_start,
- .next = slab_next,
- .stop = slab_stop,
- .show = leaks_show,
-};
-
-static int slabstats_open(struct inode *inode, struct file *file)
-{
- unsigned long *n;
-
- n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
- if (!n)
- return -ENOMEM;
-
- *n = PAGE_SIZE / (2 * sizeof(unsigned long));
-
- return 0;
-}
-
-static const struct file_operations proc_slabstats_operations = {
- .open = slabstats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
-};
-#endif
-
-static int __init slab_proc_init(void)
-{
-#ifdef CONFIG_DEBUG_SLAB_LEAK
- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
-#endif
- return 0;
-}
-module_init(slab_proc_init);
-
#ifdef CONFIG_HARDENED_USERCOPY
/*
* Rejects incorrectly sized objects and objects that are to be copied
diff --git a/net/ceph/cls_lock_client.c b/net/ceph/cls_lock_client.c
index 2105a6eaa66c..4cc28541281b 100644
--- a/net/ceph/cls_lock_client.c
+++ b/net/ceph/cls_lock_client.c
@@ -271,7 +271,7 @@ static int decode_locker(void **p, void *end, struct ceph_locker *locker)
dout("%s %s%llu cookie %s addr %s\n", __func__,
ENTITY_NAME(locker->id.name), locker->id.cookie,
- ceph_pr_addr(&locker->info.addr.in_addr));
+ ceph_pr_addr(&locker->info.addr));
return 0;
}
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 46f65709a6ff..63aef9915f75 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -46,7 +46,7 @@ static int monmap_show(struct seq_file *s, void *p)
seq_printf(s, "\t%s%lld\t%s\n",
ENTITY_NAME(inst->name),
- ceph_pr_addr(&inst->addr.in_addr));
+ ceph_pr_addr(&inst->addr));
}
return 0;
}
@@ -82,7 +82,7 @@ static int osdmap_show(struct seq_file *s, void *p)
char sb[64];
seq_printf(s, "osd%d\t%s\t%3d%%\t(%s)\t%3d%%\n",
- i, ceph_pr_addr(&addr->in_addr),
+ i, ceph_pr_addr(addr),
((map->osd_weight[i]*100) >> 16),
ceph_osdmap_state_str(sb, sizeof(sb), state),
((ceph_get_primary_affinity(map, i)*100) >> 16));
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3083988ce729..cd0b094468b6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -186,17 +186,18 @@ static atomic_t addr_str_seq = ATOMIC_INIT(0);
static struct page *zero_page; /* used in certain error cases */
-const char *ceph_pr_addr(const struct sockaddr_storage *ss)
+const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
{
int i;
char *s;
- struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
+ struct sockaddr_storage ss = addr->in_addr; /* align */
+ struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
s = addr_str[i];
- switch (ss->ss_family) {
+ switch (ss.ss_family) {
case AF_INET:
snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
ntohs(in4->sin_port));
@@ -209,7 +210,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
default:
snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
- ss->ss_family);
+ ss.ss_family);
}
return s;
@@ -449,7 +450,7 @@ static void set_sock_callbacks(struct socket *sock,
*/
static int ceph_tcp_connect(struct ceph_connection *con)
{
- struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
+ struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
struct socket *sock;
unsigned int noio_flag;
int ret;
@@ -458,7 +459,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
/* sock_create_kern() allocates with GFP_KERNEL */
noio_flag = memalloc_noio_save();
- ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
+ ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
memalloc_noio_restore(noio_flag);
if (ret)
@@ -471,18 +472,18 @@ static int ceph_tcp_connect(struct ceph_connection *con)
set_sock_callbacks(sock, con);
- dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
+ dout("connect %s\n", ceph_pr_addr(&con->peer_addr));
con_sock_state_connecting(con);
- ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
+ ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
- ceph_pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr),
sock->sk->sk_state);
} else if (ret < 0) {
pr_err("connect %s error %d\n",
- ceph_pr_addr(&con->peer_addr.in_addr), ret);
+ ceph_pr_addr(&con->peer_addr), ret);
sock_release(sock);
return ret;
}
@@ -669,8 +670,7 @@ static void reset_connection(struct ceph_connection *con)
void ceph_con_close(struct ceph_connection *con)
{
mutex_lock(&con->mutex);
- dout("con_close %p peer %s\n", con,
- ceph_pr_addr(&con->peer_addr.in_addr));
+ dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
con->state = CON_STATE_CLOSED;
con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
@@ -694,7 +694,7 @@ void ceph_con_open(struct ceph_connection *con,
struct ceph_entity_addr *addr)
{
mutex_lock(&con->mutex);
- dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
+ dout("con_open %p %s\n", con, ceph_pr_addr(addr));
WARN_ON(con->state != CON_STATE_CLOSED);
con->state = CON_STATE_PREOPEN;
@@ -1788,21 +1788,22 @@ static int verify_hello(struct ceph_connection *con)
{
if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
pr_err("connect to %s got bad banner\n",
- ceph_pr_addr(&con->peer_addr.in_addr));
+ ceph_pr_addr(&con->peer_addr));
con->error_msg = "protocol error, bad banner";
return -1;
}
return 0;
}
-static bool addr_is_blank(struct sockaddr_storage *ss)
+static bool addr_is_blank(struct ceph_entity_addr *addr)
{
- struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
- struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
+ struct sockaddr_storage ss = addr->in_addr; /* align */
+ struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
+ struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
- switch (ss->ss_family) {
+ switch (ss.ss_family) {
case AF_INET:
- return addr->s_addr == htonl(INADDR_ANY);
+ return addr4->s_addr == htonl(INADDR_ANY);
case AF_INET6:
return ipv6_addr_any(addr6);
default:
@@ -1810,25 +1811,25 @@ static bool addr_is_blank(struct sockaddr_storage *ss)
}
}
-static int addr_port(struct sockaddr_storage *ss)
+static int addr_port(struct ceph_entity_addr *addr)
{
- switch (ss->ss_family) {
+ switch (get_unaligned(&addr->in_addr.ss_family)) {
case AF_INET:
- return ntohs(((struct sockaddr_in *)ss)->sin_port);
+ return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
case AF_INET6:
- return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
+ return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
}
return 0;
}
-static void addr_set_port(struct sockaddr_storage *ss, int p)
+static void addr_set_port(struct ceph_entity_addr *addr, int p)
{
- switch (ss->ss_family) {
+ switch (get_unaligned(&addr->in_addr.ss_family)) {
case AF_INET:
- ((struct sockaddr_in *)ss)->sin_port = htons(p);
+ put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
break;
case AF_INET6:
- ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
+ put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
break;
}
}
@@ -1836,21 +1837,18 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
/*
* Unlike other *_pton function semantics, zero indicates success.
*/
-static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
+static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
char delim, const char **ipend)
{
- struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
- struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
+ memset(&addr->in_addr, 0, sizeof(addr->in_addr));
- memset(ss, 0, sizeof(*ss));
-
- if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
- ss->ss_family = AF_INET;
+ if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
+ put_unaligned(AF_INET, &addr->in_addr.ss_family);
return 0;
}
- if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
- ss->ss_family = AF_INET6;
+ if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
+ put_unaligned(AF_INET6, &addr->in_addr.ss_family);
return 0;
}
@@ -1862,7 +1860,7 @@ static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
*/
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
- struct sockaddr_storage *ss, char delim, const char **ipend)
+ struct ceph_entity_addr *addr, char delim, const char **ipend)
{
const char *end, *delim_p;
char *colon_p, *ip_addr = NULL;
@@ -1889,9 +1887,9 @@ static int ceph_dns_resolve_name(const char *name, size_t namelen,
return -EINVAL;
/* do dns_resolve upcall */
- ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
+ ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL, false);
if (ip_len > 0)
- ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
+ ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
else
ret = -ESRCH;
@@ -1900,13 +1898,13 @@ static int ceph_dns_resolve_name(const char *name, size_t namelen,
*ipend = end;
pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
- ret, ret ? "failed" : ceph_pr_addr(ss));
+ ret, ret ? "failed" : ceph_pr_addr(addr));
return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
- struct sockaddr_storage *ss, char delim, const char **ipend)
+ struct ceph_entity_addr *addr, char delim, const char **ipend)
{
return -EINVAL;
}
@@ -1917,13 +1915,13 @@ static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
* then try to extract a hostname to resolve using userspace DNS upcall.
*/
static int ceph_parse_server_name(const char *name, size_t namelen,
- struct sockaddr_storage *ss, char delim, const char **ipend)
+ struct ceph_entity_addr *addr, char delim, const char **ipend)
{
int ret;
- ret = ceph_pton(name, namelen, ss, delim, ipend);
+ ret = ceph_pton(name, namelen, addr, delim, ipend);
if (ret)
- ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
+ ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
return ret;
}
@@ -1942,7 +1940,6 @@ int ceph_parse_ips(const char *c, const char *end,
dout("parse_ips on '%.*s'\n", (int)(end-c), c);
for (i = 0; i < max_count; i++) {
const char *ipend;
- struct sockaddr_storage *ss = &addr[i].in_addr;
int port;
char delim = ',';
@@ -1951,7 +1948,7 @@ int ceph_parse_ips(const char *c, const char *end,
p++;
}
- ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
+ ret = ceph_parse_server_name(p, end - p, &addr[i], delim, &ipend);
if (ret)
goto bad;
ret = -EINVAL;
@@ -1982,9 +1979,9 @@ int ceph_parse_ips(const char *c, const char *end,
port = CEPH_MON_PORT;
}
- addr_set_port(ss, port);
+ addr_set_port(&addr[i], port);
- dout("parse_ips got %s\n", ceph_pr_addr(ss));
+ dout("parse_ips got %s\n", ceph_pr_addr(&addr[i]));
if (p == end)
break;
@@ -2023,12 +2020,12 @@ static int process_banner(struct ceph_connection *con)
*/
if (memcmp(&con->peer_addr, &con->actual_peer_addr,
sizeof(con->peer_addr)) != 0 &&
- !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
+ !(addr_is_blank(&con->actual_peer_addr) &&
con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
pr_warn("wrong peer, want %s/%d, got %s/%d\n",
- ceph_pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr),
(int)le32_to_cpu(con->peer_addr.nonce),
- ceph_pr_addr(&con->actual_peer_addr.in_addr),
+ ceph_pr_addr(&con->actual_peer_addr),
(int)le32_to_cpu(con->actual_peer_addr.nonce));
con->error_msg = "wrong peer at address";
return -1;
@@ -2037,16 +2034,16 @@ static int process_banner(struct ceph_connection *con)
/*
* did we learn our address?
*/
- if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
- int port = addr_port(&con->msgr->inst.addr.in_addr);
+ if (addr_is_blank(&con->msgr->inst.addr)) {
+ int port = addr_port(&con->msgr->inst.addr);
memcpy(&con->msgr->inst.addr.in_addr,
&con->peer_addr_for_me.in_addr,
sizeof(con->peer_addr_for_me.in_addr));
- addr_set_port(&con->msgr->inst.addr.in_addr, port);
+ addr_set_port(&con->msgr->inst.addr, port);
encode_my_addr(con->msgr);
dout("process_banner learned my addr is %s\n",
- ceph_pr_addr(&con->msgr->inst.addr.in_addr));
+ ceph_pr_addr(&con->msgr->inst.addr));
}
return 0;
@@ -2097,7 +2094,7 @@ static int process_connect(struct ceph_connection *con)
pr_err("%s%lld %s feature set mismatch,"
" my %llx < server's %llx, missing %llx\n",
ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr),
sup_feat, server_feat, server_feat & ~sup_feat);
con->error_msg = "missing required protocol features";
reset_connection(con);
@@ -2107,7 +2104,7 @@ static int process_connect(struct ceph_connection *con)
pr_err("%s%lld %s protocol version mismatch,"
" my %d != server's %d\n",
ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr),
le32_to_cpu(con->out_connect.protocol_version),
le32_to_cpu(con->in_reply.protocol_version));
con->error_msg = "protocol version mismatch";
@@ -2141,7 +2138,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.connect_seq));
pr_err("%s%lld %s connection reset\n",
ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr));
+ ceph_pr_addr(&con->peer_addr));
reset_connection(con);
con_out_kvec_reset(con);
ret = prepare_write_connect(con);
@@ -2198,7 +2195,7 @@ static int process_connect(struct ceph_connection *con)
pr_err("%s%lld %s protocol feature mismatch,"
" my required %llx > server's %llx, need %llx\n",
ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr),
req_feat, server_feat, req_feat & ~server_feat);
con->error_msg = "missing required protocol features";
reset_connection(con);
@@ -2405,7 +2402,7 @@ static int read_partial_message(struct ceph_connection *con)
if ((s64)seq - (s64)con->in_seq < 1) {
pr_info("skipping %s%lld %s seq %lld expected %lld\n",
ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr),
seq, con->in_seq + 1);
con->in_base_pos = -front_len - middle_len - data_len -
sizeof_footer(con);
@@ -2984,10 +2981,10 @@ static void ceph_con_workfn(struct work_struct *work)
static void con_fault(struct ceph_connection *con)
{
dout("fault %p state %lu to peer %s\n",
- con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
+ con, con->state, ceph_pr_addr(&con->peer_addr));
pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
- ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
+ ceph_pr_addr(&con->peer_addr), con->error_msg);
con->error_msg = NULL;
WARN_ON(con->state != CON_STATE_CONNECTING &&
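
ceph_entity_addr lives inside packed wire structures, so its embedded sockaddr_storage may not have the natural alignment the sockaddr casts assume; the conversion above either copies the address to an aligned stack variable first or uses get_unaligned()/put_unaligned() for single fields. A short restatement of the two idioms (sketch only):

#include <asm/unaligned.h>
#include <linux/ceph/msgr.h>
#include <linux/socket.h>
#include <linux/in.h>

/* Idiom 1: aligned stack copy, then the usual sockaddr casts are safe. */
static unsigned short entity_addr_family(const struct ceph_entity_addr *addr)
{
	struct sockaddr_storage ss = addr->in_addr;	/* align */

	return ss.ss_family;
}

/* Idiom 2: touch one field in place without assuming alignment. */
static void entity_addr_set_port(struct ceph_entity_addr *addr, int port)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *)&addr->in_addr;

	if (get_unaligned(&addr->in_addr.ss_family) == AF_INET)
		put_unaligned(htons(port), &in4->sin_port);
}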
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a53e4fbb6319..895679d3529b 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -76,7 +76,7 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
m->num_mon);
for (i = 0; i < m->num_mon; i++)
dout("monmap_decode mon%d is %s\n", i,
- ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
+ ceph_pr_addr(&m->mon_inst[i].addr));
return m;
bad:
@@ -203,7 +203,7 @@ static void reopen_session(struct ceph_mon_client *monc)
{
if (!monc->hunting)
pr_info("mon%d %s session lost, hunting for new mon\n",
- monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr.in_addr));
+ monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr));
__close_session(monc);
__open_session(monc);
@@ -1178,7 +1178,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
__resend_generic_request(monc);
pr_info("mon%d %s session established\n", monc->cur_mon,
- ceph_pr_addr(&monc->con.peer_addr.in_addr));
+ ceph_pr_addr(&monc->con.peer_addr));
}
out:
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6f739de28918..9a8eca5eda65 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4926,7 +4926,7 @@ static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
dout("%s %s%llu cookie %llu addr %s\n", __func__,
ENTITY_NAME(item->name), item->cookie,
- ceph_pr_addr(&item->addr.in_addr));
+ ceph_pr_addr(&item->addr));
return 0;
}
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 19aa32fc1802..2d260432b3be 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -54,6 +54,7 @@
* @options: Request options (or NULL if no options)
* @_result: Where to place the returned data (or NULL)
* @_expiry: Where to store the result expiry time (or NULL)
+ * @invalidate: Always invalidate the key after use
*
* The data will be returned in the pointer at *result, if provided, and the
* caller is responsible for freeing it.
@@ -69,7 +70,8 @@
* Returns the size of the result on success, -ve error code otherwise.
*/
int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time64_t *_expiry)
+ const char *options, char **_result, time64_t *_expiry,
+ bool invalidate)
{
struct key *rkey;
struct user_key_payload *upayload;
@@ -157,6 +159,8 @@ int dns_query(const char *type, const char *name, size_t namelen,
ret = len;
put:
up_read(&rkey->sem);
+ if (invalidate)
+ key_invalidate(rkey);
key_put(rkey);
out:
kleave(" = %d", ret);
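
The new invalidate flag marks the cached dns_resolver key invalid as soon as the result has been read, so the next query for the same name upcalls userspace again instead of serving stale data; existing callers keep the caching behaviour by passing false. A hedged usage sketch:

#include <linux/dns_resolver.h>
#include <linux/string.h>

/* Resolve a hostname once and drop the cached key, so that a retry
 * after a failure re-queries userspace. */
static int resolve_fresh(const char *host, char **ip)
{
	int len = dns_query(NULL, host, strlen(host), NULL, ip, NULL,
			    true /* invalidate the key after use */);

	return len < 0 ? len : 0;
}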
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index ae8c5d7f3bf1..ffde5b187f5d 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -270,6 +270,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
* @gfp: The allocation constraints
* @notify_rx: Where to send notifications instead of socket queue
* @upgrade: Request service upgrade for call
+ * @intr: The call is interruptible
* @debug_id: The debug ID for tracing to be assigned to the call
*
* Allow a kernel service to begin a call on the nominated socket. This just
@@ -287,6 +288,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
bool upgrade,
+ bool intr,
unsigned int debug_id)
{
struct rxrpc_conn_parameters cp;
@@ -311,6 +313,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
memset(&p, 0, sizeof(p));
p.user_call_ID = user_call_ID;
p.tx_total_len = tx_total_len;
+ p.intr = intr;
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
@@ -443,6 +446,31 @@ void rxrpc_kernel_new_call_notification(
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
+/**
+ * rxrpc_kernel_set_max_life - Set maximum lifespan on a call
+ * @sock: The socket the call is on
+ * @call: The call to configure
+ * @hard_timeout: The maximum lifespan of the call in jiffies
+ *
+ * Set the maximum lifespan of a call. The call will end with ETIME or
+ * ETIMEDOUT if it takes longer than this.
+ */
+void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
+ unsigned long hard_timeout)
+{
+ unsigned long now;
+
+ mutex_lock(&call->user_mutex);
+
+ now = jiffies;
+ hard_timeout += now;
+ WRITE_ONCE(call->expect_term_by, hard_timeout);
+ rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);
+
+ mutex_unlock(&call->user_mutex);
+}
+EXPORT_SYMBOL(rxrpc_kernel_set_max_life);
+
/*
* connect an RxRPC socket
* - this just targets it at a specific destination; no actual connection
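A usage sketch for the new export ("call" is assumed to come from rxrpc_kernel_begin_call() above, with intr chosen by the caller):

	/* Bound the entire call at 30 seconds; rxrpc completes it with
	 * -ETIME/-ETIMEDOUT once the hard timer fires. */
	rxrpc_kernel_set_max_life(sock, call, 30 * HZ);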
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 062ca9dc29b8..07fc1dfa4878 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -482,6 +482,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
+ RXRPC_CALL_IS_INTR, /* The call is interruptible */
};
/*
@@ -711,6 +712,7 @@ struct rxrpc_call_params {
u32 normal; /* Max time since last call packet (msec) */
} timeouts;
u8 nr_timeouts; /* Number of timeouts specified */
+ bool intr; /* The call is interruptible */
};
struct rxrpc_send_params {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index fe96881a334d..d0ca98d7aef5 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -241,6 +241,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
return call;
}
+ if (p->intr)
+ __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
call->tx_total_len = p->tx_total_len;
trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
here, (const void *)p->user_call_ID);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 83797b3949e2..5cf5595a14d8 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -656,10 +656,14 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
add_wait_queue_exclusive(&call->waitq, &myself);
for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
+ if (test_bit(RXRPC_CALL_IS_INTR, &call->flags))
+ set_current_state(TASK_INTERRUPTIBLE);
+ else
+ set_current_state(TASK_UNINTERRUPTIBLE);
if (call->call_id)
break;
- if (signal_pending(current)) {
+ if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
+ signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
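The flag-gated sleep above is a reusable pattern; a sketch with hypothetical names (obj, OBJ_IS_INTR, done()):

	for (;;) {
		/* Only waiters marked interruptible pick the signalable state. */
		if (test_bit(OBJ_IS_INTR, &obj->flags))
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);
		if (done(obj))
			break;
		if (test_bit(OBJ_IS_INTR, &obj->flags) &&
		    signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);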
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bec64deb7b0a..45a05d9a27fa 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -80,7 +80,8 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
if (call->state >= RXRPC_CALL_COMPLETE)
return call->error;
- if (timeout == 0 &&
+ if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
+ timeout == 0 &&
tx_win == tx_start && signal_pending(current))
return -EINTR;
@@ -620,6 +621,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
.call.tx_total_len = -1,
.call.user_call_ID = 0,
.call.nr_timeouts = 0,
+ .call.intr = true,
.abort_code = 0,
.command = RXRPC_CMD_SEND_DATA,
.exclusive = false,
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 95b073ee4b32..4769f4c03e14 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -55,6 +55,7 @@ int snd_hdac_device_init(struct hdac_device *codec, struct hdac_bus *bus,
codec->bus = bus;
codec->addr = addr;
codec->type = HDA_DEV_CORE;
+ mutex_init(&codec->widget_lock);
pm_runtime_set_active(&codec->dev);
pm_runtime_get_noresume(&codec->dev);
atomic_set(&codec->in_pm, 0);
@@ -141,7 +142,9 @@ int snd_hdac_device_register(struct hdac_device *codec)
err = device_add(&codec->dev);
if (err < 0)
return err;
+ mutex_lock(&codec->widget_lock);
err = hda_widget_sysfs_init(codec);
+ mutex_unlock(&codec->widget_lock);
if (err < 0) {
device_del(&codec->dev);
return err;
@@ -158,7 +161,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_register);
void snd_hdac_device_unregister(struct hdac_device *codec)
{
if (device_is_registered(&codec->dev)) {
+ mutex_lock(&codec->widget_lock);
hda_widget_sysfs_exit(codec);
+ mutex_unlock(&codec->widget_lock);
device_del(&codec->dev);
snd_hdac_bus_remove_device(codec->bus, codec);
}
@@ -404,7 +409,9 @@ int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs)
}
if (sysfs) {
+ mutex_lock(&codec->widget_lock);
err = hda_widget_sysfs_reinit(codec, start_nid, nums);
+ mutex_unlock(&codec->widget_lock);
if (err < 0)
return err;
}
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index fb2aa344981e..909d5ef1179c 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -395,6 +395,7 @@ static int widget_tree_create(struct hdac_device *codec)
return 0;
}
+/* call with codec->widget_lock held */
int hda_widget_sysfs_init(struct hdac_device *codec)
{
int err;
@@ -411,11 +412,13 @@ int hda_widget_sysfs_init(struct hdac_device *codec)
return 0;
}
+/* call with codec->widget_lock held */
void hda_widget_sysfs_exit(struct hdac_device *codec)
{
widget_tree_free(codec);
}
+/* call with codec->widget_lock held */
int hda_widget_sysfs_reinit(struct hdac_device *codec,
hda_nid_t start_nid, int num_nodes)
{
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c53ca589c930..f83f21d64dd4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -478,12 +478,45 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
set_eapd(codec, *p, on);
}
+static int find_ext_mic_pin(struct hda_codec *codec);
+
+static void alc_headset_mic_no_shutup(struct hda_codec *codec)
+{
+ const struct hda_pincfg *pin;
+ int mic_pin = find_ext_mic_pin(codec);
+ int i;
+
+ /* don't shut up pins when unloading the driver; otherwise it breaks
+ * the default pin setup at the next load of the driver
+ */
+ if (codec->bus->shutdown)
+ return;
+
+ snd_array_for_each(&codec->init_pins, i, pin) {
+ /* use read here for syncing after issuing each verb */
+ if (pin->nid != mic_pin)
+ snd_hda_codec_read(codec, pin->nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ }
+
+ codec->pins_shutup = 1;
+}
+
static void alc_shutup_pins(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- if (!spec->no_shutup_pins)
- snd_hda_shutup_pins(codec);
+ switch (codec->core.vendor_id) {
+ case 0x10ec0286:
+ case 0x10ec0288:
+ case 0x10ec0298:
+ alc_headset_mic_no_shutup(codec);
+ break;
+ default:
+ if (!spec->no_shutup_pins)
+ snd_hda_shutup_pins(codec);
+ break;
+ }
}
/* generic shutup callback;
@@ -502,7 +535,6 @@ static void alc_eapd_shutup(struct hda_codec *codec)
/* generic EAPD initialization */
static void alc_auto_init_amp(struct hda_codec *codec, int type)
{
- alc_fill_eapd_coef(codec);
alc_auto_setup_eapd(codec, true);
alc_write_gpio(codec);
switch (type) {
@@ -797,10 +829,22 @@ static int alc_build_controls(struct hda_codec *codec)
* Common callbacks
*/
+static void alc_pre_init(struct hda_codec *codec)
+{
+ alc_fill_eapd_coef(codec);
+}
+
+#define is_s4_resume(codec) \
+ ((codec)->core.dev.power.power_state.event == PM_EVENT_RESTORE)
+
static int alc_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
+ /* hibernation resume needs the full chip initialization */
+ if (is_s4_resume(codec))
+ alc_pre_init(codec);
+
if (spec->init_hook)
spec->init_hook(codec);
@@ -1538,6 +1582,8 @@ static int patch_alc880(struct hda_codec *codec)
codec->patch_ops.unsol_event = alc880_unsol_event;
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc880_fixup_models, alc880_fixup_tbl,
alc880_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -1789,6 +1835,8 @@ static int patch_alc260(struct hda_codec *codec)
spec->shutup = alc_eapd_shutup;
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc260_fixup_models, alc260_fixup_tbl,
alc260_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -2492,6 +2540,8 @@ static int patch_alc882(struct hda_codec *codec)
break;
}
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
alc882_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -2666,6 +2716,8 @@ static int patch_alc262(struct hda_codec *codec)
#endif
alc_fix_pll_init(codec, 0x20, 0x0a, 10);
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc262_fixup_models, alc262_fixup_tbl,
alc262_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -2810,6 +2862,8 @@ static int patch_alc268(struct hda_codec *codec)
spec->shutup = alc_eapd_shutup;
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc268_fixup_models, alc268_fixup_tbl, alc268_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -2924,27 +2978,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
return alc_parse_auto_config(codec, alc269_ignore, ssids);
}
-static int find_ext_mic_pin(struct hda_codec *codec);
-
-static void alc286_shutup(struct hda_codec *codec)
-{
- const struct hda_pincfg *pin;
- int i;
- int mic_pin = find_ext_mic_pin(codec);
- /* don't shut up pins when unloading the driver; otherwise it breaks
- * the default pin setup at the next load of the driver
- */
- if (codec->bus->shutdown)
- return;
- snd_array_for_each(&codec->init_pins, i, pin) {
- /* use read here for syncing after issuing each verb */
- if (pin->nid != mic_pin)
- snd_hda_codec_read(codec, pin->nid, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
- }
- codec->pins_shutup = 1;
-}
-
static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
{
alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0);
@@ -6964,7 +6997,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -7007,7 +7042,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
- SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
@@ -7736,7 +7771,6 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0286:
case 0x10ec0288:
spec->codec_variant = ALC269_TYPE_ALC286;
- spec->shutup = alc286_shutup;
break;
case 0x10ec0298:
spec->codec_variant = ALC269_TYPE_ALC298;
@@ -7805,6 +7839,8 @@ static int patch_alc269(struct hda_codec *codec)
spec->init_hook = alc5505_dsp_init;
}
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc269_fixup_models,
alc269_fixup_tbl, alc269_fixups);
snd_hda_pick_pin_fixup(codec, alc269_pin_fixup_tbl, alc269_fixups);
@@ -7947,6 +7983,8 @@ static int patch_alc861(struct hda_codec *codec)
spec->power_hook = alc_power_eapd;
#endif
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, NULL, alc861_fixup_tbl, alc861_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -8044,6 +8082,8 @@ static int patch_alc861vd(struct hda_codec *codec)
spec->shutup = alc_eapd_shutup;
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, NULL, alc861vd_fixup_tbl, alc861vd_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -8779,6 +8819,8 @@ static int patch_alc662(struct hda_codec *codec)
break;
}
+ alc_pre_init(codec);
+
snd_hda_pick_fixup(codec, alc662_fixup_models,
alc662_fixup_tbl, alc662_fixups);
snd_hda_pick_pin_fixup(codec, alc662_pin_fixup_tbl, alc662_fixups);
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 156aa7c00787..2bbb92ed96c8 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 16511d97e8dc..09652eabe769 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -152,7 +152,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 pcc[16]; /* with MSA4 */
__u8 ppno[16]; /* with MSA5 */
__u8 kma[16]; /* with MSA8 */
- __u8 reserved[1808];
+ __u8 kdsa[16]; /* with MSA9 */
+ __u8 reserved[1792];
};
/* kvm attributes for crypto */
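The reserved pad shrinks by exactly the 16 bytes the new kdsa field occupies (1808 = 16 + 1792), so the size and layout of the UAPI struct are unchanged.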
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 8df526c80b65..4dd11a554b9b 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -306,7 +306,7 @@ ignore it:
- To skip validation of a file, add
- OBJECT_FILES_NON_STANDARD_filename.o := n
+ OBJECT_FILES_NON_STANDARD_filename.o := y
to the Makefile.
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ac743a1d53ab..7325d89ccad9 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -28,6 +28,8 @@
#include <linux/hashtable.h>
#include <linux/kernel.h>
+#define FAKE_JUMP_OFFSET -1
+
struct alternative {
struct list_head list;
struct instruction *insn;
@@ -568,7 +570,7 @@ static int add_jump_destinations(struct objtool_file *file)
insn->type != INSN_JUMP_UNCONDITIONAL)
continue;
- if (insn->ignore)
+ if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
continue;
rela = find_rela_by_dest_range(insn->sec, insn->offset,
@@ -745,10 +747,10 @@ static int handle_group_alt(struct objtool_file *file,
clear_insn_state(&fake_jump->state);
fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = -1;
+ fake_jump->offset = FAKE_JUMP_OFFSET;
fake_jump->type = INSN_JUMP_UNCONDITIONAL;
fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
- fake_jump->ignore = true;
+ fake_jump->func = orig_insn->func;
}
if (!special_alt->new_len) {
@@ -1957,7 +1959,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
return 1;
}
- func = insn->func ? insn->func->pfunc : NULL;
+ if (insn->func)
+ func = insn->func->pfunc;
if (func && insn->ignore) {
WARN_FUNC("BUG: why am I validating an ignored function?",
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 91750352459d..8059ce834247 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -1,4 +1,3 @@
-kselftest
gpiogpio-event-mon
gpiogpio-hammer
gpioinclude/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f2ebf8cf4686..9781ca79794a 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -71,6 +71,9 @@ override LDFLAGS =
override MAKEFLAGS =
endif
+# Append kselftest to KBUILD_OUTPUT to avoid cluttering
+# KBUILD_OUTPUT with selftest objects and headers installed
+# by selftests Makefile or lib.mk.
ifneq ($(KBUILD_SRC),)
override LDFLAGS =
endif
@@ -79,19 +82,13 @@ ifneq ($(O),)
BUILD := $(O)
else
ifneq ($(KBUILD_OUTPUT),)
- BUILD := $(KBUILD_OUTPUT)
+ BUILD := $(KBUILD_OUTPUT)/kselftest
else
BUILD := $(shell pwd)
DEFAULT_INSTALL_HDR_PATH := 1
endif
endif
-# KSFT_TAP_LEVEL is used from KSFT framework to prevent nested TAP header
-# printing from tests. Applicable to run_tests case where run_tests adds
-# TAP header prior running tests and when a test program invokes another
-# with system() call. Export it here to cover override RUN_TESTS defines.
-export KSFT_TAP_LEVEL=`echo 1`
-
# Prepare for headers install
top_srcdir ?= ../../..
include $(top_srcdir)/scripts/subarch.include
@@ -169,14 +166,22 @@ clean_hotplug:
run_pstore_crash:
make -C pstore run_crash
-INSTALL_PATH ?= install
+# Use $BUILD as the default install root. $BUILD points to the
+# right output location for the following cases:
+# 1. output_dir=kernel_src
+# 2. a separate output directory is specified using O= KBUILD_OUTPUT
+# 3. a separate output directory is specified using KBUILD_OUTPUT
+#
+INSTALL_PATH ?= $(BUILD)/install
INSTALL_PATH := $(abspath $(INSTALL_PATH))
ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
-install:
+install: all
ifdef INSTALL_PATH
@# Ask all targets to install their files
- mkdir -p $(INSTALL_PATH)
+ mkdir -p $(INSTALL_PATH)/kselftest
+ install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
+ install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
@@ -186,24 +191,20 @@ ifdef INSTALL_PATH
echo "#!/bin/sh" > $(ALL_SCRIPT)
echo "BASE_DIR=\$$(realpath \$$(dirname \$$0))" >> $(ALL_SCRIPT)
echo "cd \$$BASE_DIR" >> $(ALL_SCRIPT)
+ echo ". ./kselftest/runner.sh" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
echo "if [ \"\$$1\" = \"--summary\" ]; then" >> $(ALL_SCRIPT)
- echo " OUTPUT=\$$BASE_DIR/output.log" >> $(ALL_SCRIPT)
- echo " cat /dev/null > \$$OUTPUT" >> $(ALL_SCRIPT)
- echo "else" >> $(ALL_SCRIPT)
- echo " OUTPUT=/dev/stdout" >> $(ALL_SCRIPT)
+ echo " logfile=\$$BASE_DIR/output.log" >> $(ALL_SCRIPT)
+ echo " cat /dev/null > \$$logfile" >> $(ALL_SCRIPT)
echo "fi" >> $(ALL_SCRIPT)
- echo "export KSFT_TAP_LEVEL=1" >> $(ALL_SCRIPT)
- echo "export skip=4" >> $(ALL_SCRIPT)
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- echo "echo ; echo TAP version 13" >> $(ALL_SCRIPT); \
- echo "echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
- echo "echo ========================================" >> $(ALL_SCRIPT); \
echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
echo "cd $$TARGET" >> $(ALL_SCRIPT); \
+ echo -n "run_many" >> $(ALL_SCRIPT); \
make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
+ echo "" >> $(ALL_SCRIPT); \
echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
done;
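A sketch of the resulting workflow (paths illustrative, not from the patch):

	# Build products land in $KBUILD_OUTPUT/kselftest; the installed
	# tree defaults to $(BUILD)/install, i.e.:
	make KBUILD_OUTPUT=/tmp/kbuild -C tools/testing/selftests install
	/tmp/kbuild/kselftest/install/run_kselftest.sh --summary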
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c
index 901b85ea6a59..8f3655e59020 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test.c
@@ -21,6 +21,8 @@
#include "../kselftest.h"
+#define COUNT_ISN_BPS 4
+#define COUNT_WPS 4
/* Breakpoint access modes */
enum {
@@ -220,7 +222,7 @@ static void trigger_tests(void)
if (!local && !global)
continue;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < COUNT_ISN_BPS; i++) {
dummy_funcs[i]();
check_trapped();
}
@@ -292,7 +294,7 @@ static void launch_instruction_breakpoints(char *buf, int local, int global)
{
int i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < COUNT_ISN_BPS; i++) {
set_breakpoint_addr(dummy_funcs[i], i);
toggle_breakpoint(i, BP_X, 1, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
@@ -314,7 +316,7 @@ static void launch_watchpoints(char *buf, int mode, int len,
else
mode_str = "read";
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < COUNT_WPS; i++) {
set_breakpoint_addr(&dummy_var[i], i);
toggle_breakpoint(i, mode, len, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
@@ -330,8 +332,15 @@ static void launch_watchpoints(char *buf, int mode, int len,
static void launch_tests(void)
{
char buf[1024];
+ unsigned int tests = 0;
int len, local, global, i;
+ tests += 3 * COUNT_ISN_BPS;
+ tests += sizeof(long) / 2 * 3 * COUNT_WPS;
+ tests += sizeof(long) / 2 * 3 * COUNT_WPS;
+ tests += 2;
+ ksft_set_plan(tests);
+
/* Instruction breakpoints */
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
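Worked out, the plan above is 3 * 4 = 12 instruction-breakpoint tests, plus sizeof(long)/2 * 3 * 4 = 48 tests each for the write and read watchpoint passes on a 64-bit build, plus 2, i.e. ksft_set_plan(110); a 32-bit build plans 12 + 24 + 24 + 2 = 62.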
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index 2d95e5adde72..ab59d814341a 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -118,7 +118,7 @@ static bool set_watchpoint(pid_t pid, int size, int wp)
return false;
}
-static bool run_test(int wr_size, int wp_size, int wr, int wp)
+static bool run_test(int wr_size, int wp_size, int wr, int wp)
{
int status;
siginfo_t siginfo;
@@ -214,6 +214,7 @@ int main(int argc, char **argv)
bool result;
ksft_print_header();
+ ksft_set_plan(213);
act.sa_handler = sigalrm;
sigemptyset(&act.sa_mask);
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index f82dcc1f8841..cf868b5e00f7 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -173,6 +173,7 @@ int main(int argc, char **argv)
int opt;
bool do_suspend = true;
bool succeeded = true;
+ unsigned int tests = 0;
cpu_set_t available_cpus;
int err;
int cpu;
@@ -191,6 +192,13 @@ int main(int argc, char **argv)
}
}
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (!CPU_ISSET(cpu, &available_cpus))
+ continue;
+ tests++;
+ }
+ ksft_set_plan(tests);
+
if (do_suspend)
suspend();
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 3ab39a61b95b..df0ef02b4036 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -430,8 +430,6 @@ int main(int argc, char **argv)
{
char *tmp1, *tmp2, *our_path;
- ksft_print_header();
-
/* Find our path */
tmp1 = strdup(argv[0]);
if (!tmp1)
@@ -445,6 +443,8 @@ int main(int argc, char **argv)
mpid = getpid();
if (fork_wait()) {
+ ksft_print_header();
+ ksft_set_plan(12);
ksft_print_msg("[RUN]\t+++ Tests with uid == 0 +++\n");
return do_tests(0, our_path);
}
@@ -452,6 +452,8 @@ int main(int argc, char **argv)
ksft_print_msg("==================================================\n");
if (fork_wait()) {
+ ksft_print_header();
+ ksft_set_plan(9);
ksft_print_msg("[RUN]\t+++ Tests with uid != 0 +++\n");
return do_tests(1, our_path);
}
diff --git a/tools/testing/selftests/drivers/.gitignore b/tools/testing/selftests/drivers/.gitignore
new file mode 100644
index 000000000000..f6aebcc27b76
--- /dev/null
+++ b/tools/testing/selftests/drivers/.gitignore
@@ -0,0 +1 @@
+/dma-buf/udmabuf
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 54cd5c414e82..8d20957f7586 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -395,6 +395,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test requeue functionality\n", basename(argv[0]));
ksft_print_msg(
"\tArguments: broadcast=%d locked=%d owner=%d timeout=%ldns\n",
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
index 08187a16507f..742624c59ba7 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
@@ -79,6 +79,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Detect mismatched requeue_pi operations\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index f0542a344d95..a0f5934707ff 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -144,6 +144,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test signal handling during requeue_pi\n",
basename(argv[0]));
ksft_print_msg("\tArguments: <none>\n");
diff --git a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
index 6216de828093..a458d42ff86e 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
@@ -98,6 +98,7 @@ int main(int argc, char **argv)
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg(
"%s: Test the futex value of private file mappings in FUTEX_WAIT\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index bab3dfe1787f..04b95478059c 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -69,6 +69,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Block on a futex and wait for timeout\n",
basename(argv[0]));
ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
diff --git a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
index 26975322545b..3a1d12a14921 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
@@ -100,6 +100,7 @@ int main(int argc, char **argv)
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test the uninitialized futex value in FUTEX_WAIT\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index da15a63269b4..a34a6bbc30ce 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -65,6 +65,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 47e1d995c182..ec15c4f6af55 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -33,6 +33,7 @@ struct ksft_count {
};
static struct ksft_count ksft_cnt;
+static unsigned int ksft_plan;
static inline int ksft_test_num(void)
{
@@ -61,13 +62,21 @@ static inline void ksft_print_header(void)
printf("TAP version 13\n");
}
+static inline void ksft_set_plan(unsigned int plan)
+{
+ ksft_plan = plan;
+ printf("1..%d\n", ksft_plan);
+}
+
static inline void ksft_print_cnts(void)
{
- printf("Pass %d Fail %d Xfail %d Xpass %d Skip %d Error %d\n",
+ if (ksft_plan != ksft_test_num())
+ printf("# Planned tests != run tests (%u != %u)\n",
+ ksft_plan, ksft_test_num());
+ printf("# Pass %d Fail %d Xfail %d Xpass %d Skip %d Error %d\n",
ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
ksft_cnt.ksft_xskip, ksft_cnt.ksft_error);
- printf("1..%d\n", ksft_test_num());
}
static inline void ksft_print_msg(const char *msg, ...)
@@ -111,7 +120,7 @@ static inline void ksft_test_result_skip(const char *msg, ...)
ksft_cnt.ksft_xskip++;
va_start(args, msg);
- printf("ok %d # skip ", ksft_test_num());
+ printf("not ok %d # SKIP ", ksft_test_num());
vprintf(msg, args);
va_end(args);
}
@@ -172,7 +181,7 @@ static inline int ksft_exit_skip(const char *msg, ...)
va_list args;
va_start(args, msg);
- printf("1..%d # Skipped: ", ksft_test_num());
+ printf("not ok %d # SKIP ", 1 + ksft_test_num());
vprintf(msg, args);
va_end(args);
} else {
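A minimal consumer of the plan API, assuming it is compiled from within the selftests tree (the two checks are hypothetical):

	#include "kselftest.h"

	int main(void)
	{
		ksft_print_header();
		ksft_set_plan(2);	/* emits "1..2" up front */

		ksft_test_result_pass("first check\n");
		/* reported as "not ok 2 # SKIP ..." per the change above */
		ksft_test_result_skip("second check needs root\n");

		/* prints the counts as "# ..." diagnostic lines */
		return ksft_exit_pass();
	}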
diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl
new file mode 100755
index 000000000000..ec7e48118183
--- /dev/null
+++ b/tools/testing/selftests/kselftest/prefix.pl
@@ -0,0 +1,23 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+# Prefix all lines with "# ", unbuffered. Command being piped in may need
+# to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd".
+use strict;
+
+binmode STDIN;
+binmode STDOUT;
+
+STDOUT->autoflush(1);
+
+my $needed = 1;
+while (1) {
+ my $char;
+ my $bytes = sysread(STDIN, $char, 1);
+ exit 0 if ($bytes == 0);
+ if ($needed) {
+ print "# ";
+ $needed = 0;
+ }
+ print $char;
+ $needed = 1 if ($char eq "\n");
+}
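Intended usage, matching what runner.sh does below (the test name is hypothetical; stdbuf keeps the prefixing byte-accurate across partial lines):

	stdbuf -i0 -o0 -e0 ./some_test 2>&1 | ./kselftest/prefix.pl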
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
new file mode 100644
index 000000000000..eff3ee303d0d
--- /dev/null
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -0,0 +1,86 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Runs a set of tests in a given subdirectory.
+export skip_rc=4
+export logfile=/dev/stdout
+export per_test_logging=
+
+# There isn't a shell-agnostic way to find the path of a sourced file,
+# so we must rely on BASE_DIR being set to find other tools.
+if [ -z "$BASE_DIR" ]; then
+ echo "Error: BASE_DIR must be set before sourcing." >&2
+ exit 1
+fi
+
+# If Perl is unavailable, we must fall back to line-at-a-time prefixing
+# with sed instead of unbuffered output.
+tap_prefix()
+{
+ if [ ! -x /usr/bin/perl ]; then
+ sed -e 's/^/# /'
+ else
+ "$BASE_DIR"/kselftest/prefix.pl
+ fi
+}
+
+# If stdbuf is unavailable, we must fall back to line-at-a-time piping.
+tap_unbuffer()
+{
+ if ! which stdbuf >/dev/null ; then
+ "$@"
+ else
+ stdbuf -i0 -o0 -e0 "$@"
+ fi
+}
+
+run_one()
+{
+ DIR="$1"
+ TEST="$2"
+ NUM="$3"
+
+ BASENAME_TEST=$(basename $TEST)
+
+ TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
+ echo "# $TEST_HDR_MSG"
+ if [ ! -x "$TEST" ]; then
+ echo -n "# Warning: file $TEST is "
+ if [ ! -e "$TEST" ]; then
+ echo "missing!"
+ else
+ echo "not executable, correct this."
+ fi
+ echo "not ok $test_num $TEST_HDR_MSG"
+ else
+ cd `dirname $TEST` > /dev/null
+ (((((tap_unbuffer ./$BASENAME_TEST 2>&1; echo $? >&3) |
+ tap_prefix >&4) 3>&1) |
+ (read xs; exit $xs)) 4>>"$logfile" &&
+ echo "ok $test_num $TEST_HDR_MSG") ||
+ (if [ $? -eq $skip_rc ]; then \
+ echo "not ok $test_num $TEST_HDR_MSG # SKIP"
+ else
+ echo "not ok $test_num $TEST_HDR_MSG"
+ fi)
+ cd - >/dev/null
+ fi
+}
+
+run_many()
+{
+ echo "TAP version 13"
+ DIR=$(basename "$PWD")
+ test_num=0
+ total=$(echo "$@" | wc -w)
+ echo "1..$total"
+ for TEST in "$@"; do
+ BASENAME_TEST=$(basename $TEST)
+ test_num=$(( test_num + 1 ))
+ if [ -n "$per_test_logging" ]; then
+ logfile="/tmp/$BASENAME_TEST"
+ cat /dev/null > "$logfile"
+ fi
+ run_one "$DIR" "$TEST" "$test_num"
+ done
+}
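A sketch of sourcing the runner by hand; the generated run_kselftest.sh does the equivalent (directory and test names are only examples):

	BASE_DIR=$(pwd)
	. "$BASE_DIR"/kselftest/runner.sh
	cd timers
	run_many ./posix_timers ./nanosleep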
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 2689d1ea6d7a..df1bf9230a74 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,9 +1,14 @@
/x86_64/cr4_cpuid_sync_test
/x86_64/evmcs_test
+/x86_64/hyperv_cpuid
+/x86_64/kvm_create_max_vcpus
/x86_64/platform_info_test
/x86_64/set_sregs_test
+/x86_64/smm_test
+/x86_64/state_test
/x86_64/sync_regs_test
/x86_64/vmx_close_while_nested_test
+/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
-/x86_64/state_test
+/clear_dirty_log_test
/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index f8588cca2bef..79c524395ebe 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -20,6 +20,8 @@ TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
+TEST_GEN_PROGS_x86_64 += x86_64/kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 93f99c6b7d79..f50a15c38f9b 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -314,7 +314,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#ifdef USE_CLEAR_DIRTY_LOG
struct kvm_enable_cap cap = {};
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT;
+ cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
cap.args[0] = 1;
vm_enable_cap(vm, &cap);
#endif
@@ -430,7 +430,7 @@ int main(int argc, char *argv[])
int opt, i;
#ifdef USE_CLEAR_DIRTY_LOG
- if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT)) {
+ if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2)) {
fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
exit(KSFT_SKIP);
}
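The probe-then-enable pattern used above, isolated (all names as in the test):

	if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2))
		exit(KSFT_SKIP);	/* KVM_CLEAR_DIRTY_LOG unavailable */

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args = { 1 },		/* enable manual protect + clear */
	};
	vm_enable_cap(vm, &cap);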
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 07b71ad9734a..8c6b9619797d 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -118,6 +118,10 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
+void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state);
+int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state, bool ignore_error);
const char *exit_reason_str(unsigned int exit_reason);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 4ca96b228e46..e9113857f44e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1250,6 +1250,38 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
ret, errno);
}
+void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
+ TEST_ASSERT(ret == 0,
+ "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
+ ret, errno);
+}
+
+int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state, bool ignore_error)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
+ if (!ignore_error) {
+ TEST_ASSERT(ret == 0,
+ "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
+ ret, errno);
+ }
+
+ return ret;
+}
+
/*
* VM VCPU System Regs Get
*
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
new file mode 100644
index 000000000000..50e92996f918
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
@@ -0,0 +1,70 @@
+/*
+ * kvm_create_max_vcpus
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "asm/kvm.h"
+#include "linux/kvm.h"
+
+void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
+{
+ struct kvm_vm *vm;
+ int i;
+
+ printf("Testing creating %d vCPUs, with IDs %d...%d.\n",
+ num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
+
+ vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+ for (i = 0; i < num_vcpus; i++) {
+ int vcpu_id = first_vcpu_id + i;
+
+ /* This asserts that the vCPU was created. */
+ vm_vcpu_add(vm, vcpu_id, 0, 0);
+ }
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
+ int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+
+ printf("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
+ printf("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
+
+ /*
+ * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
+ * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
+ * in this case.
+ */
+ if (!kvm_max_vcpu_id)
+ kvm_max_vcpu_id = kvm_max_vcpus;
+
+ TEST_ASSERT(kvm_max_vcpu_id >= kvm_max_vcpus,
+ "KVM_MAX_VCPU_ID (%d) must be at least as large as KVM_MAX_VCPUS (%d).",
+ kvm_max_vcpu_id, kvm_max_vcpus);
+
+ test_vcpu_creation(0, kvm_max_vcpus);
+
+ if (kvm_max_vcpu_id > kvm_max_vcpus)
+ test_vcpu_creation(
+ kvm_max_vcpu_id - kvm_max_vcpus, kvm_max_vcpus);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
new file mode 100644
index 000000000000..61a2163cf9f1
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -0,0 +1,280 @@
+/*
+ * vmx_set_nested_state_test
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <errno.h>
+#include <linux/kvm.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+/*
+ * Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
+ * changes this should be updated.
+ */
+#define VMCS12_REVISION 0x11e57ed0
+#define VCPU_ID 5
+
+void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
+{
+ volatile struct kvm_run *run;
+
+ vcpu_nested_state_set(vm, VCPU_ID, state, false);
+ run = vcpu_state(vm, VCPU_ID);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+void test_nested_state_expect_errno(struct kvm_vm *vm,
+ struct kvm_nested_state *state,
+ int expected_errno)
+{
+ volatile struct kvm_run *run;
+ int rv;
+
+ rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
+ TEST_ASSERT(rv == -1 && errno == expected_errno,
+ "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
+ strerror(expected_errno), expected_errno, rv, strerror(errno),
+ errno);
+ run = vcpu_state(vm, VCPU_ID);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+void test_nested_state_expect_einval(struct kvm_vm *vm,
+ struct kvm_nested_state *state)
+{
+ test_nested_state_expect_errno(vm, state, EINVAL);
+}
+
+void test_nested_state_expect_efault(struct kvm_vm *vm,
+ struct kvm_nested_state *state)
+{
+ test_nested_state_expect_errno(vm, state, EFAULT);
+}
+
+void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
+ u32 vmcs12_revision)
+{
+ /* Set revision_id in vmcs12 to vmcs12_revision. */
+ *(u32 *)(state->data) = vmcs12_revision;
+}
+
+void set_default_state(struct kvm_nested_state *state)
+{
+ memset(state, 0, sizeof(*state));
+ state->flags = KVM_STATE_NESTED_RUN_PENDING |
+ KVM_STATE_NESTED_GUEST_MODE;
+ state->format = 0;
+ state->size = sizeof(*state);
+}
+
+void set_default_vmx_state(struct kvm_nested_state *state, int size)
+{
+ memset(state, 0, size);
+ state->flags = KVM_STATE_NESTED_GUEST_MODE |
+ KVM_STATE_NESTED_RUN_PENDING |
+ KVM_STATE_NESTED_EVMCS;
+ state->format = 0;
+ state->size = size;
+ state->vmx.vmxon_pa = 0x1000;
+ state->vmx.vmcs_pa = 0x2000;
+ state->vmx.smm.flags = 0;
+ set_revision_id_for_vmcs12(state, VMCS12_REVISION);
+}
+
+void test_vmx_nested_state(struct kvm_vm *vm)
+{
+ /* Add a page for VMCS12. */
+ const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
+ struct kvm_nested_state *state =
+ (struct kvm_nested_state *)malloc(state_sz);
+
+ /* The format must be set to 0. 0 for VMX, 1 for SVM. */
+ set_default_vmx_state(state, state_sz);
+ state->format = 1;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * We cannot virtualize anything if the guest does not have VMX
+ * enabled.
+ */
+ set_default_vmx_state(state, state_sz);
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * We cannot virtualize anything if the guest does not have VMX
+ * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
+ * is set to -1ull.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = -1ull;
+ test_nested_state(vm, state);
+
+ /* Enable VMX in the guest CPUID. */
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = -1ull;
+ state->vmx.smm.flags = 1;
+ test_nested_state_expect_einval(vm, state);
+
+ /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = -1ull;
+ state->vmx.vmcs_pa = 0;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
+ * setting the nested state.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = -1ull;
+ state->vmx.vmcs_pa = -1ull;
+ test_nested_state(vm, state);
+
+ /* It is invalid to have vmxon_pa set to a non-page aligned address. */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = 1;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
+ * KVM_STATE_NESTED_GUEST_MODE set together.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->flags = KVM_STATE_NESTED_GUEST_MODE |
+ KVM_STATE_NESTED_RUN_PENDING;
+ state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * It is invalid to have any of the SMM flags set besides:
+ * KVM_STATE_NESTED_SMM_GUEST_MODE
+ * KVM_STATE_NESTED_SMM_VMXON
+ */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
+ KVM_STATE_NESTED_SMM_VMXON);
+ test_nested_state_expect_einval(vm, state);
+
+ /* Outside SMM, SMM flags must be zero. */
+ set_default_vmx_state(state, state_sz);
+ state->flags = 0;
+ state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+ test_nested_state_expect_einval(vm, state);
+
+ /* Size must be large enough to fit kvm_nested_state and vmcs12. */
+ set_default_vmx_state(state, state_sz);
+ state->size = sizeof(*state);
+ test_nested_state(vm, state);
+
+ /* vmxon_pa cannot be the same address as vmcs_pa. */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = 0;
+ state->vmx.vmcs_pa = 0;
+ test_nested_state_expect_einval(vm, state);
+
+ /* The revision id for vmcs12 must be VMCS12_REVISION. */
+ set_default_vmx_state(state, state_sz);
+ set_revision_id_for_vmcs12(state, 0);
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * Test that if we leave nesting the state reflects that when we get
+ * it again.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->vmx.vmxon_pa = -1ull;
+ state->vmx.vmcs_pa = -1ull;
+ state->flags = 0;
+ test_nested_state(vm, state);
+ vcpu_nested_state_get(vm, VCPU_ID, state);
+ TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
+ "Size must be between %d and %d. The size returned was %d.",
+ sizeof(*state), state_sz, state->size);
+ TEST_ASSERT(state->vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
+ TEST_ASSERT(state->vmx.vmcs_pa == -1ull, "vmcs_pa must be -1ull.");
+
+ free(state);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_nested_state state;
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+ printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ /*
+ * AMD currently does not implement set_nested_state, so for now we
+ * just early out.
+ */
+ if (!(entry->ecx & CPUID_VMX)) {
+ fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, 0);
+
+ /* Passing a NULL kvm_nested_state causes a EFAULT. */
+ test_nested_state_expect_efault(vm, NULL);
+
+ /* 'size' cannot be smaller than sizeof(kvm_nested_state). */
+ set_default_state(&state);
+ state.size = 0;
+ test_nested_state_expect_einval(vm, &state);
+
+ /*
+ * Setting the flags 0xf fails the flags check. The only flags that
+ * can be used are:
+ * KVM_STATE_NESTED_GUEST_MODE
+ * KVM_STATE_NESTED_RUN_PENDING
+ * KVM_STATE_NESTED_EVMCS
+ */
+ set_default_state(&state);
+ state.flags = 0xf;
+ test_nested_state_expect_einval(vm, &state);
+
+ /*
+ * If KVM_STATE_NESTED_RUN_PENDING is set then
+ * KVM_STATE_NESTED_GUEST_MODE has to be set as well.
+ */
+ set_default_state(&state);
+ state.flags = KVM_STATE_NESTED_RUN_PENDING;
+ test_nested_state_expect_einval(vm, &state);
+
+ /*
+ * TODO: When SVM support is added for KVM_SET_NESTED_STATE
+ * add tests here to support it like VMX.
+ */
+ if (entry->ecx & CPUID_VMX)
+ test_vmx_nested_state(vm);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 5979fdc4f36c..077337195783 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -3,17 +3,12 @@
CC := $(CROSS_COMPILE)gcc
ifeq (0,$(MAKELEVEL))
- ifneq ($(O),)
- OUTPUT := $(O)
- else
- ifneq ($(KBUILD_OUTPUT),)
- OUTPUT := $(KBUILD_OUTPUT)
- else
- OUTPUT := $(shell pwd)
- DEFAULT_INSTALL_HDR_PATH := 1
- endif
+ ifeq ($(OUTPUT),)
+ OUTPUT := $(shell pwd)
+ DEFAULT_INSTALL_HDR_PATH := 1
endif
endif
+selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
# The following are built by lib.mk common compile rules.
# TEST_CUSTOM_PROGS should be used by tests that require
@@ -65,44 +60,13 @@ all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
endif
.ONESHELL:
-define RUN_TEST_PRINT_RESULT
- TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
- echo $$TEST_HDR_MSG; \
- echo "========================================"; \
- if [ ! -x $$TEST ]; then \
- echo "$$TEST_HDR_MSG: Warning: file $$BASENAME_TEST is not executable, correct this.";\
- echo "not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]"; \
- else \
- cd `dirname $$TEST` > /dev/null; \
- if [ "X$(summary)" != "X" ]; then \
- (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && \
- echo "ok 1..$$test_num $$TEST_HDR_MSG [PASS]") || \
- (if [ $$? -eq $$skip ]; then \
- echo "not ok 1..$$test_num $$TEST_HDR_MSG [SKIP]"; \
- else echo "not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]"; \
- fi;) \
- else \
- (./$$BASENAME_TEST && \
- echo "ok 1..$$test_num $$TEST_HDR_MSG [PASS]") || \
- (if [ $$? -eq $$skip ]; then \
- echo "not ok 1..$$test_num $$TEST_HDR_MSG [SKIP]"; \
- else echo "not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]"; \
- fi;) \
- fi; \
- cd - > /dev/null; \
- fi;
-endef
-
define RUN_TESTS
- @export KSFT_TAP_LEVEL=`echo 1`; \
- test_num=`echo 0`; \
- skip=`echo 4`; \
- echo "TAP version 13"; \
- for TEST in $(1); do \
- BASENAME_TEST=`basename $$TEST`; \
- test_num=`echo $$test_num+1 | bc`; \
- $(call RUN_TEST_PRINT_RESULT,$(TEST),$(BASENAME_TEST),$(test_num),$(skip)) \
- done;
+ @BASE_DIR="$(selfdir)"; \
+ . $(selfdir)/kselftest/runner.sh; \
+ if [ "X$(summary)" != "X" ]; then \
+ per_test_logging=1; \
+ fi; \
+ run_many $(1)
endef
run_tests: all
@@ -139,24 +103,12 @@ else
$(error Error: set INSTALL_PATH to use install)
endif
-define EMIT_TESTS
- @test_num=`echo 0`; \
+emit_tests:
for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \
BASENAME_TEST=`basename $$TEST`; \
- test_num=`echo $$test_num+1 | bc`; \
- TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
- echo "echo $$TEST_HDR_MSG"; \
- if [ ! -x $$TEST ]; then \
- echo "echo \"$$TEST_HDR_MSG: Warning: file $$BASENAME_TEST is not executable, correct this.\""; \
- echo "echo \"not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]\""; \
- else
- echo "(./$$BASENAME_TEST >> \$$OUTPUT 2>&1 && echo \"ok 1..$$test_num $$TEST_HDR_MSG [PASS]\") || (if [ \$$? -eq \$$skip ]; then echo \"not ok 1..$$test_num $$TEST_HDR_MSG [SKIP]\"; else echo \"not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]\"; fi;)"; \
- fi; \
- done;
-endef
-
-emit_tests:
- $(EMIT_TESTS)
+ echo " \\"; \
+ echo -n " \"$$BASENAME_TEST\""; \
+ done; \
# define if isn't already. It is undefined in make O= case.
ifeq ($(RM),)
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
index 6793f8ecc8e7..70b4ddbf126b 100644
--- a/tools/testing/selftests/membarrier/membarrier_test.c
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -304,6 +304,7 @@ static int test_membarrier_query(void)
int main(int argc, char **argv)
{
ksft_print_header();
+ ksft_set_plan(13);
test_membarrier_query();
test_membarrier();
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
new file mode 100644
index 000000000000..822a1e63d045
--- /dev/null
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -0,0 +1 @@
+pidfd_test
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index d59378a93782..5bae1792e3d6 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -371,6 +371,7 @@ static int test_pidfd_send_signal_syscall_support(void)
int main(int argc, char **argv)
{
ksft_print_header();
+ ksft_set_plan(4);
test_pidfd_send_signal_syscall_support();
test_pidfd_send_signal_simple_success();
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index c30c52e1d0d2..d6469535630a 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./
+
+ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+CLANG_FLAGS += -no-integrated-as
+endif
+
+CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ \
+ $(CLANG_FLAGS)
LDLIBS += -lpthread
# Own dependencies because we only want to build against 1st prerequisite, but
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
index 3cea19877227..84f28f147fb6 100644
--- a/tools/testing/selftests/rseq/rseq-arm.h
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -5,7 +5,54 @@
* (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG uses the udf A32 instruction with an uncommon immediate operand
+ * value 0x5de3. This traps if user-space reaches this instruction by mistake,
+ * and the uncommon operand ensures the kernel does not move the instruction
+ * pointer to attacker-controlled code on rseq abort.
+ *
+ * The instruction pattern in the A32 instruction set is:
+ *
+ * e7f5def3 udf #24035 ; 0x5de3
+ *
+ * This translates to the following instruction pattern in the T16 instruction
+ * set:
+ *
+ * little endian:
+ * def3 udf #243 ; 0xf3
+ * e7f5 b.n <7f5>
+ *
+ * pre-ARMv6 big endian code:
+ * e7f5 b.n <7f5>
+ * def3 udf #243 ; 0xf3
+ *
+ * ARMv6+ -mbig-endian generates mixed endianness code vs data: little-endian
+ * code and big-endian data. Ensure the RSEQ_SIG data signature matches code
+ * endianness. Prior to ARMv6, -mbig-endian generates big-endian code and data
+ * (which match), so there is no need to reverse the endianness of the data
+ * representation of the signature. However, the choice between BE32 and BE8
+ * is done by the linker, so we cannot know whether code and data endianness
+ * will be mixed before the linker is invoked.
+ */
+
+#define RSEQ_SIG_CODE 0xe7f5def3
+
+#ifndef __ASSEMBLER__
+
+#define RSEQ_SIG_DATA \
+ ({ \
+ int sig; \
+ asm volatile ("b 2f\n\t" \
+ "1: .inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
+ "2:\n\t" \
+ "ldr %[sig], 1b\n\t" \
+ : [sig] "=r" (sig)); \
+ sig; \
+ })
+
+#define RSEQ_SIG RSEQ_SIG_DATA
+
+#endif
#define rseq_smp_mb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
#define rseq_smp_rmb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
@@ -30,18 +77,35 @@ do { \
#include "rseq-skip.h"
#else /* !RSEQ_SKIP_FASTPATH */
-#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".word " __rseq_str(label) "b, 0x0\n\t" \
".popsection\n\t"
-#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
- __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
"adr r0, " __rseq_str(cs_label) "\n\t" \
@@ -61,7 +125,8 @@ do { \
__rseq_str(table_label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
- ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ ".arm\n\t" \
+ ".inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
"b %l[" __rseq_str(abort_label) "]\n\t"
@@ -86,7 +151,12 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -148,7 +218,12 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -214,7 +289,10 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -266,7 +344,12 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -336,7 +419,12 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -407,7 +495,13 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -485,7 +579,12 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"str %[src], %[rseq_scratch0]\n\t"
"str %[dst], %[rseq_scratch1]\n\t"
"str %[len], %[rseq_scratch2]\n\t"
@@ -604,7 +703,12 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"str %[src], %[rseq_scratch0]\n\t"
"str %[dst], %[rseq_scratch1]\n\t"
"str %[len], %[rseq_scratch2]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h
index 954f34671ca6..200dae9e4208 100644
--- a/tools/testing/selftests/rseq/rseq-arm64.h
+++ b/tools/testing/selftests/rseq/rseq-arm64.h
@@ -6,7 +6,20 @@
* (C) Copyright 2018 - Will Deacon <will.deacon@arm.com>
*/
-#define RSEQ_SIG 0xd428bc00 /* BRK #0x45E0 */
+/*
+ * aarch64 -mbig-endian generates mixed-endianness output: little-endian
+ * code and big-endian data. Ensure the RSEQ_SIG signature registered with
+ * the kernel matches the in-memory (data) view of the trap instruction.
+ */
+#define RSEQ_SIG_CODE 0xd428bc00 /* BRK #0x45E0. */
+
+#ifdef __AARCH64EB__
+#define RSEQ_SIG_DATA 0x00bc28d4 /* BRK #0x45E0. */
+#else
+#define RSEQ_SIG_DATA RSEQ_SIG_CODE
+#endif
+
+#define RSEQ_SIG RSEQ_SIG_DATA
#define rseq_smp_mb() __asm__ __volatile__ ("dmb ish" ::: "memory")
#define rseq_smp_rmb() __asm__ __volatile__ ("dmb ishld" ::: "memory")
@@ -82,19 +95,35 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
- " .pushsection __rseq_table, \"aw\"\n" \
+ " .pushsection __rseq_cs, \"aw\"\n" \
" .balign 32\n" \
__rseq_str(label) ":\n" \
" .long " __rseq_str(version) ", " __rseq_str(flags) "\n" \
" .quad " __rseq_str(start_ip) ", " \
__rseq_str(post_commit_offset) ", " \
__rseq_str(abort_ip) "\n" \
+ " .popsection\n\t" \
+ " .pushsection __rseq_cs_ptr_array, \"aw\"\n" \
+ " .quad " __rseq_str(label) "b\n" \
" .popsection\n"
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ " .pushsection __rseq_exit_point_array, \"aw\"\n" \
+ " .quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n" \
+ " .popsection\n"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
" adrp " RSEQ_ASM_TMP_REG ", " __rseq_str(cs_label) "\n" \
@@ -105,7 +134,7 @@ do { \
#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \
" b 222f\n" \
- " .inst " __rseq_str(RSEQ_SIG) "\n" \
+ " .inst " __rseq_str(RSEQ_SIG_CODE) "\n" \
__rseq_str(label) ":\n" \
" b %l[" __rseq_str(abort_label) "]\n" \
"222:\n"
@@ -182,6 +211,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -231,6 +265,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -282,6 +321,9 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -325,6 +367,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -379,6 +426,11 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -433,6 +485,12 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error3])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -490,6 +548,11 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -545,6 +608,11 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
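
The arm64 header now distinguishes the instruction encoding of the signature (RSEQ_SIG_CODE, emitted via .inst) from its in-memory data value (RSEQ_SIG_DATA, registered with the kernel), because -mbig-endian produces little-endian code alongside big-endian data. A minimal sketch of the relationship, using a hypothetical helper name (the headers above use fixed constants instead):

	#include <stdint.h>

	static inline uint32_t rseq_sig_code_to_data(uint32_t code)
	{
	#if defined(__AARCH64EB__) || defined(__ARMEB__)
		/* Code stays little-endian; data loads are big-endian. */
		return __builtin_bswap32(code);
	#else
		return code;
	#endif
	}

	/* rseq_sig_code_to_data(0xd428bc00) == 0x00bc28d4 on big-endian,
	 * matching RSEQ_SIG_DATA above. */
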
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
index 7f48ecf46994..e989e7c14b09 100644
--- a/tools/testing/selftests/rseq/rseq-mips.h
+++ b/tools/testing/selftests/rseq/rseq-mips.h
@@ -7,7 +7,39 @@
* (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG uses the break instruction. The instruction pattern is:
+ *
+ * On MIPS:
+ * 0350000d break 0x350
+ *
+ * On nanoMIPS:
+ * 00100350 break 0x350
+ *
+ * On microMIPS:
+ * 0000d407 break 0x350
+ *
+ * For nanoMIPS32 and microMIPS, the instruction stream is encoded as 16-bit
+ * halfwords, so the signature halfwords need to be swapped accordingly for
+ * little-endian.
+ */
+#if defined(__nanomips__)
+# ifdef __MIPSEL__
+# define RSEQ_SIG 0x03500010
+# else
+# define RSEQ_SIG 0x00100350
+# endif
+#elif defined(__mips_micromips)
+# ifdef __MIPSEL__
+# define RSEQ_SIG 0xd4070000
+# else
+# define RSEQ_SIG 0x0000d407
+# endif
+#elif defined(__mips__)
+# define RSEQ_SIG 0x0350000d
+#else
+/* Unknown MIPS architecture. */
+#endif
#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory")
#define rseq_smp_rmb() rseq_smp_mb()
@@ -54,20 +86,38 @@ do { \
# error unsupported _MIPS_SZLONG
#endif
-#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(label) "b") "\n\t" \
".popsection\n\t"
-#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
- __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(exit_ip)) "\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
@@ -113,7 +163,12 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -173,7 +228,12 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -237,7 +297,10 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -289,7 +352,12 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -357,7 +425,12 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -426,7 +499,13 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -500,7 +579,12 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
LONG_S " %[src], %[rseq_scratch0]\n\t"
LONG_S " %[dst], %[rseq_scratch1]\n\t"
LONG_S " %[len], %[rseq_scratch2]\n\t"
@@ -616,7 +700,12 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
LONG_S " %[src], %[rseq_scratch0]\n\t"
LONG_S " %[dst], %[rseq_scratch1]\n\t"
LONG_S " %[len], %[rseq_scratch2]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h
index 52630c9f42be..76be90196fe4 100644
--- a/tools/testing/selftests/rseq/rseq-ppc.h
+++ b/tools/testing/selftests/rseq/rseq-ppc.h
@@ -6,7 +6,15 @@
* (C) Copyright 2016-2018 - Boqun Feng <boqun.feng@gmail.com>
*/
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG is used with the following trap instruction:
+ *
+ * powerpc-be: 0f e5 00 0b twui r5,11
+ * powerpc64-le: 0b 00 e5 0f twui r5,11
+ * powerpc64-be: 0f e5 00 0b twui r5,11
+ */
+
+#define RSEQ_SIG 0x0fe5000b
#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory", "cc")
#define rseq_smp_lwsync() __asm__ __volatile__ ("lwsync" ::: "memory", "cc")
@@ -33,8 +41,8 @@ do { \
#else /* !RSEQ_SKIP_FASTPATH */
/*
- * The __rseq_table section can be used by debuggers to better handle
- * single-stepping through the restartable critical sections.
+ * The __rseq_cs_ptr_array and __rseq_cs sections can be used by debuggers to
+ * better handle single-stepping through the restartable critical sections.
*/
#ifdef __PPC64__
@@ -46,11 +54,14 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(label) "b\n\t" \
".popsection\n\t"
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
@@ -63,6 +74,19 @@ do { \
"std %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \
__rseq_str(label) ":\n\t"
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
+ ".popsection\n\t"
+
#else /* #ifdef __PPC64__ */
#define STORE_WORD "stw "
@@ -72,12 +96,29 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
/* 32-bit only supported on BE */ \
".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".long 0x0, " __rseq_str(label) "b\n\t" \
+ ".popsection\n\t"
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ /* 32-bit only supported on BE */ \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
@@ -169,6 +210,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -224,6 +270,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -286,6 +337,9 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -337,6 +391,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -400,6 +459,11 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -465,6 +529,12 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -532,6 +602,11 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* setup for memcpy */
"mr %%r19, %[len]\n\t"
"mr %%r20, %[src]\n\t"
@@ -601,6 +676,11 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* setup for memcpy */
"mr %%r19, %[len]\n\t"
"mr %%r20, %[src]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
index 0afdf7957974..8ef94ad1cbb4 100644
--- a/tools/testing/selftests/rseq/rseq-s390.h
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -44,22 +44,54 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(label) "b\n\t" \
+ ".popsection\n\t"
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
#elif __s390__
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".long 0x0, " __rseq_str(label) "b\n\t" \
+ ".popsection\n\t"
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
#define LONG_L "l"
@@ -92,14 +124,14 @@ do { \
".long " __rseq_str(RSEQ_SIG) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
- "j %l[" __rseq_str(abort_label) "]\n\t" \
+ "jg %l[" __rseq_str(abort_label) "]\n\t" \
".popsection\n\t"
#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
".pushsection __rseq_failure, \"ax\"\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
- "j %l[" __rseq_str(cmpfail_label) "]\n\t" \
+ "jg %l[" __rseq_str(cmpfail_label) "]\n\t" \
".popsection\n\t"
static inline __attribute__((always_inline))
@@ -109,6 +141,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -167,6 +204,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -227,6 +269,9 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -275,6 +320,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -346,6 +396,12 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -414,6 +470,11 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
LONG_S " %[src], %[rseq_scratch0]\n\t"
LONG_S " %[dst], %[rseq_scratch1]\n\t"
LONG_S " %[len], %[rseq_scratch2]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h
index 089410a314e9..b2da6004fe30 100644
--- a/tools/testing/selftests/rseq/rseq-x86.h
+++ b/tools/testing/selftests/rseq/rseq-x86.h
@@ -7,8 +7,25 @@
#include <stdint.h>
+/*
+ * RSEQ_SIG is used with the following reserved undefined instructions, which
+ * trap in user-space:
+ *
+ * x86-32: 0f b9 3d 53 30 05 53 ud1 0x53053053,%edi
+ * x86-64: 0f b9 3d 53 30 05 53 ud1 0x53053053(%rip),%edi
+ */
#define RSEQ_SIG 0x53053053
+/*
+ * Due to a compiler optimization bug in gcc-8 with asm goto and TLS asm input
+ * operands, we cannot use "m" input operands for the TLS fields; the
+ * __rseq_abi address is passed through an "r" input operand instead.
+ */
+
+/* Offset of cpu_id and rseq_cs fields in struct rseq. */
+#define RSEQ_CPU_ID_OFFSET 4
+#define RSEQ_CS_OFFSET 8
+
#ifdef __x86_64__
#define rseq_smp_mb() \
@@ -37,32 +54,49 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(label) "b\n\t" \
".popsection\n\t"
+
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
"leaq " __rseq_str(cs_label) "(%%rip), %%rax\n\t" \
- "movq %%rax, %[" __rseq_str(rseq_cs) "]\n\t" \
+ "movq %%rax, " __rseq_str(rseq_cs) "\n\t" \
__rseq_str(label) ":\n\t"
#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
RSEQ_INJECT_ASM(2) \
- "cmpl %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \
"jnz " __rseq_str(label) "\n\t"
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
".pushsection __rseq_failure, \"ax\"\n\t" \
- /* Disassembler-friendly signature: nopl <sig>(%rip). */\
- ".byte 0x0f, 0x1f, 0x05\n\t" \
+ /* Disassembler-friendly signature: ud1 <sig>(%rip),%edi. */ \
+ ".byte 0x0f, 0xb9, 0x3d\n\t" \
".long " __rseq_str(RSEQ_SIG) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
@@ -83,15 +117,20 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpq %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -102,8 +141,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
[v] "m" (*v),
[expect] "r" (expect),
[newv] "r" (newv)
@@ -140,16 +178,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movq %[v], %%rbx\n\t"
"cmpq %%rbx, %[expectnot]\n\t"
"je %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"movq %[v], %%rbx\n\t"
"cmpq %%rbx, %[expectnot]\n\t"
"je %l[error2]\n\t"
@@ -164,8 +207,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expectnot] "r" (expectnot),
@@ -199,12 +241,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
#endif
/* final store */
"addq %[count], %[v]\n\t"
@@ -213,8 +258,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[count] "er" (count)
@@ -244,15 +288,20 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpq %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -266,8 +315,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* try store input */
[v2] "m" (*v2),
[newv2] "r" (newv2),
@@ -314,9 +362,15 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
@@ -325,7 +379,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(5)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpq %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
"cmpq %[v2], %[expect2]\n\t"
@@ -338,8 +392,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* cmp2 input */
[v2] "m" (*v2),
[expect2] "r" (expect2),
@@ -381,18 +434,23 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"movq %[src], %[rseq_scratch0]\n\t"
"movq %[dst], %[rseq_scratch1]\n\t"
"movq %[len], %[rseq_scratch2]\n\t"
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz 5f\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f)
"cmpq %[v], %[expect]\n\t"
"jnz 7f\n\t"
#endif
@@ -440,8 +498,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
#endif
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expect] "r" (expect),
@@ -520,31 +577,47 @@ do { \
*/
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".long " __rseq_str(label) "b, 0x0\n\t" \
".popsection\n\t"
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
- "movl $" __rseq_str(cs_label) ", %[rseq_cs]\n\t" \
+ "movl $" __rseq_str(cs_label) ", " __rseq_str(rseq_cs) "\n\t" \
__rseq_str(label) ":\n\t"
#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
RSEQ_INJECT_ASM(2) \
- "cmpl %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \
"jnz " __rseq_str(label) "\n\t"
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
".pushsection __rseq_failure, \"ax\"\n\t" \
- /* Disassembler-friendly signature: nopl <sig>. */ \
- ".byte 0x0f, 0x1f, 0x05\n\t" \
+ /* Disassembler-friendly signature: ud1 <sig>,%edi. */ \
+ ".byte 0x0f, 0xb9, 0x3d\n\t" \
".long " __rseq_str(RSEQ_SIG) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
@@ -565,15 +638,20 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpl %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpl %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -584,8 +662,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
[v] "m" (*v),
[expect] "r" (expect),
[newv] "r" (newv)
@@ -622,16 +699,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[v], %%ebx\n\t"
"cmpl %%ebx, %[expectnot]\n\t"
"je %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"movl %[v], %%ebx\n\t"
"cmpl %%ebx, %[expectnot]\n\t"
"je %l[error2]\n\t"
@@ -646,8 +728,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expectnot] "r" (expectnot),
@@ -681,12 +762,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
#endif
/* final store */
"addl %[count], %[v]\n\t"
@@ -695,8 +779,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[count] "ir" (count)
@@ -726,15 +809,20 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpl %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpl %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -749,8 +837,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* try store input */
[v2] "m" (*v2),
[newv2] "m" (newv2),
@@ -788,16 +875,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[expect], %%eax\n\t"
"cmpl %[v], %%eax\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"movl %[expect], %%eax\n\t"
"cmpl %[v], %%eax\n\t"
"jnz %l[error2]\n\t"
@@ -813,8 +905,7 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* try store input */
[v2] "m" (*v2),
[newv2] "r" (newv2),
@@ -853,9 +944,15 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpl %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
@@ -864,7 +961,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(5)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpl %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
"cmpl %[expect2], %[v2]\n\t"
@@ -878,8 +975,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* cmp2 input */
[v2] "m" (*v2),
[expect2] "r" (expect2),
@@ -922,19 +1018,24 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"movl %[src], %[rseq_scratch0]\n\t"
"movl %[dst], %[rseq_scratch1]\n\t"
"movl %[len], %[rseq_scratch2]\n\t"
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 5f\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 7f\n\t"
@@ -984,8 +1085,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
#endif
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expect] "m" (expect),
@@ -1030,19 +1130,24 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"movl %[src], %[rseq_scratch0]\n\t"
"movl %[dst], %[rseq_scratch1]\n\t"
"movl %[len], %[rseq_scratch2]\n\t"
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 5f\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 7f\n\t"
@@ -1093,8 +1198,7 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
#endif
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expect] "m" (expect),
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index 4847e97ed049..7159eb777fd3 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -25,18 +25,27 @@
#include <syscall.h>
#include <assert.h>
#include <signal.h>
+#include <limits.h>
#include "rseq.h"
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-__attribute__((tls_model("initial-exec"))) __thread
-volatile struct rseq __rseq_abi = {
+__thread volatile struct rseq __rseq_abi = {
.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};
-static __attribute__((tls_model("initial-exec"))) __thread
-volatile int refcount;
+/*
+ * Shared with other libraries. This library may take ownership of rseq
+ * registration if this variable is still 0 when its constructor runs. The
+ * constructor sets it to 1 when handling rseq; the destructor resets it to 0.
+ */
+int __rseq_handled;
+
+/* Whether this library has ownership of the rseq registration. */
+static int rseq_ownership;
+
+static __thread volatile uint32_t __rseq_refcount;
static void signal_off_save(sigset_t *oldset)
{
@@ -69,8 +78,14 @@ int rseq_register_current_thread(void)
int rc, ret = 0;
sigset_t oldset;
+ if (!rseq_ownership)
+ return 0;
signal_off_save(&oldset);
- if (refcount++)
+ if (__rseq_refcount == UINT_MAX) {
+ ret = -1;
+ goto end;
+ }
+ if (__rseq_refcount++)
goto end;
rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), 0, RSEQ_SIG);
if (!rc) {
@@ -78,9 +93,9 @@ int rseq_register_current_thread(void)
goto end;
}
if (errno != EBUSY)
- __rseq_abi.cpu_id = -2;
+ __rseq_abi.cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED;
ret = -1;
- refcount--;
+ __rseq_refcount--;
end:
signal_restore(oldset);
return ret;
@@ -91,13 +106,20 @@ int rseq_unregister_current_thread(void)
int rc, ret = 0;
sigset_t oldset;
+ if (!rseq_ownership)
+ return 0;
signal_off_save(&oldset);
- if (--refcount)
+ if (!__rseq_refcount) {
+ ret = -1;
+ goto end;
+ }
+ if (--__rseq_refcount)
goto end;
rc = sys_rseq(&__rseq_abi, sizeof(struct rseq),
RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
if (!rc)
goto end;
+ __rseq_refcount = 1;
ret = -1;
end:
signal_restore(oldset);
@@ -115,3 +137,20 @@ int32_t rseq_fallback_current_cpu(void)
}
return cpu;
}
+
+void __attribute__((constructor)) rseq_init(void)
+{
+ /* Check whether rseq is handled by another library. */
+ if (__rseq_handled)
+ return;
+ __rseq_handled = 1;
+ rseq_ownership = 1;
+}
+
+void __attribute__((destructor)) rseq_fini(void)
+{
+ if (!rseq_ownership)
+ return;
+ __rseq_handled = 0;
+ rseq_ownership = 0;
+}
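
The constructor/destructor pair above implements a first-come ownership handshake over the shared __rseq_handled symbol, so this library and another rseq user (for example a future C library) do not both register the same threads. A sketch of how a cooperating library could honour it; the weak-symbol probe shown here is an illustrative assumption:

extern int __rseq_handled __attribute__((weak));

static int other_lib_claim_rseq(void)
{
	if (!&__rseq_handled)
		return 1;	/* symbol absent: nobody to coordinate with */
	if (__rseq_handled)
		return 0;	/* another library already owns registration */
	__rseq_handled = 1;	/* claim ownership in our own constructor */
	return 1;
}
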
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index 6c1126e7f685..d40d60e7499e 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -44,6 +44,7 @@
#endif
extern __thread volatile struct rseq __rseq_abi;
+extern int __rseq_handled;
#define rseq_likely(x) __builtin_expect(!!(x), 1)
#define rseq_unlikely(x) __builtin_expect(!!(x), 0)
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
index 228c2ae47687..ad0f8df2ca0a 100644
--- a/tools/testing/selftests/sigaltstack/sas.c
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -109,6 +109,7 @@ int main(void)
int err;
ksft_print_header();
+ ksft_set_plan(3);
sigemptyset(&act.sa_mask);
act.sa_flags = SA_ONSTACK | SA_SIGINFO;
diff --git a/tools/testing/selftests/sync/sync_test.c b/tools/testing/selftests/sync/sync_test.c
index 7f7938263c5c..3824b66f41a0 100644
--- a/tools/testing/selftests/sync/sync_test.c
+++ b/tools/testing/selftests/sync/sync_test.c
@@ -86,6 +86,7 @@ int main(void)
int err;
ksft_print_header();
+ ksft_set_plan(3 + 7);
sync_api_supported();
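
Both ksft_set_plan() calls above declare how many TAP results the test will emit before any test runs, so the harness can flag a test that exits early. A minimal sketch of the convention; the relative include path is an assumption:

#include "../kselftest.h"

int main(void)
{
	ksft_print_header();
	ksft_set_plan(2);		/* prints the "1..2" TAP plan line */

	ksft_test_result_pass("first check\n");
	ksft_test_result_pass("second check\n");
	return ksft_exit_pass();	/* exits with KSFT_PASS */
}
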
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index ea434ddc8499..aad9284c043a 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -57,3 +57,6 @@ config HAVE_KVM_VCPU_ASYNC_IOCTL
config HAVE_KVM_VCPU_RUN_PID_CHANGE
bool
+
+config HAVE_KVM_NO_POLL
+ bool
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index f412ebc90610..90cedebaeb94 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -56,7 +56,7 @@
__asm__(".arch_extension virt");
#endif
-DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
/* Per-CPU variable containing the currently running vcpu. */
@@ -224,9 +224,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_MSI_DEVID:
if (!kvm)
r = -EINVAL;
@@ -360,8 +357,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
+ kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+ cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -373,18 +372,21 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+ vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_arm_set_running_vcpu(vcpu);
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
kvm_vcpu_load_sysregs(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
+ kvm_vcpu_pmu_restore_guest(vcpu);
if (single_task_running())
vcpu_clear_wfe_traps(vcpu);
else
vcpu_set_wfe_traps(vcpu);
+
+ vcpu_ptrauth_setup_lazy(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -393,6 +395,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_vcpu_put_sysregs(vcpu);
kvm_timer_vcpu_put(vcpu);
kvm_vgic_put(vcpu);
+ kvm_vcpu_pmu_restore_host(vcpu);
vcpu->cpu = -1;
@@ -545,6 +548,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
if (likely(vcpu->arch.has_run_once))
return 0;
+ if (!kvm_arm_vcpu_is_finalized(vcpu))
+ return -EPERM;
+
vcpu->arch.has_run_once = true;
if (likely(irqchip_in_kernel(kvm))) {
@@ -1121,6 +1127,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (unlikely(!kvm_vcpu_initialized(vcpu)))
break;
+ r = -EPERM;
+ if (!kvm_arm_vcpu_is_finalized(vcpu))
+ break;
+
r = -EFAULT;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
break;
@@ -1174,6 +1184,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
return kvm_arm_vcpu_set_events(vcpu, &events);
}
+ case KVM_ARM_VCPU_FINALIZE: {
+ int what;
+
+ if (!kvm_vcpu_initialized(vcpu))
+ return -ENOEXEC;
+
+ if (get_user(what, (const int __user *)argp))
+ return -EFAULT;
+
+ return kvm_arm_vcpu_finalize(vcpu, what);
+ }
default:
r = -EINVAL;
}
@@ -1554,11 +1575,11 @@ static int init_hyp_mode(void)
}
for_each_possible_cpu(cpu) {
- kvm_cpu_context_t *cpu_ctxt;
+ kvm_host_data_t *cpu_data;
- cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
- kvm_init_host_cpu_context(cpu_ctxt, cpu);
- err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+ cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+ kvm_init_host_cpu_context(&cpu_data->host_ctxt, cpu);
+ err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
if (err) {
kvm_err("Cannot map host CPU state: %d\n", err);
@@ -1669,6 +1690,10 @@ int kvm_arch_init(void *opaque)
if (err)
return err;
+ err = kvm_arm_init_sve();
+ if (err)
+ return err;
+
if (!in_hyp_mode) {
err = init_hyp_mode();
if (err)
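
With kvm_arm_vcpu_is_finalized() now gating first run and KVM_GET_REG_LIST, userspace that enables an opt-in feature such as SVE must issue the new KVM_ARM_VCPU_FINALIZE ioctl before running the vCPU. A sketch of the userspace side, assuming the arm64 UAPI headers from this series; error handling elided:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int finalize_sve(int vcpu_fd)
{
	int what = KVM_ARM_VCPU_SVE;	/* feature to finalize */

	/* Must follow KVM_ARM_VCPU_INIT and any KVM_REG_ARM64_SVE_VLS write. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &what);
}
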
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5fb0f1656a96..f0d13d9d125d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,9 +51,9 @@
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
+#include <linux/io.h>
#include <asm/processor.h>
-#include <asm/io.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@@ -1135,11 +1135,11 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
- * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
+ * kvm_get_dirty_log_protect - get a snapshot of dirty pages
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
- * @is_dirty: flag set if any page is dirty
+ * @flush: true if TLB flush is needed by caller
*
* We need to keep it in mind that VCPU threads can write to the bitmap
* concurrently. So, to avoid losing track of dirty pages we keep the
@@ -1224,6 +1224,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address from which to fetch the bitmap of dirty pages
+ * @flush: true if TLB flush is needed by caller
*/
int kvm_clear_dirty_log_protect(struct kvm *kvm,
struct kvm_clear_dirty_log *log, bool *flush)
@@ -1251,7 +1252,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
if (!dirty_bitmap)
return -ENOENT;
- n = kvm_dirty_bitmap_bytes(memslot);
+ n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
if (log->first_page > memslot->npages ||
log->num_pages > memslot->npages - log->first_page ||
@@ -1264,8 +1265,8 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
return -EFAULT;
spin_lock(&kvm->mmu_lock);
- for (offset = log->first_page,
- i = offset / BITS_PER_LONG, n = log->num_pages / BITS_PER_LONG; n--;
+ for (offset = log->first_page, i = offset / BITS_PER_LONG,
+ n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
i++, offset += BITS_PER_LONG) {
unsigned long mask = *dirty_bitmap_buffer++;
atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
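
The two fixes above size the user-supplied bitmap from log->num_pages rather than from the whole memslot, and round the word count up so a range that is not a multiple of BITS_PER_LONG is still fully scanned. A standalone arithmetic check, with the kernel macros re-derived locally for illustration:

#include <stdio.h>

#define BITS_PER_LONG		64
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long num_pages = 96;	/* not a multiple of BITS_PER_LONG */

	printf("buffer bytes: %lu\n", ALIGN(num_pages, BITS_PER_LONG) / 8);	/* 16 */
	printf("words, fixed: %lu\n", DIV_ROUND_UP(num_pages, BITS_PER_LONG));	/* 2 */
	printf("words, old:   %lu\n", num_pages / BITS_PER_LONG);	/* 1: pages 64..95 skipped */
	return 0;
}
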
@@ -1742,6 +1743,70 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_page);
+static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct kvm_host_map *map)
+{
+ kvm_pfn_t pfn;
+ void *hva = NULL;
+ struct page *page = KVM_UNMAPPED_PAGE;
+
+ if (!map)
+ return -EINVAL;
+
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (is_error_noslot_pfn(pfn))
+ return -EINVAL;
+
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ hva = kmap(page);
+ } else {
+ hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+ }
+
+ if (!hva)
+ return -EFAULT;
+
+ map->page = page;
+ map->hva = hva;
+ map->pfn = pfn;
+ map->gfn = gfn;
+
+ return 0;
+}
+
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+ return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ bool dirty)
+{
+ if (!map)
+ return;
+
+ if (!map->hva)
+ return;
+
+ if (map->page)
+ kunmap(map->page);
+ else
+ memunmap(map->hva);
+
+ if (dirty) {
+ kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+ kvm_release_pfn_dirty(map->pfn);
+ } else {
+ kvm_release_pfn_clean(map->pfn);
+ }
+
+ map->hva = NULL;
+ map->page = NULL;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
kvm_pfn_t pfn;
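
kvm_vcpu_map()/kvm_vcpu_unmap() added above generalise gfn_to_page() to guest memory that is not backed by a struct page (kmap for ordinary pages, memremap otherwise). A sketch of the intended kernel-side calling pattern; the zero-fill is an arbitrary example access:

#include <linux/kvm_host.h>

static void touch_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gfn, &map))
		return;				/* no memslot, or pfn not mappable */

	memset(map.hva, 0, PAGE_SIZE);		/* access through the host mapping */
	kvm_vcpu_unmap(vcpu, &map, true);	/* true: mark the page dirty */
}
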
@@ -2255,7 +2320,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
u64 block_ns;
start = cur = ktime_get();
- if (vcpu->halt_poll_ns) {
+ if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
++vcpu->stat.halt_attempted_poll;
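
kvm_arch_no_poll() is the per-vCPU hook guarded by the new HAVE_KVM_NO_POLL symbol from the Kconfig hunk further up; architectures that do not select it get a static-inline returning false. A sketch of what an opting-in architecture provides; the disable condition is hypothetical (s390 keys it off a VM attribute in this series):

/* In the arch Kconfig:  select HAVE_KVM_NO_POLL */

#include <linux/kvm_host.h>

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.no_poll;	/* hypothetical per-VM flag */
}
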
@@ -2886,6 +2951,16 @@ out:
}
#endif
+static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct kvm_device *dev = filp->private_data;
+
+ if (dev->ops->mmap)
+ return dev->ops->mmap(dev, vma);
+
+ return -ENODEV;
+}
+
static int kvm_device_ioctl_attr(struct kvm_device *dev,
int (*accessor)(struct kvm_device *dev,
struct kvm_device_attr *attr),
@@ -2930,6 +3005,13 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
struct kvm_device *dev = filp->private_data;
struct kvm *kvm = dev->kvm;
+ if (dev->ops->release) {
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
+ dev->ops->release(dev);
+ mutex_unlock(&kvm->lock);
+ }
+
kvm_put_kvm(kvm);
return 0;
}
@@ -2938,6 +3020,7 @@ static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
.release = kvm_device_release,
KVM_COMPAT(kvm_device_ioctl),
+ .mmap = kvm_device_mmap,
};
struct kvm_device *kvm_device_from_filp(struct file *filp)
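
With the new .mmap file operation and the .release hook (which, unlike .destroy, runs on the final fput with kvm->lock held and the device already unlinked from the VM's list, as the hunk above shows), a device backend can expose state to userspace and defer its teardown. A sketch with hypothetical names; ops fields unrelated to the new hooks are elided:

#include <linux/kvm_host.h>

static int demo_dev_mmap(struct kvm_device *dev, struct vm_area_struct *vma)
{
	return -EINVAL;			/* map device state into the vma here */
}

static void demo_dev_release(struct kvm_device *dev)
{
	kfree(dev);			/* device is already off kvm->devices */
}

static struct kvm_device_ops demo_dev_ops = {
	.name	 = "demo",
	.mmap	 = demo_dev_mmap,
	.release = demo_dev_release,
};
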
@@ -3046,7 +3129,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_CHECK_EXTENSION_VM:
case KVM_CAP_ENABLE_CAP_VM:
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
- case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
#endif
return 1;
#ifdef CONFIG_KVM_MMIO
@@ -3065,6 +3148,8 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
case KVM_CAP_MAX_VCPU_ID:
return KVM_MAX_VCPU_ID;
+ case KVM_CAP_NR_MEMSLOTS:
+ return KVM_USER_MEM_SLOTS;
default:
break;
}
@@ -3082,7 +3167,7 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
{
switch (cap->cap) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
- case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
if (cap->flags || (cap->args[0] & ~1))
return -EINVAL;
kvm->manual_dirty_log_protect = cap->args[0];
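
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 supersedes the earlier capability number while keeping the semantics: args[0] must be 0 or 1, and 1 switches the VM to the two-phase KVM_GET_DIRTY_LOG / KVM_CLEAR_DIRTY_LOG protocol. Userspace enables it per VM roughly as follows; error handling elided:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_manual_protect(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
	cap.args[0] = 1;	/* only bit 0 is accepted, see the check above */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
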