-rw-r--r--.mailmap5
-rw-r--r--Documentation/ABI/testing/sysfs-platform-lg-laptop35
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst8
-rw-r--r--Documentation/admin-guide/mm/memory-hotplug.rst42
-rw-r--r--Documentation/core-api/boot-time-mm.rst69
-rw-r--r--Documentation/crypto/asymmetric-keys.txt26
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt44
-rw-r--r--Documentation/devicetree/bindings/clock/actions,owl-cmu.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/at91-clock.txt516
-rw-r--r--Documentation/devicetree/bindings/clock/hi3670-clock.txt43
-rw-r--r--Documentation/devicetree/bindings/clock/ingenic,cgu.txt7
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,camcc.txt18
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,hfpll.txt60
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,krait-cc.txt34
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt17
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt (renamed from Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt)8
-rw-r--r--Documentation/devicetree/bindings/display/panel/simple-panel.txt3
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt1
-rw-r--r--Documentation/devicetree/bindings/media/cedrus.txt54
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-vpu.txt29
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt10
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt126
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt5
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt8
-rw-r--r--Documentation/filesystems/ceph.txt5
-rw-r--r--Documentation/filesystems/nfs/rpc-cache.txt6
-rw-r--r--Documentation/filesystems/overlayfs.txt6
-rw-r--r--Documentation/filesystems/porting5
-rw-r--r--Documentation/filesystems/vfs.txt22
-rw-r--r--Documentation/kbuild/makefiles.txt15
-rw-r--r--Documentation/laptops/lg-laptop.rst81
-rw-r--r--Documentation/media/kapi/mc-core.rst2
-rw-r--r--Documentation/media/uapi/mediactl/media-controller.rst1
-rw-r--r--Documentation/media/uapi/mediactl/media-funcs.rst6
-rw-r--r--Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst66
-rw-r--r--Documentation/media/uapi/mediactl/media-request-ioc-queue.rst78
-rw-r--r--Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst51
-rw-r--r--Documentation/media/uapi/mediactl/request-api.rst252
-rw-r--r--Documentation/media/uapi/mediactl/request-func-close.rst49
-rw-r--r--Documentation/media/uapi/mediactl/request-func-ioctl.rst67
-rw-r--r--Documentation/media/uapi/mediactl/request-func-poll.rst77
-rw-r--r--Documentation/media/uapi/v4l/buffer.rst29
-rw-r--r--Documentation/media/uapi/v4l/extended-controls.rst176
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-compressed.rst16
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-reserved.rst15
-rw-r--r--Documentation/media/uapi/v4l/vidioc-create-bufs.rst14
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst59
-rw-r--r--Documentation/media/uapi/v4l/vidioc-qbuf.rst37
-rw-r--r--Documentation/media/uapi/v4l/vidioc-queryctrl.rst14
-rw-r--r--Documentation/media/uapi/v4l/vidioc-reqbufs.rst42
-rw-r--r--Documentation/media/videodev2.h.rst.exceptions3
-rw-r--r--Documentation/networking/ice.rst2
-rw-r--r--Documentation/networking/ip-sysctl.txt11
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/process/programming-language.rst45
-rw-r--r--Documentation/security/keys/core.rst217
-rw-r--r--Documentation/security/self-protection.rst10
-rw-r--r--Documentation/sysctl/kernel.txt18
-rw-r--r--Documentation/trace/kprobetrace.rst23
-rw-r--r--Documentation/x86/x86_64/mm.txt3
-rw-r--r--MAINTAINERS56
-rw-r--r--Makefile4
-rw-r--r--arch/Kconfig14
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/include/asm/processor.h6
-rw-r--r--arch/alpha/kernel/core_apecs.c3
-rw-r--r--arch/alpha/kernel/core_cia.c4
-rw-r--r--arch/alpha/kernel/core_irongate.c4
-rw-r--r--arch/alpha/kernel/core_lca.c3
-rw-r--r--arch/alpha/kernel/core_marvel.c6
-rw-r--r--arch/alpha/kernel/core_mcpcia.c6
-rw-r--r--arch/alpha/kernel/core_t2.c2
-rw-r--r--arch/alpha/kernel/core_titan.c8
-rw-r--r--arch/alpha/kernel/core_tsunami.c8
-rw-r--r--arch/alpha/kernel/core_wildfire.c6
-rw-r--r--arch/alpha/kernel/pci-noop.c6
-rw-r--r--arch/alpha/kernel/pci.c6
-rw-r--r--arch/alpha/kernel/pci_iommu.c14
-rw-r--r--arch/alpha/kernel/setup.c3
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/mm/init.c4
-rw-r--r--arch/alpha/mm/numa.c1
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/include/asm/processor.h8
-rw-r--r--arch/arc/kernel/unwind.c6
-rw-r--r--arch/arc/mm/highmem.c4
-rw-r--r--arch/arc/mm/init.c3
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/krait-l2-accessors.c48
-rw-r--r--arch/arm/include/asm/krait-l2-accessors.h9
-rw-r--r--arch/arm/include/asm/processor.h6
-rw-r--r--arch/arm/kernel/devtree.c1
-rw-r--r--arch/arm/kernel/setup.c5
-rw-r--r--arch/arm/mach-davinci/include/mach/clock.h21
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c10
-rw-r--r--arch/arm/mm/dma-mapping.c1
-rw-r--r--arch/arm/mm/init.c3
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/xen/mm.c1
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/include/asm/percpu.h3
-rw-r--r--arch/arm64/include/asm/processor.h7
-rw-r--r--arch/arm64/kernel/acpi.c1
-rw-r--r--arch/arm64/kernel/acpi_numa.c1
-rw-r--r--arch/arm64/kernel/crash_dump.c2
-rw-r--r--arch/arm64/kernel/probes/kprobes.c27
-rw-r--r--arch/arm64/kernel/process.c22
-rw-r--r--arch/arm64/kernel/setup.c6
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/init.c5
-rw-r--r--arch/arm64/mm/kasan_init.c3
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/arm64/mm/numa.c5
-rw-r--r--arch/c6x/Kconfig1
-rw-r--r--arch/c6x/include/asm/processor.h11
-rw-r--r--arch/c6x/kernel/setup.c27
-rw-r--r--arch/c6x/mm/dma-coherent.c4
-rw-r--r--arch/c6x/mm/init.c7
-rw-r--r--arch/csky/Kconfig2
-rw-r--r--arch/csky/Kconfig.debug10
-rw-r--r--arch/csky/Makefile13
-rw-r--r--arch/csky/boot/dts/Makefile10
-rw-r--r--arch/csky/include/asm/processor.h6
-rw-r--r--arch/csky/kernel/setup.c1
-rw-r--r--arch/csky/mm/highmem.c4
-rw-r--r--arch/csky/mm/init.c3
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/h8300/include/asm/processor.h6
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/h8300/mm/init.c6
-rw-r--r--arch/hexagon/Kconfig2
-rw-r--r--arch/hexagon/include/asm/processor.h3
-rw-r--r--arch/hexagon/kernel/dma.c2
-rw-r--r--arch/hexagon/kernel/setup.c2
-rw-r--r--arch/hexagon/mm/init.c3
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/processor.h6
-rw-r--r--arch/ia64/kernel/crash.c2
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/iosapic.c2
-rw-r--r--arch/ia64/kernel/mca.c10
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/signal.c4
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kernel/topology.c2
-rw-r--r--arch/ia64/kernel/unwind.c2
-rw-r--r--arch/ia64/mm/contig.c6
-rw-r--r--arch/ia64/mm/discontig.c7
-rw-r--r--arch/ia64/mm/init.c11
-rw-r--r--arch/ia64/mm/numa.c2
-rw-r--r--arch/ia64/mm/tlb.c8
-rw-r--r--arch/ia64/pci/pci.c2
-rw-r--r--arch/ia64/sn/kernel/bte.c2
-rw-r--r--arch/ia64/sn/kernel/io_common.c11
-rw-r--r--arch/ia64/sn/kernel/setup.c7
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/atari/stram.c5
-rw-r--r--arch/m68k/coldfire/m54xx.c2
-rw-r--r--arch/m68k/include/asm/processor.h6
-rw-r--r--arch/m68k/kernel/setup_mm.c1
-rw-r--r--arch/m68k/kernel/setup_no.c1
-rw-r--r--arch/m68k/kernel/uboot.c2
-rw-r--r--arch/m68k/mm/init.c6
-rw-r--r--arch/m68k/mm/mcfmmu.c5
-rw-r--r--arch/m68k/mm/motorola.c8
-rw-r--r--arch/m68k/mm/sun3mmu.c6
-rw-r--r--arch/m68k/sun3/config.c2
-rw-r--r--arch/m68k/sun3/dvma.c2
-rw-r--r--arch/m68k/sun3/mmu_emu.c2
-rw-r--r--arch/m68k/sun3/sun3dvma.c5
-rw-r--r--arch/m68k/sun3x/dvma.c2
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/processor.h12
-rw-r--r--arch/microblaze/mm/consistent.c2
-rw-r--r--arch/microblaze/mm/init.c7
-rw-r--r--arch/microblaze/pci/pci-common.c2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/ar7/memory.c2
-rw-r--r--arch/mips/ath79/setup.c2
-rw-r--r--arch/mips/bcm63xx/prom.c2
-rw-r--r--arch/mips/bcm63xx/setup.c2
-rw-r--r--arch/mips/bmips/setup.c2
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c4
-rw-r--r--arch/mips/dec/prom/memory.c2
-rw-r--r--arch/mips/emma/common/prom.c2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/include/asm/processor.h5
-rw-r--r--arch/mips/jazz/jazzdma.c2
-rw-r--r--arch/mips/kernel/crash.c2
-rw-r--r--arch/mips/kernel/crash_dump.c2
-rw-r--r--arch/mips/kernel/prom.c2
-rw-r--r--arch/mips/kernel/setup.c7
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/kvm/commpage.c2
-rw-r--r--arch/mips/kvm/dyntrans.c2
-rw-r--r--arch/mips/kvm/emulate.c2
-rw-r--r--arch/mips/kvm/interrupt.c2
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/lantiq/prom.c2
-rw-r--r--arch/mips/lasat/prom.c2
-rw-r--r--arch/mips/loongson64/common/init.c2
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c3
-rw-r--r--arch/mips/mm/init.c7
-rw-r--r--arch/mips/mm/pgtable-32.c2
-rw-r--r--arch/mips/mti-malta/malta-memory.c2
-rw-r--r--arch/mips/netlogic/xlp/dt.c2
-rw-r--r--arch/mips/pci/pci-legacy.c2
-rw-r--r--arch/mips/pci/pci.c2
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mips/rb532/prom.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/mips/sibyte/common/cfe.c2
-rw-r--r--arch/mips/sibyte/swarm/setup.c2
-rw-r--r--arch/mips/txx9/rbtx4938/prom.c2
-rw-r--r--arch/mips/vdso/Makefile2
-rw-r--r--arch/nds32/Kconfig2
-rw-r--r--arch/nds32/include/asm/processor.h6
-rw-r--r--arch/nds32/kernel/setup.c3
-rw-r--r--arch/nds32/mm/highmem.c2
-rw-r--r--arch/nds32/mm/init.c13
-rw-r--r--arch/nios2/Kconfig2
-rw-r--r--arch/nios2/include/asm/processor.h6
-rw-r--r--arch/nios2/kernel/prom.c2
-rw-r--r--arch/nios2/kernel/setup.c1
-rw-r--r--arch/nios2/mm/init.c4
-rw-r--r--arch/openrisc/Kconfig2
-rw-r--r--arch/openrisc/include/asm/processor.h5
-rw-r--r--arch/openrisc/kernel/setup.c3
-rw-r--r--arch/openrisc/mm/init.c7
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/processor.h11
-rw-r--r--arch/parisc/mm/init.c3
-rw-r--r--arch/powerpc/Kconfig15
-rw-r--r--arch/powerpc/Makefile4
-rw-r--r--arch/powerpc/boot/dts/fsl/t2080rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc885ads.dts13
-rw-r--r--arch/powerpc/include/asm/code-patching.h5
-rw-r--r--arch/powerpc/include/asm/mmu-8xx.h43
-rw-r--r--arch/powerpc/include/asm/processor.h6
-rw-r--r--arch/powerpc/include/asm/rtas.h1
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c4
-rw-r--r--arch/powerpc/kernel/head_8xx.S97
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/pci_32.c5
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/setup_32.c10
-rw-r--r--arch/powerpc/kernel/setup_64.c11
-rw-r--r--arch/powerpc/kvm/book3s_hv.c3
-rw-r--r--arch/powerpc/kvm/emulate.c7
-rw-r--r--arch/powerpc/lib/alloc.c4
-rw-r--r--arch/powerpc/mm/8xx_mmu.c26
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/mem.c5
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c9
-rw-r--r--arch/powerpc/mm/numa.c5
-rw-r--r--arch/powerpc/mm/pgtable_32.c2
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c2
-rw-r--r--arch/powerpc/perf/8xx-pmu.c27
-rw-r--r--arch/powerpc/platforms/40x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/Kconfig2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c4
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c9
-rw-r--r--arch/powerpc/platforms/ps3/setup.c4
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c8
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c13
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c2
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c4
-rw-r--r--arch/powerpc/xmon/Makefile6
-rw-r--r--arch/riscv/Kconfig3
-rw-r--r--arch/riscv/configs/defconfig16
-rw-r--r--arch/riscv/include/asm/elf.h3
-rw-r--r--arch/riscv/include/asm/processor.h6
-rw-r--r--arch/riscv/kernel/cpufeature.c8
-rw-r--r--arch/riscv/mm/init.c5
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/kernel/crash_dump.c5
-rw-r--r--arch/s390/kernel/setup.c12
-rw-r--r--arch/s390/kernel/smp.c5
-rw-r--r--arch/s390/kernel/topology.c6
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/mm/init.c5
-rw-r--r--arch/s390/mm/vmem.c7
-rw-r--r--arch/s390/numa/mode_emu.c3
-rw-r--r--arch/s390/numa/numa.c3
-rw-r--r--arch/s390/numa/toptree.c4
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/include/asm/processor_32.h6
-rw-r--r--arch/sh/include/asm/processor_64.h15
-rw-r--r--arch/sh/mm/init.c9
-rw-r--r--arch/sh/mm/ioremap_fixed.c2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/processor_32.h6
-rw-r--r--arch/sparc/include/asm/processor_64.h6
-rw-r--r--arch/sparc/kernel/mdesc.c7
-rw-r--r--arch/sparc/kernel/perf_event.c5
-rw-r--r--arch/sparc/kernel/prom_32.c4
-rw-r--r--arch/sparc/kernel/prom_64.c2
-rw-r--r--arch/sparc/kernel/setup_64.c12
-rw-r--r--arch/sparc/kernel/smp_64.c18
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/sparc/mm/init_32.c5
-rw-r--r--arch/sparc/mm/init_64.c27
-rw-r--r--arch/sparc/mm/srmmu.c12
-rw-r--r--arch/um/Kconfig2
-rw-r--r--arch/um/drivers/line.c4
-rw-r--r--arch/um/drivers/net_kern.c4
-rw-r--r--arch/um/drivers/port_user.c2
-rw-r--r--arch/um/drivers/vector_kern.c19
-rw-r--r--arch/um/drivers/vector_user.c6
-rw-r--r--arch/um/include/shared/aio.h28
-rw-r--r--arch/um/kernel/initrd.c4
-rw-r--r--arch/um/kernel/irq.c3
-rw-r--r--arch/um/kernel/mem.c16
-rw-r--r--arch/um/kernel/physmem.c1
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/um/os-Linux/Makefile8
-rw-r--r--arch/um/os-Linux/aio.c390
-rw-r--r--arch/um/os-Linux/skas/process.c5
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/include/asm/processor.h6
-rw-r--r--arch/unicore32/kernel/hibernate.c2
-rw-r--r--arch/unicore32/kernel/setup.c5
-rw-r--r--arch/unicore32/mm/init.c7
-rw-r--r--arch/unicore32/mm/mmu.c3
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/entry/calling.h14
-rw-r--r--arch/x86/entry/entry_32.S7
-rw-r--r--arch/x86/entry/entry_64.S3
-rw-r--r--arch/x86/entry/entry_64_compat.S5
-rw-r--r--arch/x86/include/asm/iosf_mbi.h39
-rw-r--r--arch/x86/include/asm/kexec.h3
-rw-r--r--arch/x86/include/asm/processor.h12
-rw-r--r--arch/x86/include/asm/ptrace.h38
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/acpi/sleep.c1
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c7
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/e820.c6
-rw-r--r--arch/x86/kernel/mpparse.c1
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86/kernel/pvclock.c2
-rw-r--r--arch/x86/kernel/setup.c1
-rw-r--r--arch/x86/kernel/setup_percpu.c14
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/tce_64.c6
-rw-r--r--arch/x86/mm/amdtopology.c1
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/highmem_32.c4
-rw-r--r--arch/x86/mm/init.c1
-rw-r--r--arch/x86/mm/init_32.c5
-rw-r--r--arch/x86/mm/init_64.c7
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/kasan_init_64.c11
-rw-r--r--arch/x86/mm/kaslr.c1
-rw-r--r--arch/x86/mm/numa.c3
-rw-r--r--arch/x86/mm/numa_32.c1
-rw-r--r--arch/x86/mm/numa_64.c2
-rw-r--r--arch/x86/mm/numa_emulation.c1
-rw-r--r--arch/x86/mm/pageattr-test.c2
-rw-r--r--arch/x86/mm/pageattr.c2
-rw-r--r--arch/x86/mm/pat.c2
-rw-r--r--arch/x86/mm/physaddr.c2
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/platform/efi/efi.c3
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/platform/efi/quirks.c7
-rw-r--r--arch/x86/platform/intel/iosf_mbi.c217
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c4
-rw-r--r--arch/x86/power/hibernate_32.c2
-rw-r--r--arch/x86/um/asm/processor_32.h8
-rw-r--r--arch/x86/um/asm/processor_64.h3
-rw-r--r--arch/x86/um/shared/sysdep/ptrace_32.h12
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/enlighten_pv.c3
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/p2m.c6
-rw-r--r--arch/x86/xen/platform-pci-unplug.c4
-rw-r--r--arch/x86/xen/spinlock.c33
-rw-r--r--arch/x86/xen/xen-pvh.S2
-rw-r--r--arch/xtensa/Kconfig5
-rw-r--r--arch/xtensa/boot/Makefile2
-rw-r--r--arch/xtensa/include/asm/processor.h8
-rw-r--r--arch/xtensa/kernel/pci.c2
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S36
-rw-r--r--arch/xtensa/mm/cache.c2
-rw-r--r--arch/xtensa/mm/init.c6
-rw-r--r--arch/xtensa/mm/kasan_init.c5
-rw-r--r--arch/xtensa/mm/mmu.c4
-rw-r--r--arch/xtensa/platforms/iss/network.c4
-rw-r--r--arch/xtensa/platforms/iss/setup.c2
-rw-r--r--block/bfq-cgroup.c4
-rw-r--r--block/bfq-iosched.c2
-rw-r--r--block/bio.c176
-rw-r--r--block/blk-cgroup.c123
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-iolatency.c26
-rw-r--r--block/blk-merge.c46
-rw-r--r--block/blk-settings.c2
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-throttle.c13
-rw-r--r--block/bounce.c6
-rw-r--r--block/cfq-iosched.c4
-rw-r--r--crypto/asymmetric_keys/Kconfig31
-rw-r--r--crypto/asymmetric_keys/Makefile25
-rw-r--r--crypto/asymmetric_keys/asym_tpm.c988
-rw-r--r--crypto/asymmetric_keys/asymmetric_keys.h3
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c43
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c1
-rw-r--r--crypto/asymmetric_keys/pkcs8.asn124
-rw-r--r--crypto/asymmetric_keys/pkcs8_parser.c184
-rw-r--r--crypto/asymmetric_keys/public_key.c191
-rw-r--r--crypto/asymmetric_keys/signature.c95
-rw-r--r--crypto/asymmetric_keys/tpm.asn15
-rw-r--r--crypto/asymmetric_keys/tpm_parser.c102
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c21
-rw-r--r--crypto/rsa-pkcs1pad.c59
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_memhotplug.c4
-rw-r--r--drivers/acpi/device_pm.c1
-rw-r--r--drivers/acpi/numa.c1
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c21
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/auxdisplay/panel.c7
-rw-r--r--drivers/base/memory.c22
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/block/brd.c16
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/loop.c14
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c4
-rw-r--r--drivers/block/nbd.c12
-rw-r--r--drivers/block/rbd.c28
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/actions/Kconfig1
-rw-r--r--drivers/clk/actions/Makefile1
-rw-r--r--drivers/clk/actions/owl-common.c3
-rw-r--r--drivers/clk/actions/owl-common.h5
-rw-r--r--drivers/clk/actions/owl-reset.c66
-rw-r--r--drivers/clk/actions/owl-reset.h31
-rw-r--r--drivers/clk/actions/owl-s700.c55
-rw-r--r--drivers/clk/actions/owl-s900.c86
-rw-r--r--drivers/clk/at91/Makefile5
-rw-r--r--drivers/clk/at91/at91sam9260.c494
-rw-r--r--drivers/clk/at91/at91sam9rl.c171
-rw-r--r--drivers/clk/at91/at91sam9x5.c309
-rw-r--r--drivers/clk/at91/clk-audio-pll.c109
-rw-r--r--drivers/clk/at91/clk-generated.c81
-rw-r--r--drivers/clk/at91/clk-h32mx.c22
-rw-r--r--drivers/clk/at91/clk-i2s-mux.c40
-rw-r--r--drivers/clk/at91/clk-main.c112
-rw-r--r--drivers/clk/at91/clk-master.c99
-rw-r--r--drivers/clk/at91/clk-peripheral.c81
-rw-r--r--drivers/clk/at91/clk-pll.c190
-rw-r--r--drivers/clk/at91/clk-plldiv.c27
-rw-r--r--drivers/clk/at91/clk-programmable.c81
-rw-r--r--drivers/clk/at91/clk-slow.c32
-rw-r--r--drivers/clk/at91/clk-smd.c34
-rw-r--r--drivers/clk/at91/clk-system.c39
-rw-r--r--drivers/clk/at91/clk-usb.c94
-rw-r--r--drivers/clk/at91/clk-utmi.c45
-rw-r--r--drivers/clk/at91/dt-compat.c961
-rw-r--r--drivers/clk/at91/pmc.c78
-rw-r--r--drivers/clk/at91/pmc.h159
-rw-r--r--drivers/clk/at91/sama5d2.c336
-rw-r--r--drivers/clk/at91/sama5d4.c264
-rw-r--r--drivers/clk/axs10x/pll_clock.c4
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c22
-rw-r--r--drivers/clk/clk-asm9260.c4
-rw-r--r--drivers/clk/clk-bulk.c80
-rw-r--r--drivers/clk/clk-cdce925.c11
-rw-r--r--drivers/clk/clk-devres.c24
-rw-r--r--drivers/clk/clk-fixed-factor.c8
-rw-r--r--drivers/clk/clk-fixed-rate.c1
-rw-r--r--drivers/clk/clk-gpio.c8
-rw-r--r--drivers/clk/clk-hsdk-pll.c4
-rw-r--r--drivers/clk/clk-max77686.c27
-rw-r--r--drivers/clk/clk-nomadik.c4
-rw-r--r--drivers/clk/clk-npcm7xx.c2
-rw-r--r--drivers/clk/clk-palmas.c4
-rw-r--r--drivers/clk/clk-qoriq.c14
-rw-r--r--drivers/clk/clk-s2mps11.c58
-rw-r--r--drivers/clk/clk-scmi.c2
-rw-r--r--drivers/clk/clk-scpi.c6
-rw-r--r--drivers/clk/clk-si5351.c4
-rw-r--r--drivers/clk/clk-stm32f4.c2
-rw-r--r--drivers/clk/clk-stm32h7.c2
-rw-r--r--drivers/clk/clk-stm32mp1.c2
-rw-r--r--drivers/clk/clk-tango4.c10
-rw-r--r--drivers/clk/clk.c95
-rw-r--r--drivers/clk/davinci/psc.c18
-rw-r--r--drivers/clk/hisilicon/Kconfig7
-rw-r--r--drivers/clk/hisilicon/Makefile1
-rw-r--r--drivers/clk/hisilicon/clk-hi3670.c1016
-rw-r--r--drivers/clk/hisilicon/reset.c5
-rw-r--r--drivers/clk/imx/clk-cpu.c2
-rw-r--r--drivers/clk/imx/clk-imx6q.c1
-rw-r--r--drivers/clk/imx/clk-imx6sl.c2
-rw-r--r--drivers/clk/imx/clk-imx6sll.c1
-rw-r--r--drivers/clk/imx/clk-imx6sx.c1
-rw-r--r--drivers/clk/imx/clk-imx6ul.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c28
-rw-r--r--drivers/clk/imx/clk.h7
-rw-r--r--drivers/clk/ingenic/Kconfig47
-rw-r--r--drivers/clk/ingenic/Makefile9
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c225
-rw-r--r--drivers/clk/keystone/Kconfig2
-rw-r--r--drivers/clk/keystone/gate.c7
-rw-r--r--drivers/clk/keystone/pll.c7
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c5
-rw-r--r--drivers/clk/meson/axg-audio.c34
-rw-r--r--drivers/clk/meson/axg.c332
-rw-r--r--drivers/clk/meson/axg.h8
-rw-r--r--drivers/clk/meson/clk-pll.c156
-rw-r--r--drivers/clk/meson/clkc.h16
-rw-r--r--drivers/clk/meson/gxbb.c518
-rw-r--r--drivers/clk/meson/gxbb.h10
-rw-r--r--drivers/clk/meson/meson8b.c280
-rw-r--r--drivers/clk/meson/meson8b.h5
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c4
-rw-r--r--drivers/clk/mvebu/ap806-system-controller.c5
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/mvebu/armada-375.c4
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c58
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c8
-rw-r--r--drivers/clk/mvebu/armada-37xx-xtal.c4
-rw-r--r--drivers/clk/mvebu/armada-38x.c4
-rw-r--r--drivers/clk/mvebu/armada-39x.c4
-rw-r--r--drivers/clk/mvebu/armada-xp.c4
-rw-r--r--drivers/clk/mvebu/clk-corediv.c4
-rw-r--r--drivers/clk/mvebu/clk-cpu.c4
-rw-r--r--drivers/clk/mvebu/common.c4
-rw-r--r--drivers/clk/mvebu/common.h4
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c4
-rw-r--r--drivers/clk/mvebu/dove.c4
-rw-r--r--drivers/clk/mvebu/kirkwood.c4
-rw-r--r--drivers/clk/mvebu/mv98dx3236.c4
-rw-r--r--drivers/clk/mvebu/orion.c4
-rw-r--r--drivers/clk/qcom/Kconfig53
-rw-r--r--drivers/clk/qcom/Makefile8
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c1745
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c1
-rw-r--r--drivers/clk/qcom/clk-branch.c2
-rw-r--r--drivers/clk/qcom/clk-hfpll.c244
-rw-r--r--drivers/clk/qcom/clk-hfpll.h44
-rw-r--r--drivers/clk/qcom/clk-krait.c126
-rw-r--r--drivers/clk/qcom/clk-krait.h40
-rw-r--r--drivers/clk/qcom/clk-rcg.h11
-rw-r--r--drivers/clk/qcom/clk-rcg2.c195
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c82
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c172
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c152
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c2744
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c2480
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c321
-rw-r--r--drivers/clk/qcom/hfpll.c96
-rw-r--r--drivers/clk/qcom/kpss-xcc.c87
-rw-r--r--drivers/clk/qcom/krait-cc.c397
-rw-r--r--drivers/clk/renesas/Kconfig19
-rw-r--r--drivers/clk/renesas/Makefile3
-rw-r--r--drivers/clk/renesas/clk-div6.c17
-rw-r--r--drivers/clk/renesas/clk-emev2.c22
-rw-r--r--drivers/clk/renesas/clk-mstp.c13
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c9
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c9
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c9
-rw-r--r--drivers/clk/renesas/clk-r8a7779.c9
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c9
-rw-r--r--drivers/clk/renesas/clk-rz.c9
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c9
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c217
-rw-r--r--drivers/clk/renesas/r8a7743-cpg-mssr.c18
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c323
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c286
-rw-r--r--drivers/clk/renesas/r8a7790-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a7791-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a7792-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a7794-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c72
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c72
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c69
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c81
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c32
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c17
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c17
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c3
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h7
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c45
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h33
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c5
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c205
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h38
-rw-r--r--drivers/clk/rockchip/clk-ddr.c4
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c29
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c8
-rw-r--r--drivers/clk/samsung/clk-cpu.c6
-rw-r--r--drivers/clk/samsung/clk-cpu.h2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c1
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c1
-rw-r--r--drivers/clk/samsung/clk-exynos4.c226
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c42
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c76
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c33
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c43
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c43
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c43
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c66
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c41
-rw-r--r--drivers/clk/samsung/clk.c23
-rw-r--r--drivers/clk/samsung/clk.h18
-rw-r--r--drivers/clk/st/clkgen-fsyn.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c48
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c53
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c25
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c52
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.h1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.h30
-rw-r--r--drivers/clk/sunxi/clk-mod0.c6
-rw-r--r--drivers/clk/sunxi/clk-sun9i-core.c20
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c4
-rw-r--r--drivers/clk/tegra/clk-dfll.c8
-rw-r--r--drivers/clk/tegra/clk-tegra210.c7
-rw-r--r--drivers/clk/ti/Makefile9
-rw-r--r--drivers/clk/ti/apll.c18
-rw-r--r--drivers/clk/ti/clk-33xx-compat.c218
-rw-r--r--drivers/clk/ti/clk-33xx.c232
-rw-r--r--drivers/clk/ti/clk-43xx-compat.c225
-rw-r--r--drivers/clk/ti/clk-43xx.c249
-rw-r--r--drivers/clk/ti/clk-7xx-compat.c823
-rw-r--r--drivers/clk/ti/clk-7xx.c590
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c4
-rw-r--r--drivers/clk/ti/clk.c32
-rw-r--r--drivers/clk/ti/clkctrl.c101
-rw-r--r--drivers/clk/ti/clock.h11
-rw-r--r--drivers/clk/ti/composite.c14
-rw-r--r--drivers/clk/ti/divider.c40
-rw-r--r--drivers/clk/ti/dpll.c18
-rw-r--r--drivers/clk/ti/dpll3xxx.c124
-rw-r--r--drivers/clk/ti/fapll.c8
-rw-r--r--drivers/clk/ti/fixed-factor.c4
-rw-r--r--drivers/clk/ti/gate.c5
-rw-r--r--drivers/clk/ti/interface.c2
-rw-r--r--drivers/clk/ti/mux.c33
-rw-r--r--drivers/clk/zynq/clkc.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c100
-rw-r--r--drivers/cpufreq/intel_pstate.c20
-rw-r--r--drivers/cpuidle/governors/menu.c25
-rw-r--r--drivers/edac/Kconfig1
-rw-r--r--drivers/edac/skx_edac.c193
-rw-r--r--drivers/firmware/Kconfig28
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/dmi_scan.c2
-rw-r--r--drivers/firmware/efi/apple-properties.c4
-rw-r--r--drivers/firmware/efi/memmap.c2
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/firmware/memmap.c5
-rw-r--r--drivers/fsi/fsi-sbefifo.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c43
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c85
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c29
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c21
-rw-r--r--drivers/gpu/drm/drm_connector.c11
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c20
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c14
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c29
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-asus.c23
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c139
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h9
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/i2c-core-base.c7
-rw-r--r--drivers/iommu/mtk_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c3
-rw-r--r--drivers/macintosh/smu.c7
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/media/Makefile3
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c260
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c528
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c5
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c5
-rw-r--r--drivers/media/media-device.c24
-rw-r--r--drivers/media/media-request.c501
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c2
-rw-r--r--drivers/media/platform/rcar_drif.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c4
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c7
-rw-r--r--drivers/media/platform/vim2m.c50
-rw-r--r--drivers/media/platform/vivid/vivid-core.c74
-rw-r--r--drivers/media/platform/vivid/vivid-core.h8
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c46
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c12
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c12
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c16
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c10
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c10
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c10
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c10
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c5
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c3
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h1
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c612
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c18
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c50
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c67
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c9
-rw-r--r--drivers/mfd/cros_ec_dev.h13
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/core.c1
-rw-r--r--drivers/misc/lkdtm/lkdtm.h3
-rw-r--r--drivers/misc/lkdtm/stackleak.c73
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c6
-rw-r--r--drivers/mtd/ar7part.c2
-rw-r--r--drivers/net/arcnet/arc-rimi.c2
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/bonding/bond_netlink.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c117
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c19
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h2
-rw-r--r--drivers/net/ethernet/intel/Kconfig18
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c51
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c2
-rw-r--r--drivers/nvme/host/fc.c2
-rw-r--r--drivers/nvme/host/pci.c5
-rw-r--r--drivers/nvme/target/io-cmd-file.c2
-rw-r--r--drivers/of/base.c2
-rw-r--r--drivers/of/fdt.c24
-rw-r--r--drivers/of/of_reserved_mem.c14
-rw-r--r--drivers/of/unittest.c4
-rw-r--r--drivers/platform/chrome/chromeos_tbmc.c2
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c18
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.c3
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_mec.h (renamed from include/linux/mfd/cros_ec_lpc_mec.h)6
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_reg.c3
-rw-r--r--drivers/platform/chrome/cros_ec_lpc_reg.h (renamed from include/linux/mfd/cros_ec_lpc_reg.h)6
-rw-r--r--drivers/platform/x86/Kconfig59
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/acerhdf.c68
-rw-r--r--drivers/platform/x86/asus-wmi.c119
-rw-r--r--drivers/platform/x86/dcdbas.c (renamed from drivers/firmware/dcdbas.c)125
-rw-r--r--drivers/platform/x86/dcdbas.h (renamed from drivers/firmware/dcdbas.h)10
-rw-r--r--drivers/platform/x86/dell-smbios-smm.c2
-rw-r--r--drivers/platform/x86/dell_rbu.c (renamed from drivers/firmware/dell_rbu.c)8
-rw-r--r--drivers/platform/x86/ideapad-laptop.c9
-rw-r--r--drivers/platform/x86/intel-hid.c12
-rw-r--r--drivers/platform/x86/intel-rst.c23
-rw-r--r--drivers/platform/x86/intel-smartconnect.c22
-rw-r--r--drivers/platform/x86/intel-wmi-thunderbolt.c18
-rw-r--r--drivers/platform/x86/intel_atomisp2_pm.c119
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c13
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c7
-rw-r--r--drivers/platform/x86/intel_chtdc_ti_pwrbtn.c1
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c29
-rw-r--r--drivers/platform/x86/intel_ips.c15
-rw-r--r--drivers/platform/x86/intel_ips.h13
-rw-r--r--drivers/platform/x86/intel_menlow.c28
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c18
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c30
-rw-r--r--drivers/platform/x86/intel_oaktrail.c32
-rw-r--r--drivers/platform/x86/intel_pmc_core.c11
-rw-r--r--drivers/platform/x86/intel_pmc_core.h11
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c35
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c12
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c16
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c24
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c12
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c27
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c12
-rw-r--r--drivers/platform/x86/intel_turbo_max_3.c18
-rw-r--r--drivers/platform/x86/lg-laptop.c700
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c107
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/pwm/Kconfig5
-rw-r--r--drivers/pwm/pwm-lpss-platform.c24
-rw-r--r--drivers/pwm/pwm-lpss.c61
-rw-r--r--drivers/pwm/pwm-lpss.h14
-rw-r--r--drivers/pwm/pwm-rcar.c5
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c10
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/sysfs.c12
-rw-r--r--drivers/remoteproc/Kconfig46
-rw-r--r--drivers/remoteproc/Makefile5
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5.c43
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c497
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c (renamed from drivers/remoteproc/qcom_q6v5_pil.c)420
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c (renamed from drivers/remoteproc/qcom_adsp_pil.c)28
-rw-r--r--drivers/remoteproc/remoteproc_core.c595
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c1
-rw-r--r--drivers/remoteproc/remoteproc_internal.h2
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c5
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c14
-rw-r--r--drivers/rpmsg/qcom_glink_native.c3
-rw-r--r--drivers/rpmsg/qcom_glink_smem.c14
-rw-r--r--drivers/rpmsg/qcom_smd.c9
-rw-r--r--drivers/rpmsg/rpmsg_char.c27
-rw-r--r--drivers/s390/char/fs3270.c2
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c2
-rw-r--r--drivers/scsi/3w-9xxx.c12
-rw-r--r--drivers/scsi/3w-sas.c8
-rw-r--r--drivers/scsi/Kconfig6
-rw-r--r--drivers/scsi/aha152x.c14
-rw-r--r--drivers/scsi/mvsas/mv_sas.c6
-rw-r--r--drivers/scsi/pcmcia/aha152x_core.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c8
-rw-r--r--drivers/sfi/sfi_core.c2
-rw-r--r--drivers/staging/android/ion/Kconfig2
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c7
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c2
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c3
-rw-r--r--drivers/staging/media/sunxi/Kconfig15
-rw-r--r--drivers/staging/media/sunxi/Makefile1
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig14
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile3
-rw-r--r--drivers/staging/media/sunxi/cedrus/TODO7
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c431
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h167
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c70
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.h27
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c327
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h30
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c246
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h235
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c542
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.h30
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c6
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_file.c6
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/thermal/thermal_core.c9
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c2
-rw-r--r--drivers/usb/early/xhci-dbc.c14
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/usbip/usbip_common.c2
-rw-r--r--drivers/vfio/Kconfig2
-rw-r--r--drivers/vfio/pci/vfio_pci.c8
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c31
-rw-r--r--drivers/vhost/scsi.c426
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/fbdev/Kconfig59
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/arcfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c43
-rw-r--r--drivers/video/fbdev/aty/atyfb.h12
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c36
-rw-r--r--drivers/video/fbdev/aty/mach64_accel.c32
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/clps711xfb.c314
-rw-r--r--drivers/video/fbdev/core/fbmon.c4
-rw-r--r--drivers/video/fbdev/imsttfb.c2
-rw-r--r--drivers/video/fbdev/leo.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/Kconfig1
-rw-r--r--drivers/video/fbdev/mmp/panel/Kconfig1
-rw-r--r--drivers/video/fbdev/offb.c12
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c55
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Kconfig7
-rw-r--r--drivers/video/fbdev/p9100.c2
-rw-r--r--drivers/video/fbdev/pxa168fb.c3
-rw-r--r--drivers/video/fbdev/sbuslib.c28
-rw-r--r--drivers/video/fbdev/sis/init301.c4
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/udlfb.c141
-rw-r--r--drivers/video/of_display_timing.c2
-rw-r--r--drivers/video/vgastate.c2
-rw-r--r--drivers/virtio/virtio_balloon.c374
-rw-r--r--drivers/xen/Kconfig13
-rw-r--r--drivers/xen/balloon.c5
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/pvcalls-back.c8
-rw-r--r--drivers/xen/swiotlb-xen.c8
-rw-r--r--drivers/xen/xen-balloon.c13
-rw-r--r--drivers/xen/xen-selfballoon.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c6
-rw-r--r--fs/9p/vfs_addr.c4
-rw-r--r--fs/9p/vfs_dir.c2
-rw-r--r--fs/9p/xattr.c4
-rw-r--r--fs/afs/Kconfig12
-rw-r--r--fs/afs/Makefile7
-rw-r--r--fs/afs/addr_list.c209
-rw-r--r--fs/afs/afs.h50
-rw-r--r--fs/afs/cache.c2
-rw-r--r--fs/afs/callback.c17
-rw-r--r--fs/afs/cell.c65
-rw-r--r--fs/afs/cmservice.c287
-rw-r--r--fs/afs/dir.c75
-rw-r--r--fs/afs/dynroot.c4
-rw-r--r--fs/afs/file.c8
-rw-r--r--fs/afs/flock.c22
-rw-r--r--fs/afs/fs_probe.c270
-rw-r--r--fs/afs/fsclient.c583
-rw-r--r--fs/afs/inode.c37
-rw-r--r--fs/afs/internal.h322
-rw-r--r--fs/afs/mntpt.c5
-rw-r--r--fs/afs/proc.c110
-rw-r--r--fs/afs/protocol_yfs.h163
-rw-r--r--fs/afs/rotate.c302
-rw-r--r--fs/afs/rxrpc.c115
-rw-r--r--fs/afs/security.c13
-rw-r--r--fs/afs/server.c145
-rw-r--r--fs/afs/server_list.c6
-rw-r--r--fs/afs/super.c5
-rw-r--r--fs/afs/vl_list.c340
-rw-r--r--fs/afs/vl_probe.c273
-rw-r--r--fs/afs/vl_rotate.c355
-rw-r--r--fs/afs/vlclient.c195
-rw-r--r--fs/afs/volume.c56
-rw-r--r--fs/afs/write.c30
-rw-r--r--fs/afs/xattr.c2
-rw-r--r--fs/afs/yfsclient.c2184
-rw-r--r--fs/bfs/inode.c9
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h8
-rw-r--r--fs/btrfs/delayed-ref.c50
-rw-r--r--fs/btrfs/extent-tree.c37
-rw-r--r--fs/btrfs/file.c15
-rw-r--r--fs/btrfs/free-space-cache.c32
-rw-r--r--fs/btrfs/inode.c15
-rw-r--r--fs/btrfs/ioctl.c50
-rw-r--r--fs/btrfs/transaction.c9
-rw-r--r--fs/btrfs/tree-log.c5
-rw-r--r--fs/buffer.c10
-rw-r--r--fs/ceph/acl.c13
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/caps.c21
-rw-r--r--fs/ceph/file.c578
-rw-r--r--fs/ceph/inode.c13
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/ceph/super.c13
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/ceph/xattr.c3
-rw-r--r--fs/cifs/cifs_debug.c56
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifsfs.c41
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h8
-rw-r--r--fs/cifs/cifspdu.h3
-rw-r--r--fs/cifs/connect.c4
-rw-r--r--fs/cifs/file.c440
-rw-r--r--fs/cifs/inode.c4
-rw-r--r--fs/cifs/misc.c4
-rw-r--r--fs/cifs/smb2ops.c18
-rw-r--r--fs/cifs/smb2pdu.c6
-rw-r--r--fs/cifs/smb2pdu.h35
-rw-r--r--fs/cifs/smbdirect.c17
-rw-r--r--fs/cifs/trace.h42
-rw-r--r--fs/cifs/transport.c8
-rw-r--r--fs/cramfs/inode.c7
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/dlm/lowcomms.c2
-rw-r--r--fs/exofs/super.c7
-rw-r--r--fs/ext4/ext4.h9
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/ext4/page-io.c2
-rw-r--r--fs/fat/dir.c6
-rw-r--r--fs/fat/fat.h4
-rw-r--r--fs/fat/file.c17
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fat/misc.c91
-rw-r--r--fs/fat/namei_msdos.c17
-rw-r--r--fs/fat/namei_vfat.c15
-rw-r--r--fs/fuse/Makefile2
-rw-r--r--fs/fuse/control.c34
-rw-r--r--fs/fuse/dev.c221
-rw-r--r--fs/fuse/dir.c381
-rw-r--r--fs/fuse/file.c160
-rw-r--r--fs/fuse/fuse_i.h124
-rw-r--r--fs/fuse/inode.c53
-rw-r--r--fs/fuse/readdir.c569
-rw-r--r--fs/hfs/brec.c5
-rw-r--r--fs/hfs/btree.c41
-rw-r--r--fs/hfs/btree.h1
-rw-r--r--fs/hfs/catalog.c16
-rw-r--r--fs/hfs/extent.c10
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/attributes.c10
-rw-r--r--fs/hfsplus/brec.c5
-rw-r--r--fs/hfsplus/btree.c44
-rw-r--r--fs/hfsplus/catalog.c24
-rw-r--r--fs/hfsplus/extents.c8
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c1
-rw-r--r--fs/inode.c2
-rw-r--r--fs/ioctl.c13
-rw-r--r--fs/iomap.c3
-rw-r--r--fs/lockd/host.c2
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/dns_resolve.c15
-rw-r--r--fs/nfs/nfs4file.c12
-rw-r--r--fs/nfsd/cache.h20
-rw-r--r--fs/nfsd/export.c14
-rw-r--r--fs/nfsd/export.h2
-rw-r--r--fs/nfsd/netns.h8
-rw-r--r--fs/nfsd/nfs4callback.c98
-rw-r--r--fs/nfsd/nfs4idmap.c11
-rw-r--r--fs/nfsd/nfs4proc.c289
-rw-r--r--fs/nfsd/nfs4state.c41
-rw-r--r--fs/nfsd/nfs4xdr.c50
-rw-r--r--fs/nfsd/nfscache.c142
-rw-r--r--fs/nfsd/nfsctl.c1
-rw-r--r--fs/nfsd/state.h10
-rw-r--r--fs/nfsd/vfs.c17
-rw-r--r--fs/nfsd/xdr4.h28
-rw-r--r--fs/nfsd/xdr4cb.h10
-rw-r--r--fs/ntfs/namei.c2
-rw-r--r--fs/ocfs2/buffer_head_io.c77
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlmglue.c28
-rw-r--r--fs/ocfs2/file.c97
-rw-r--r--fs/ocfs2/journal.c51
-rw-r--r--fs/ocfs2/move_extents.c17
-rw-r--r--fs/ocfs2/refcounttree.c148
-rw-r--r--fs/ocfs2/refcounttree.h24
-rw-r--r--fs/ocfs2/stackglue.c6
-rw-r--r--fs/ocfs2/stackglue.h3
-rw-r--r--fs/orangefs/inode.c2
-rw-r--r--fs/overlayfs/copy_up.c219
-rw-r--r--fs/overlayfs/dir.c34
-rw-r--r--fs/overlayfs/file.c43
-rw-r--r--fs/overlayfs/inode.c17
-rw-r--r--fs/overlayfs/namei.c4
-rw-r--r--fs/overlayfs/overlayfs.h14
-rw-r--r--fs/overlayfs/super.c68
-rw-r--r--fs/overlayfs/util.c46
-rw-r--r--fs/proc/base.c18
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/page.c2
-rw-r--r--fs/proc/vmcore.c4
-rw-r--r--fs/pstore/Kconfig1
-rw-r--r--fs/read_write.c416
-rw-r--r--fs/reiserfs/Makefile9
-rw-r--r--fs/reiserfs/xattr.c7
-rw-r--r--fs/splice.c7
-rw-r--r--fs/xfs/xfs_file.c82
-rw-r--r--fs/xfs/xfs_reflink.c173
-rw-r--r--fs/xfs/xfs_reflink.h15
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/crypto/asym_tpm_subtype.h19
-rw-r--r--include/crypto/public_key.h14
-rw-r--r--include/drm/drm_connector.h71
-rw-r--r--include/dt-bindings/clock/am3.h119
-rw-r--r--include/dt-bindings/clock/am4.h132
-rw-r--r--include/dt-bindings/clock/at91.h15
-rw-r--r--include/dt-bindings/clock/dra7.h326
-rw-r--r--include/dt-bindings/clock/exynos4.h30
-rw-r--r--include/dt-bindings/clock/hi3670-clock.h348
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h3
-rw-r--r--include/dt-bindings/clock/jz4725b-cgu.h35
-rw-r--r--include/dt-bindings/clock/maxim,max77686.h5
-rw-r--r--include/dt-bindings/clock/maxim,max77802.h5
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sdm845.h116
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8960.h2
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8996.h9
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h165
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm660.h156
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h3
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h7
-rw-r--r--include/dt-bindings/clock/r7s9210-cpg-mssr.h20
-rw-r--r--include/dt-bindings/clock/r8a7743-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7744-cpg-mssr.h39
-rw-r--r--include/dt-bindings/clock/r8a7745-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a774a1-cpg-mssr.h58
-rw-r--r--include/dt-bindings/clock/r8a774c0-cpg-mssr.h60
-rw-r--r--include/dt-bindings/clock/r8a7790-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7791-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7792-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7793-clock.h12
-rw-r--r--include/dt-bindings/clock/r8a7793-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h8
-rw-r--r--include/dt-bindings/clock/r8a7794-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7795-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7796-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a77970-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a77995-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/renesas-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h3
-rw-r--r--include/dt-bindings/clock/samsung,s2mps11.h5
-rw-r--r--include/dt-bindings/clock/samsung,s3c64xx-clock.h7
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h1
-rw-r--r--include/dt-bindings/reset/actions,s700-reset.h34
-rw-r--r--include/dt-bindings/reset/actions,s900-reset.h65
-rw-r--r--include/keys/asymmetric-subtype.h9
-rw-r--r--include/keys/trusted.h (renamed from security/keys/trusted.h)14
-rw-r--r--include/linux/adxl.h5
-rw-r--r--include/linux/avf/virtchnl.h12
-rw-r--r--include/linux/bio.h26
-rw-r--r--include/linux/bitmap.h37
-rw-r--r--include/linux/bitops.h30
-rw-r--r--include/linux/blk-cgroup.h145
-rw-r--r--include/linux/blk_types.h1
-rw-r--r--include/linux/bootmem.h404
-rw-r--r--include/linux/bpf_verifier.h3
-rw-r--r--include/linux/ceph/libceph.h8
-rw-r--r--include/linux/ceph/messenger.h24
-rw-r--r--include/linux/ceph/msgpool.h11
-rw-r--r--include/linux/ceph/osd_client.h22
-rw-r--r--include/linux/ceph/pagelist.h11
-rw-r--r--include/linux/ceph/rados.h28
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clk-provider.h9
-rw-r--r--include/linux/clk.h90
-rw-r--r--include/linux/clk/renesas.h8
-rw-r--r--include/linux/clk/ti.h7
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h75
-rw-r--r--include/linux/compiler-intel.h9
-rw-r--r--include/linux/compiler.h24
-rw-r--r--include/linux/compiler_attributes.h258
-rw-r--r--include/linux/compiler_types.h100
-rw-r--r--include/linux/fs.h55
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/hmm.h33
-rw-r--r--include/linux/inetdevice.h4
-rw-r--r--include/linux/key-type.h11
-rw-r--r--include/linux/keyctl.h46
-rw-r--r--include/linux/memblock.h165
-rw-r--r--include/linux/memory_hotplug.h4
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mfd/cros_ec.h214
-rw-r--r--include/linux/mfd/cros_ec_commands.h295
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/notifier.h3
-rw-r--r--include/linux/percpu-defs.h6
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h101
-rw-r--r--include/linux/rbtree_augmented.h4
-rw-r--r--include/linux/remoteproc.h47
-rw-r--r--include/linux/sched.h5
-rw-r--r--include/linux/sched/stat.h1
-rw-r--r--include/linux/signal.h6
-rw-r--r--include/linux/stackleak.h35
-rw-r--r--include/linux/sunrpc/cache.h18
-rw-r--r--include/linux/sunrpc/svc_rdma.h13
-rw-r--r--include/linux/sunrpc/svcauth.h1
-rw-r--r--include/linux/trace_events.h3
-rw-r--r--include/linux/uio.h65
-rw-r--r--include/linux/uprobes.h5
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/media/media-device.h29
-rw-r--r--include/media/media-request.h442
-rw-r--r--include/media/v4l2-ctrls.h141
-rw-r--r--include/media/v4l2-device.h11
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/videobuf2-core.h64
-rw-r--r--include/media/videobuf2-v4l2.h20
-rw-r--r--include/net/af_unix.h4
-rw-r--r--include/trace/events/afs.h213
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/fuse.h119
-rw-r--r--include/uapi/linux/keyctl.h30
-rw-r--r--include/uapi/linux/media.h8
-rw-r--r--include/uapi/linux/v4l2-controls.h65
-rw-r--r--include/uapi/linux/vfio.h50
-rw-r--r--include/uapi/linux/videodev2.h33
-rw-r--r--include/uapi/linux/virtio_balloon.h8
-rw-r--r--include/video/udlfb.h3
-rw-r--r--init/do_mounts.c31
-rw-r--r--init/main.c15
-rw-r--r--ipc/ipc_sysctl.c30
-rw-r--r--ipc/util.h9
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/bounds.c4
-rw-r--r--kernel/bpf/verifier.c21
-rw-r--r--kernel/cgroup/cgroup.c48
-rw-r--r--kernel/configs/kvm_guest.config1
-rw-r--r--kernel/dma/direct.c2
-rw-r--r--kernel/dma/swiotlb.c8
-rw-r--r--kernel/events/core.c49
-rw-r--r--kernel/events/uprobes.c278
-rw-r--r--kernel/fail_function.c3
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/hung_task.c30
-rw-r--r--kernel/kexec_file.c2
-rw-r--r--kernel/locking/qspinlock_paravirt.h2
-rw-r--r--kernel/panic.c10
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/printk/printk.c5
-rw-r--r--kernel/profile.c2
-rw-r--r--kernel/sched/core.c34
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/stackleak.c132
-rw-r--r--kernel/sysctl.c16
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/trace/trace.c12
-rw-r--r--kernel/trace/trace_event_perf.c7
-rw-r--r--kernel/trace/trace_events_hist.c4
-rw-r--r--kernel/trace/trace_kprobe.c412
-rw-r--r--kernel/trace/trace_printk.c2
-rw-r--r--kernel/trace/trace_probe.c672
-rw-r--r--kernel/trace/trace_probe.h289
-rw-r--r--kernel/trace/trace_probe_tmpl.h216
-rw-r--r--kernel/trace/trace_stack.c2
-rw-r--r--kernel/trace/trace_uprobe.c255
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/Makefile1
-rw-r--r--lib/bitmap.c22
-rw-r--r--lib/cpumask.c4
-rw-r--r--lib/iov_iter.c125
-rw-r--r--lib/kstrtox.c16
-rw-r--r--lib/lz4/lz4_decompress.c481
-rw-r--r--lib/lz4/lz4defs.h9
-rw-r--r--lib/parser.c16
-rw-r--r--lib/sg_pool.c7
-rw-r--r--lib/udivmoddi4.c310
-rw-r--r--lib/umoddi3.c32
-rw-r--r--lib/zlib_inflate/inflate.c12
-rw-r--r--mm/Kconfig9
-rw-r--r--mm/Makefile8
-rw-r--r--mm/bootmem.c811
-rw-r--r--mm/filemap.c148
-rw-r--r--mm/gup.c4
-rw-r--r--mm/gup_benchmark.c3
-rw-r--r--mm/hmm.c134
-rw-r--r--mm/huge_memory.c38
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/internal.h2
-rw-r--r--mm/kasan/kasan_init.c7
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/memblock.c164
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory.c9
-rw-r--r--mm/memory_hotplug.c61
-rw-r--r--mm/mempolicy.c35
-rw-r--r--mm/nobootmem.c445
-rw-r--r--mm/page_alloc.c20
-rw-r--r--mm/page_ext.c6
-rw-r--r--mm/page_idle.c2
-rw-r--r--mm/page_io.c4
-rw-r--r--mm/page_owner.c2
-rw-r--r--mm/page_poison.c8
-rw-r--r--mm/page_vma_mapped.c24
-rw-r--r--mm/percpu.c50
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/sparse-vmemmap.c6
-rw-r--r--mm/sparse.c19
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/bluetooth/6lowpan.c2
-rw-r--r--net/bluetooth/a2mp.c2
-rw-r--r--net/bluetooth/smp.c2
-rw-r--r--net/ceph/messenger.c113
-rw-r--r--net/ceph/msgpool.c27
-rw-r--r--net/ceph/osd_client.c363
-rw-r--r--net/ceph/pagelist.c20
-rw-r--r--net/core/rtnetlink.c10
-rw-r--r--net/ipv4/igmp.c53
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_bpf.c1
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/openvswitch/flow_netlink.c4
-rw-r--r--net/sctp/associola.c10
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/smc/smc_clc.c4
-rw-r--r--net/socket.c6
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c41
-rw-r--r--net/sunrpc/cache.c153
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/svcauth.c74
-rw-r--r--net/sunrpc/svcauth_unix.c24
-rw-r--r--net/sunrpc/svcsock.c55
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c23
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c10
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/tls/tls_device.c4
-rw-r--r--net/tls/tls_sw.c4
-rw-r--r--net/xfrm/Kconfig1
-rw-r--r--net/xfrm/xfrm_hash.c2
-rw-r--r--samples/vfio-mdev/mbochs.c136
-rw-r--r--scripts/Kbuild.include8
-rw-r--r--scripts/Makefile.extrawarn3
-rw-r--r--scripts/Makefile.gcc-plugins10
-rwxr-xr-xscripts/checkpatch.pl11
-rw-r--r--scripts/gcc-plugins/Kconfig51
-rw-r--r--scripts/gcc-plugins/stackleak_plugin.c427
-rw-r--r--scripts/kconfig/Makefile16
-rw-r--r--scripts/kconfig/conf.c7
-rwxr-xr-xscripts/kconfig/merge_config.sh6
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/file.c2
-rw-r--r--security/apparmor/include/cred.h2
-rw-r--r--security/apparmor/include/net.h10
-rw-r--r--security/apparmor/include/policy.h3
-rw-r--r--security/apparmor/include/secid.h3
-rw-r--r--security/apparmor/lib.c6
-rw-r--r--security/apparmor/lsm.c130
-rw-r--r--security/apparmor/net.c83
-rw-r--r--security/apparmor/policy.c3
-rw-r--r--security/apparmor/policy_unpack.c61
-rw-r--r--security/apparmor/secid.c3
-rw-r--r--security/keys/Makefile1
-rw-r--r--security/keys/compat.c18
-rw-r--r--security/keys/internal.h39
-rw-r--r--security/keys/keyctl.c24
-rw-r--r--security/keys/keyctl_pkey.c323
-rw-r--r--security/keys/trusted.c14
-rw-r--r--sound/firewire/amdtp-stream.c57
-rw-r--r--sound/firewire/dice/dice.c4
-rw-r--r--sound/pci/ca0106/ca0106.h2
-rw-r--r--tools/lib/bpf/libbpf.c13
-rw-r--r--tools/perf/util/probe-event.c39
-rw-r--r--tools/perf/util/probe-event.h1
-rw-r--r--tools/perf/util/probe-file.c34
-rw-r--r--tools/perf/util/probe-file.h1
-rw-r--r--tools/perf/util/symbol-elf.c46
-rw-r--r--tools/perf/util/symbol.h7
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh3
-rwxr-xr-xtools/testing/selftests/bpf/test_sock_addr.sh3
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c321
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh95
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc12
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile9
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile13
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c4
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/security/rfi_flush.c18
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/utils.c6
1457 files changed, 51403 insertions, 16820 deletions
diff --git a/.mailmap b/.mailmap
index 89f532caf639..a76be45fef6c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -160,6 +160,11 @@ Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
+Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
+Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
+Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
+Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
diff --git a/Documentation/ABI/testing/sysfs-platform-lg-laptop b/Documentation/ABI/testing/sysfs-platform-lg-laptop
new file mode 100644
index 000000000000..cf47749b19df
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-lg-laptop
@@ -0,0 +1,35 @@
+What: /sys/devices/platform/lg-laptop/reader_mode
+Date: October 2018
+KernelVersion: 4.20
+Contact: "Matan Ziv-Av <matan@svgalib.org>
+Description:
+ Control reader mode. 1 means on, 0 means off.
+
+What: /sys/devices/platform/lg-laptop/fn_lock
+Date: October 2018
+KernelVersion: 4.20
+Contact: "Matan Ziv-Av <matan@svgalib.org>
+Description:
+ Control FN lock mode. 1 means on, 0 means off.
+
+What: /sys/devices/platform/lg-laptop/battery_care_limit
+Date: October 2018
+KernelVersion: 4.20
+Contact: "Matan Ziv-Av <matan@svgalib.org>
+Description:
+ Maximal battery charge level. Accepted values are 80 or 100.
+
+What: /sys/devices/platform/lg-laptop/fan_mode
+Date: October 2018
+KernelVersion: 4.20
+Contact: "Matan Ziv-Av <matan@svgalib.org>
+Description:
+ Control fan mode. 1 for performance mode, 0 for silent mode.
+
+What: /sys/devices/platform/lg-laptop/usb_charge
+Date: October 2018
+KernelVersion: 4.20
+Contact: "Matan Ziv-Av <matan@svgalib.org>
+Description:
+ Control USB port charging when device is turned off.
+ 1 means on, 0 means off.
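A minimal user-space sketch of driving one of these attributes; the path and the accepted value follow the ABI entries above, and the helper itself (example program name, error handling) is purely illustrative:

  #include <stdio.h>
  #include <stdlib.h>

  /* Hedged sketch: cap battery charging at 80% via the sysfs attribute
   * documented above. Requires the lg-laptop platform driver to be bound. */
  int main(void)
  {
          const char *attr = "/sys/devices/platform/lg-laptop/battery_care_limit";
          FILE *f = fopen(attr, "w");

          if (!f) {
                  perror("fopen");
                  return EXIT_FAILURE;
          }
          fprintf(f, "80\n");     /* accepted values are 80 or 100 */
          if (fclose(f) != 0) {
                  perror("fclose");
                  return EXIT_FAILURE;
          }
          return EXIT_SUCCESS;
  }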
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 8384c681a4b2..476722b7b636 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1879,10 +1879,8 @@ following two functions.
wbc_init_bio(@wbc, @bio)
Should be called for each bio carrying writeback data and
- associates the bio with the inode's owner cgroup and the
- corresponding request queue. This must be called after
- a queue (device) has been associated with the bio and
- before submission.
+ associates the bio with the inode's owner cgroup. Can be
+ called anytime between bio allocation and submission.
wbc_account_io(@wbc, @page, @bytes)
Should be called for each data segment being written out.
@@ -1901,7 +1899,7 @@ the configuration, the bio may be executed at a lower priority and if
the writeback session is holding shared resources, e.g. a journal
entry, may lead to priority inversion. There is no one easy solution
for the problem. Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
+cases by skipping wbc_init_bio() or using bio_associate_blkcg()
directly.
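As a rough illustration of the call order described above, here is a hedged sketch of a per-page writeback helper. Real filesystems batch many pages per bio and attach the wbc to the inode first (wbc_attach_and_unlock_inode()); the function name and the sector argument standing in for the filesystem's block mapping are assumptions of this sketch:

  #include <linux/bio.h>
  #include <linux/fs.h>
  #include <linux/writeback.h>

  /* Hedged sketch only: submit one page of writeback and charge it to the
   * owning cgroup. */
  static void example_submit_wb_page(struct writeback_control *wbc,
                                     struct inode *inode, struct page *page,
                                     sector_t sector)
  {
          struct bio *bio = bio_alloc(GFP_NOFS, 1);

          bio_set_dev(bio, inode->i_sb->s_bdev);
          bio->bi_iter.bi_sector = sector;
          bio_add_page(bio, page, PAGE_SIZE, 0);
          bio_set_op_attrs(bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));

          /* Associate the bio with the inode's owner cgroup; with the updated
           * semantics this may happen any time before submission. */
          wbc_init_bio(wbc, bio);

          /* Account the written bytes for cgroup writeback control. */
          wbc_account_io(wbc, page, PAGE_SIZE);

          submit_bio(bio);
  }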
diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
index 25157aec5b31..5c4432c96c4b 100644
--- a/Documentation/admin-guide/mm/memory-hotplug.rst
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -5,7 +5,7 @@ Memory Hotplug
==============
:Created: Jul 28 2007
-:Updated: Add description of notifier of memory hotplug: Oct 11 2007
+:Updated: Add some details about locking internals: Aug 20 2018
This document is about memory hotplug including how-to-use and current status.
Because Memory Hotplug is still under development, contents of this text will
@@ -392,6 +392,46 @@ Need more implementation yet....
- Notification completion of remove works by OS to firmware.
- Guard from remove if not yet.
+
+Locking Internals
+=================
+
+When adding/removing memory that uses memory block devices (i.e. ordinary RAM),
+the device_hotplug_lock should be held to:
+
+- synchronize against online/offline requests (e.g. via sysfs). This way, memory
+ block devices can only be accessed (.online/.state attributes) by user
+ space once memory has been fully added. And when removing memory, we
+ know nobody is in critical sections.
+- synchronize against CPU hotplug and similar (e.g. relevant for ACPI and PPC)
+
+Especially, there is a possible lock inversion that is avoided using
+device_hotplug_lock when adding memory and user space tries to online that
+memory faster than expected:
+
+- device_online() will first take the device_lock(), followed by
+ mem_hotplug_lock
+- add_memory_resource() will first take the mem_hotplug_lock, followed by
+ the device_lock() (while creating the devices, during bus_add_device()).
+
+As the device is visible to user space before taking the device_lock(), this
+can result in a lock inversion.
+
+Onlining/offlining of memory should be done via device_online()/
+device_offline(), to make sure it is properly synchronized with actions
+via sysfs. Holding device_hotplug_lock is advised (e.g. to protect online_type).
+
+When adding/removing/onlining/offlining memory or adding/removing
+heterogeneous/device memory, we should always hold the mem_hotplug_lock in
+write mode to serialise memory hotplug (e.g. access to global/zone
+variables).
+
+In addition, mem_hotplug_lock (in contrast to device_hotplug_lock) in read
+mode allows for a quite efficient get_online_mems/put_online_mems
+implementation, so code accessing memory can protect itself against that
+memory vanishing.
+
+
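A hedged reader-side sketch of the get_online_mems()/put_online_mems() pattern mentioned above; the walk itself is elided, and lock_device_hotplug()/mem_hotplug_begin() remain the corresponding writer-side primitives:

  #include <linux/memory_hotplug.h>

  /* Hedged sketch: take mem_hotplug_lock in read mode around code that
   * inspects memory which could otherwise be hot-removed underneath it. */
  static void example_walk_memory(void)
  {
          get_online_mems();      /* mem_hotplug_lock, read mode */

          /* walk zones/sections that must not vanish concurrently */

          put_online_mems();
  }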
Future Work
===========
diff --git a/Documentation/core-api/boot-time-mm.rst b/Documentation/core-api/boot-time-mm.rst
index 6e12e89a03e0..e5ec9f1a563d 100644
--- a/Documentation/core-api/boot-time-mm.rst
+++ b/Documentation/core-api/boot-time-mm.rst
@@ -5,54 +5,23 @@ Boot time memory management
Early system initialization cannot use "normal" memory management
simply because it is not set up yet. But there is still need to
allocate memory for various data structures, for instance for the
-physical page allocator. To address this, a specialized allocator
-called the :ref:`Boot Memory Allocator <bootmem>`, or bootmem, was
-introduced. Several years later PowerPC developers added a "Logical
-Memory Blocks" allocator, which was later adopted by other
-architectures and renamed to :ref:`memblock <memblock>`. There is also
-a compatibility layer called `nobootmem` that translates bootmem
-allocation interfaces to memblock calls.
+physical page allocator.
-The selection of the early allocator is done using
-``CONFIG_NO_BOOTMEM`` and ``CONFIG_HAVE_MEMBLOCK`` kernel
-configuration options. These options are enabled or disabled
-statically by the architectures' Kconfig files.
-
-* Architectures that rely only on bootmem select
- ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=n``.
-* The users of memblock with the nobootmem compatibility layer set
- ``CONFIG_NO_BOOTMEM=y && CONFIG_HAVE_MEMBLOCK=y``.
-* And for those that use both memblock and bootmem the configuration
- includes ``CONFIG_NO_BOOTMEM=n && CONFIG_HAVE_MEMBLOCK=y``.
-
-Whichever allocator is used, it is the responsibility of the
-architecture specific initialization to set it up in
-:c:func:`setup_arch` and tear it down in :c:func:`mem_init` functions.
+A specialized allocator called ``memblock`` performs the
+boot time memory management. The architecture specific initialization
+must set it up in :c:func:`setup_arch` and tear it down in
+:c:func:`mem_init` functions.
Once the early memory management is available it offers a variety of
functions and macros for memory allocations. The allocation request
may be directed to the first (and probably the only) node or to a
particular node in a NUMA system. There are API variants that panic
-when an allocation fails and those that don't. And more recent and
-advanced memblock even allows controlling its own behaviour.
-
-.. _bootmem:
-
-Bootmem
-=======
+when an allocation fails and those that don't.
-(mostly stolen from Mel Gorman's "Understanding the Linux Virtual
-Memory Manager" `book`_)
+Memblock also offers a variety of APIs that control its own behaviour.
-.. _book: https://www.kernel.org/doc/gorman/
-
-.. kernel-doc:: mm/bootmem.c
- :doc: bootmem overview
-
-.. _memblock:
-
-Memblock
-========
+Memblock Overview
+=================
.. kernel-doc:: mm/memblock.c
:doc: memblock overview
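For orientation, a hedged sketch of typical early usage from an architecture's setup_arch(). Helper names and return types have shifted between kernel versions (e.g. memblock_virt_alloc() vs. memblock_alloc()), so treat the allocation call, and the example function itself, as illustrative assumptions rather than a fixed API reference:

  #include <linux/kernel.h>
  #include <linux/memblock.h>
  #include <linux/sizes.h>

  /* Hedged sketch: register RAM, reserve a firmware region, then grab an
   * early page-sized allocation before the page allocator exists. In some
   * kernel versions the zeroing virtual allocator is named
   * memblock_virt_alloc() and memblock_alloc() returns a physical address. */
  static void __init example_early_mm_setup(phys_addr_t base, phys_addr_t size)
  {
          void *table;

          memblock_add(base, size);               /* usable RAM */
          memblock_reserve(base, SZ_1M);          /* firmware/boot data */

          table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
          if (!table)
                  panic("%s: early allocation failed\n", __func__);
  }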
@@ -61,26 +30,6 @@ Memblock
Functions and structures
========================
-Common API
-----------
-
-The functions that are described in this section are available
-regardless of what early memory manager is enabled.
-
-.. kernel-doc:: mm/nobootmem.c
-
-Bootmem specific API
---------------------
-
-These interfaces available only with bootmem, i.e when ``CONFIG_NO_BOOTMEM=n``
-
-.. kernel-doc:: include/linux/bootmem.h
-.. kernel-doc:: mm/bootmem.c
- :functions:
-
-Memblock specific API
----------------------
-
Here is the description of memblock data structures, functions and
macros. Some of them are actually internal, but since they are
documented it would be silly to omit them. Besides, reading the
diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt
index 5969bf42562a..8763866b11cf 100644
--- a/Documentation/crypto/asymmetric-keys.txt
+++ b/Documentation/crypto/asymmetric-keys.txt
@@ -183,6 +183,10 @@ and looks like the following:
void (*describe)(const struct key *key, struct seq_file *m);
void (*destroy)(void *payload);
+ int (*query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+ int (*eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
int (*verify_signature)(const struct key *key,
const struct public_key_signature *sig);
};
@@ -207,12 +211,22 @@ There are a number of operations defined by the subtype:
asymmetric key will look after freeing the fingerprint and releasing the
reference on the subtype module.
- (3) verify_signature().
+ (3) query().
- Optional. These are the entry points for the key usage operations.
- Currently there is only the one defined. If not set, the caller will be
- given -ENOTSUPP. The subtype may do anything it likes to implement an
- operation, including offloading to hardware.
+ Mandatory. This is a function for querying the capabilities of a key.
+
+ (4) eds_op().
+
+ Optional. This is the entry point for the encryption, decryption and
+ signature creation operations (which are distinguished by the operation ID
+ in the parameter struct). The subtype may do anything it likes to
+ implement an operation, including offloading to hardware.
+
+ (5) verify_signature().
+
+ Optional. This is the entry point for signature verification. The
+ subtype may do anything it likes to implement an operation, including
+ offloading to hardware.
==========================
@@ -234,6 +248,8 @@ Examples of blob formats for which parsers could be implemented include:
- X.509 ASN.1 stream.
- Pointer to TPM key.
- Pointer to UEFI key.
+ - PKCS#8 private key [RFC 5208].
+ - PKCS#5 encrypted private key [RFC 2898].
During key instantiation each parser in the list is tried until one doesn't
return -EBADMSG.
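To make the new query() and eds_op() hooks concrete, here is a hedged sketch of a subtype wiring them up. The other operations and any real crypto are omitted, and the header paths and "example" names are assumptions of this sketch:

  #include <linux/errno.h>
  #include <linux/keyctl.h>
  #include <keys/asymmetric-subtype.h>

  /* Hedged sketch: placeholder implementations of the two new operations. */
  static int example_query(const struct kernel_pkey_params *params,
                           struct kernel_pkey_query *info)
  {
          /* Fill in key size and supported-operation flags here. */
          return 0;
  }

  static int example_eds_op(struct kernel_pkey_params *params,
                            const void *in, void *out)
  {
          /* Encrypt, decrypt or sign depending on the operation ID carried
           * in @params; offloading to hardware is also allowed. */
          return -EOPNOTSUPP;
  }

  static struct asymmetric_key_subtype example_subtype = {
          .name           = "example",
          .name_len       = sizeof("example") - 1,
          .query          = example_query,
          .eds_op         = example_eds_op,
          /* .describe, .destroy and .verify_signature omitted for brevity */
  };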
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 9b5685a1d15d..84262cdb8d29 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -59,9 +59,11 @@ mhz values (normalized w.r.t. the highest value found while parsing the DT).
===========================================
Example 1 (ARM 64-bit, 6-cpu system, two clusters):
-capacities-dmips-mhz are scaled w.r.t. 1024 (cpu@0 and cpu@1)
-supposing cluster0@max-freq=1100 and custer1@max-freq=850,
-final capacities are 1024 for cluster0 and 446 for cluster1
+The capacities-dmips-mhz or DMIPS/MHz values (scaled to 1024)
+are 1024 and 578 for cluster0 and cluster1. Further normalization
+is done by the operating system based on cluster0@max-freq=1100 and
+cluster1@max-freq=850; the final capacities are 1024 for cluster0 and
+446 for cluster1 (578*850/1100).
cpus {
#address-cells = <2>;
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
index 1333db9acfee..7f696362a4a1 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
@@ -21,10 +21,29 @@ PROPERTIES
the register region. An optional second element specifies
the base address and size of the alias register region.
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the pll parents.
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "pll8_vote", "pxo".
+
+- clock-output-names:
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the output clock. Typically acpuX_aux where X is a
+ CPU number starting at 0.
+
Example:
clock-controller@2088000 {
compatible = "qcom,kpss-acc-v2";
reg = <0x02088000 0x1000>,
<0x02008000 0x1000>;
+ clocks = <&gcc PLL8_VOTE>, <&gcc PXO_SRC>;
+ clock-names = "pll8_vote", "pxo";
+ clock-output-names = "acpu0_aux";
};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt
new file mode 100644
index 000000000000..e628758950e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt
@@ -0,0 +1,44 @@
+Krait Processor Sub-system (KPSS) Global Clock Controller (GCC)
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: should be one of the following. The generic compatible
+ "qcom,kpss-gcc" should also be included.
+ "qcom,kpss-gcc-ipq8064", "qcom,kpss-gcc"
+ "qcom,kpss-gcc-apq8064", "qcom,kpss-gcc"
+ "qcom,kpss-gcc-msm8974", "qcom,kpss-gcc"
+ "qcom,kpss-gcc-msm8960", "qcom,kpss-gcc"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: base address and size of the register region
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the pll parents.
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "pll8_vote", "pxo".
+
+- clock-output-names:
+ Usage: required
+ Value type: <string>
+ Definition: Name of the output clock. Typically acpu_l2_aux indicating
+ an L2 cache auxiliary clock.
+
+Example:
+
+ l2cc: clock-controller@2011000 {
+ compatible = "qcom,kpss-gcc-ipq8064", "qcom,kpss-gcc";
+ reg = <0x2011000 0x1000>;
+ clocks = <&gcc PLL8_VOTE>, <&gcc PXO_SRC>;
+ clock-names = "pll8_vote", "pxo";
+ clock-output-names = "acpu_l2_aux";
+ };
diff --git a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
index d1e60d297387..2ef86ae96df8 100644
--- a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
+++ b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
@@ -13,6 +13,7 @@ Required Properties:
region.
- clocks: Reference to the parent clocks ("hosc", "losc")
- #clock-cells: should be 1.
+- #reset-cells: should be 1.
Each clock is assigned an identifier, and client nodes can use this identifier
to specify the clock which they consume.
@@ -36,6 +37,7 @@ Example: Clock Management Unit node:
reg = <0x0 0xe0160000 0x0 0x1000>;
clocks = <&hosc>, <&losc>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
Example: UART controller node that consumes clock generated by the clock
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index 8f8f95056f3d..e9f70fcdfe80 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -4,6 +4,8 @@ This binding uses the common clock binding[1].
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+Slow Clock controller:
+
Required properties:
- compatible : shall be one of the following:
"atmel,at91sam9x5-sckc" or
@@ -16,84 +18,6 @@ Required properties:
"atmel,at91sam9x5-clk-slow-rc-osc":
at91 internal slow RC oscillator
-
- "atmel,<chip>-pmc":
- at91 PMC (Power Management Controller)
- All at91 specific clocks (clocks defined below) must be child
- node of the PMC node.
- <chip> can be: at91rm9200, at91sam9260, at91sam9261,
- at91sam9263, at91sam9g45, at91sam9n12, at91sam9rl, at91sam9x5,
- sama5d2, sama5d3 or sama5d4.
-
- "atmel,at91sam9x5-clk-slow" (under sckc node)
- or
- "atmel,at91sam9260-clk-slow" (under pmc node):
- at91 slow clk
-
- "atmel,at91rm9200-clk-main-osc"
- "atmel,at91sam9x5-clk-main-rc-osc"
- at91 main clk sources
-
- "atmel,at91sam9x5-clk-main"
- "atmel,at91rm9200-clk-main":
- at91 main clock
-
- "atmel,at91rm9200-clk-master" or
- "atmel,at91sam9x5-clk-master":
- at91 master clock
-
- "atmel,at91sam9x5-clk-peripheral" or
- "atmel,at91rm9200-clk-peripheral":
- at91 peripheral clocks
-
- "atmel,at91rm9200-clk-pll" or
- "atmel,at91sam9g45-clk-pll" or
- "atmel,at91sam9g20-clk-pllb" or
- "atmel,sama5d3-clk-pll":
- at91 pll clocks
-
- "atmel,at91sam9x5-clk-plldiv":
- at91 plla divisor
-
- "atmel,at91rm9200-clk-programmable" or
- "atmel,at91sam9g45-clk-programmable" or
- "atmel,at91sam9x5-clk-programmable":
- at91 programmable clocks
-
- "atmel,at91sam9x5-clk-smd":
- at91 SMD (Soft Modem) clock
-
- "atmel,at91rm9200-clk-system":
- at91 system clocks
-
- "atmel,at91rm9200-clk-usb" or
- "atmel,at91sam9x5-clk-usb" or
- "atmel,at91sam9n12-clk-usb":
- at91 usb clock
-
- "atmel,at91sam9x5-clk-utmi":
- at91 utmi clock
-
- "atmel,sama5d4-clk-h32mx":
- at91 h32mx clock
-
- "atmel,sama5d2-clk-generated":
- at91 generated clock
-
- "atmel,sama5d2-clk-audio-pll-frac":
- at91 audio fractional pll
-
- "atmel,sama5d2-clk-audio-pll-pad":
- at91 audio pll CLK_AUDIO output pin
-
- "atmel,sama5d2-clk-audio-pll-pmc"
- at91 audio pll output on AUDIOPLLCLK that feeds the PMC
- and can be used by peripheral clock or generic clock
-
- "atmel,sama5d2-clk-i2s-mux" (under pmc node):
- at91 I2S clock source selection
-
-Required properties for SCKC node:
- reg : defines the IO memory reserved for the SCKC.
- #size-cells : shall be 0 (reg is used to encode clk id).
- #address-cells : shall be 1 (reg is used to encode clk id).
@@ -109,428 +33,30 @@ For example:
/* put at91 slow clocks here */
};
+Power Management Controller (PMC):
-Required properties for internal slow RC oscillator:
-- #clock-cells : from common clock binding; shall be set to 0.
-- clock-frequency : define the internal RC oscillator frequency.
-
-Optional properties:
-- clock-accuracy : define the internal RC oscillator accuracy.
-
-For example:
- slow_rc_osc: slow_rc_osc {
- compatible = "atmel,at91sam9x5-clk-slow-rc-osc";
- clock-frequency = <32768>;
- clock-accuracy = <50000000>;
- };
-
-Required properties for slow oscillator:
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall encode the main osc source clk sources (see atmel datasheet).
+Required properties:
+- compatible : shall be "atmel,<chip>-pmc", "syscon":
+ <chip> can be: at91rm9200, at91sam9260, at91sam9261,
+ at91sam9263, at91sam9g45, at91sam9n12, at91sam9rl, at91sam9g15,
+ at91sam9g25, at91sam9g35, at91sam9x25, at91sam9x35, at91sam9x5,
+ sama5d2, sama5d3 or sama5d4.
+- #clock-cells : from common clock binding; shall be set to 2. The first entry
+ is the type of the clock (core, system, peripheral or generated) and the
+ second entry its index as provided by the datasheet
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names: Must include the following entries: "slow_clk", "main_xtal"
Optional properties:
- atmel,osc-bypass : boolean property. Set this when a clock signal is directly
provided on XIN.
For example:
- slow_osc: slow_osc {
- compatible = "atmel,at91rm9200-clk-slow-osc";
- #clock-cells = <0>;
- clocks = <&slow_xtal>;
- };
-
-Required properties for slow clock:
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall encode the slow clk sources (see atmel datasheet).
-
-For example:
- clk32k: slck {
- compatible = "atmel,at91sam9x5-clk-slow";
- #clock-cells = <0>;
- clocks = <&slow_rc_osc &slow_osc>;
- };
-
-Required properties for PMC node:
-- reg : defines the IO memory reserved for the PMC.
-- #size-cells : shall be 0 (reg is used to encode clk id).
-- #address-cells : shall be 1 (reg is used to encode clk id).
-- interrupts : shall be set to PMC interrupt line.
-- interrupt-controller : tell that the PMC is an interrupt controller.
-- #interrupt-cells : must be set to 1. The first cell encodes the interrupt id,
- and reflect the bit position in the PMC_ER/DR/SR registers.
- You can use the dt macros defined in dt-bindings/clock/at91.h.
- 0 (AT91_PMC_MOSCS) -> main oscillator ready
- 1 (AT91_PMC_LOCKA) -> PLL A ready
- 2 (AT91_PMC_LOCKB) -> PLL B ready
- 3 (AT91_PMC_MCKRDY) -> master clock ready
- 6 (AT91_PMC_LOCKU) -> UTMI PLL clock ready
- 8 .. 15 (AT91_PMC_PCKRDY(id)) -> programmable clock ready
- 16 (AT91_PMC_MOSCSELS) -> main oscillator selected
- 17 (AT91_PMC_MOSCRCS) -> RC main oscillator stabilized
- 18 (AT91_PMC_CFDEV) -> clock failure detected
-
-For example:
- pmc: pmc@fffffc00 {
- compatible = "atmel,sama5d3-pmc";
- interrupts = <1 4 7>;
- interrupt-controller;
- #interrupt-cells = <2>;
- #size-cells = <0>;
- #address-cells = <1>;
-
- /* put at91 clocks here */
- };
-
-Required properties for main clock internal RC oscillator:
-- interrupts : shall be set to "<0>".
-- clock-frequency : define the internal RC oscillator frequency.
-
-Optional properties:
-- clock-accuracy : define the internal RC oscillator accuracy.
-
-For example:
- main_rc_osc: main_rc_osc {
- compatible = "atmel,at91sam9x5-clk-main-rc-osc";
- interrupt-parent = <&pmc>;
- interrupts = <0>;
- clock-frequency = <12000000>;
- clock-accuracy = <50000000>;
- };
-
-Required properties for main clock oscillator:
-- interrupts : shall be set to "<0>".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall encode the main osc source clk sources (see atmel datasheet).
-
-Optional properties:
-- atmel,osc-bypass : boolean property. Specified if a clock signal is provided
- on XIN.
-
- clock signal is directly provided on XIN pin.
-
-For example:
- main_osc: main_osc {
- compatible = "atmel,at91rm9200-clk-main-osc";
- interrupt-parent = <&pmc>;
- interrupts = <0>;
- #clock-cells = <0>;
- clocks = <&main_xtal>;
- };
-
-Required properties for main clock:
-- interrupts : shall be set to "<0>".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall encode the main clk sources (see atmel datasheet).
-
-For example:
- main: mainck {
- compatible = "atmel,at91sam9x5-clk-main";
- interrupt-parent = <&pmc>;
- interrupts = <0>;
- #clock-cells = <0>;
- clocks = <&main_rc_osc &main_osc>;
- };
-
-Required properties for master clock:
-- interrupts : shall be set to "<3>".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the master clock sources (see atmel datasheet) phandles.
- e.g. "<&ck32k>, <&main>, <&plla>, <&pllb>".
-- atmel,clk-output-range : minimum and maximum clock frequency (two u32
- fields).
- e.g. output = <0 133000000>; <=> 0 to 133MHz.
-- atmel,clk-divisors : master clock divisors table (four u32 fields).
- 0 <=> reserved value.
- e.g. divisors = <1 2 4 6>;
-- atmel,master-clk-have-div3-pres : some SoC use the reserved value 7 in the
- PRES field as CLOCK_DIV3 (e.g sam9x5).
-
-For example:
- mck: mck {
- compatible = "atmel,at91rm9200-clk-master";
- interrupt-parent = <&pmc>;
- interrupts = <3>;
- #clock-cells = <0>;
- atmel,clk-output-range = <0 133000000>;
- atmel,clk-divisors = <1 2 4 0>;
- };
-
-Required properties for peripheral clocks:
-- #size-cells : shall be 0 (reg is used to encode clk id).
-- #address-cells : shall be 1 (reg is used to encode clk id).
-- clocks : shall be the master clock phandle.
- e.g. clocks = <&mck>;
-- name: device tree node describing a specific peripheral clock.
- * #clock-cells : from common clock binding; shall be set to 0.
- * reg: peripheral id. See Atmel's datasheets to get a full
- list of peripheral ids.
- * atmel,clk-output-range : minimum and maximum clock frequency
- (two u32 fields). Only valid on at91sam9x5-clk-peripheral
- compatible IPs.
-
-For example:
- periph: periphck {
- compatible = "atmel,at91sam9x5-clk-peripheral";
- #size-cells = <0>;
- #address-cells = <1>;
- clocks = <&mck>;
-
- ssc0_clk {
- #clock-cells = <0>;
- reg = <2>;
- atmel,clk-output-range = <0 133000000>;
- };
-
- usart0_clk {
- #clock-cells = <0>;
- reg = <3>;
- atmel,clk-output-range = <0 66000000>;
- };
- };
-
-
-Required properties for pll clocks:
-- interrupts : shall be set to "<1>".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the main clock phandle.
-- reg : pll id.
- 0 -> PLL A
- 1 -> PLL B
-- atmel,clk-input-range : minimum and maximum source clock frequency (two u32
- fields).
- e.g. input = <1 32000000>; <=> 1 to 32MHz.
-- #atmel,pll-clk-output-range-cells : number of cells reserved for pll output
- range description. Sould be set to 2, 3
- or 4.
- * 1st and 2nd cells represent the frequency range (min-max).
- * 3rd cell is optional and represents the OUT field value for the given
- range.
- * 4th cell is optional and represents the ICPLL field (PLLICPR
- register)
-- atmel,pll-clk-output-ranges : pll output frequency ranges + optional parameter
- depending on #atmel,pll-output-range-cells
- property value.
-
-For example:
- plla: pllack {
- compatible = "atmel,at91sam9g45-clk-pll";
- interrupt-parent = <&pmc>;
- interrupts = <1>;
- #clock-cells = <0>;
- clocks = <&main>;
- reg = <0>;
- atmel,clk-input-range = <2000000 32000000>;
- #atmel,pll-clk-output-range-cells = <4>;
- atmel,pll-clk-output-ranges = <74500000 800000000 0 0
- 69500000 750000000 1 0
- 64500000 700000000 2 0
- 59500000 650000000 3 0
- 54500000 600000000 0 1
- 49500000 550000000 1 1
- 44500000 500000000 2 1
- 40000000 450000000 3 1>;
- };
-
-Required properties for plldiv clocks (plldiv = pll / 2):
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the plla clock phandle.
-
-The pll divisor is equal to 2 and cannot be changed.
-
-For example:
- plladiv: plladivck {
- compatible = "atmel,at91sam9x5-clk-plldiv";
- #clock-cells = <0>;
- clocks = <&plla>;
- };
-
-Required properties for programmable clocks:
-- #size-cells : shall be 0 (reg is used to encode clk id).
-- #address-cells : shall be 1 (reg is used to encode clk id).
-- clocks : shall be the programmable clock source phandles.
- e.g. clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
-- name: device tree node describing a specific prog clock.
- * #clock-cells : from common clock binding; shall be set to 0.
- * reg : programmable clock id (register offset from PCKx
- register).
- * interrupts : shall be set to "<(8 + id)>".
-
-For example:
- prog: progck {
- compatible = "atmel,at91sam9g45-clk-programmable";
- #size-cells = <0>;
- #address-cells = <1>;
- interrupt-parent = <&pmc>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>;
-
- prog0 {
- #clock-cells = <0>;
- reg = <0>;
- interrupts = <8>;
- };
-
- prog1 {
- #clock-cells = <0>;
- reg = <1>;
- interrupts = <9>;
- };
- };
-
-
-Required properties for smd clock:
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the smd clock source phandles.
- e.g. clocks = <&plladiv>, <&utmi>;
-
-For example:
- smd: smdck {
- compatible = "atmel,at91sam9x5-clk-smd";
- #clock-cells = <0>;
- clocks = <&plladiv>, <&utmi>;
- };
-
-Required properties for system clocks:
-- #size-cells : shall be 0 (reg is used to encode clk id).
-- #address-cells : shall be 1 (reg is used to encode clk id).
-- name: device tree node describing a specific system clock.
- * #clock-cells : from common clock binding; shall be set to 0.
- * reg: system clock id (bit position in SCER/SCDR/SCSR registers).
- See Atmel's datasheet to get a full list of system clock ids.
-
-For example:
- system: systemck {
- compatible = "atmel,at91rm9200-clk-system";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ddrck {
- #clock-cells = <0>;
- reg = <2>;
- clocks = <&mck>;
- };
-
- uhpck {
- #clock-cells = <0>;
- reg = <6>;
- clocks = <&usb>;
- };
-
- udpck {
- #clock-cells = <0>;
- reg = <7>;
- clocks = <&usb>;
- };
- };
-
-
-Required properties for usb clock:
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the smd clock source phandles.
- e.g. clocks = <&pllb>;
-- atmel,clk-divisors (only available for "atmel,at91rm9200-clk-usb"):
- usb clock divisor table.
- e.g. divisors = <1 2 4 0>;
-
-For example:
- usb: usbck {
- compatible = "atmel,at91sam9x5-clk-usb";
- #clock-cells = <0>;
- clocks = <&plladiv>, <&utmi>;
- };
-
- usb: usbck {
- compatible = "atmel,at91rm9200-clk-usb";
- #clock-cells = <0>;
- clocks = <&pllb>;
- atmel,clk-divisors = <1 2 4 0>;
- };
-
-
-Required properties for utmi clock:
-- interrupts : shall be set to "<AT91_PMC_LOCKU IRQ_TYPE_LEVEL_HIGH>".
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the main clock source phandle.
-
-For example:
- utmi: utmick {
- compatible = "atmel,at91sam9x5-clk-utmi";
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_LOCKU IRQ_TYPE_LEVEL_HIGH>;
- #clock-cells = <0>;
- clocks = <&main>;
- };
-
-Required properties for 32 bits bus Matrix clock (h32mx clock):
-- #clock-cells : from common clock binding; shall be set to 0.
-- clocks : shall be the master clock source phandle.
-
-For example:
- h32ck: h32mxck {
- #clock-cells = <0>;
- compatible = "atmel,sama5d4-clk-h32mx";
- clocks = <&mck>;
- };
-
-Required properties for generated clocks:
-- #size-cells : shall be 0 (reg is used to encode clk id).
-- #address-cells : shall be 1 (reg is used to encode clk id).
-- clocks : shall be the generated clock source phandles.
- e.g. clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>;
-- name: device tree node describing a specific generated clock.
- * #clock-cells : from common clock binding; shall be set to 0.
- * reg: peripheral id. See Atmel's datasheets to get a full
- list of peripheral ids.
- * atmel,clk-output-range : minimum and maximum clock frequency
- (two u32 fields).
-
-For example:
- gck {
- compatible = "atmel,sama5d2-clk-generated";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>;
-
- tcb0_gclk: tcb0_gclk {
- #clock-cells = <0>;
- reg = <35>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- pwm_gclk: pwm_gclk {
- #clock-cells = <0>;
- reg = <38>;
- atmel,clk-output-range = <0 83000000>;
- };
- };
-
-Required properties for I2S mux clocks:
-- #size-cells : shall be 0 (reg is used to encode I2S bus id).
-- #address-cells : shall be 1 (reg is used to encode I2S bus id).
-- name: device tree node describing a specific mux clock.
- * #clock-cells : from common clock binding; shall be set to 0.
- * clocks : shall be the mux clock parent phandles; shall be 2 phandles:
- peripheral and generated clock; the first phandle shall belong to the
- peripheral clock and the second one shall belong to the generated
- clock; "clock-indices" property can be user to specify
- the correct order.
- * reg: I2S bus id of the corresponding mux clock.
- e.g. reg = <0>; for i2s0, reg = <1>; for i2s1
-
-For example:
- i2s_clkmux {
- compatible = "atmel,sama5d2-clk-i2s-mux";
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2s0muxck: i2s0_muxclk {
- clocks = <&i2s0_clk>, <&i2s0_gclk>;
- #clock-cells = <0>;
- reg = <0>;
- };
-
- i2s1muxck: i2s1_muxclk {
- clocks = <&i2s1_clk>, <&i2s1_gclk>;
- #clock-cells = <0>;
- reg = <1>;
- };
+ pmc: pmc@f0018000 {
+ compatible = "atmel,sama5d4-pmc", "syscon";
+ reg = <0xf0018000 0x120>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ #clock-cells = <2>;
+ clocks = <&clk32k>, <&main_xtal>;
+ clock-names = "slow_clk", "main_xtal";
};
diff --git a/Documentation/devicetree/bindings/clock/hi3670-clock.txt b/Documentation/devicetree/bindings/clock/hi3670-clock.txt
new file mode 100644
index 000000000000..66f3697eca78
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/hi3670-clock.txt
@@ -0,0 +1,43 @@
+* Hisilicon Hi3670 Clock Controller
+
+The Hi3670 clock controller generates and supplies clock to various
+controllers within the Hi3670 SoC.
+
+Required Properties:
+
+- compatible: the compatible should be one of the following strings to
+ indicate the clock controller functionality.
+
+ - "hisilicon,hi3670-crgctrl"
+ - "hisilicon,hi3670-pctrl"
+ - "hisilicon,hi3670-pmuctrl"
+ - "hisilicon,hi3670-sctrl"
+ - "hisilicon,hi3670-iomcu"
+ - "hisilicon,hi3670-media1-crg"
+ - "hisilicon,hi3670-media2-crg"
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes use this identifier
+to specify the clock which they consume.
+
+All these identifiers can be found in <dt-bindings/clock/hi3670-clock.h>.
+
+Examples:
+ crg_ctrl: clock-controller@fff35000 {
+ compatible = "hisilicon,hi3670-crgctrl", "syscon";
+ reg = <0x0 0xfff35000 0x0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ uart0: serial@fdf02000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfdf02000 0x0 0x1000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3670_CLK_GATE_UART0>,
+ <&crg_ctrl HI3670_PCLK>;
+ clock-names = "uartclk", "apb_pclk";
+ };
diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
index f8d4134ae409..ba5a442026b7 100644
--- a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
+++ b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
@@ -6,8 +6,11 @@ to provide many different clock signals derived from only 2 external source
clocks.
Required properties:
-- compatible : Should be "ingenic,<soctype>-cgu".
- For example "ingenic,jz4740-cgu" or "ingenic,jz4780-cgu".
+- compatible : Should be one of:
+ * ingenic,jz4740-cgu
+ * ingenic,jz4725b-cgu
+ * ingenic,jz4770-cgu
+ * ingenic,jz4780-cgu
- reg : The address & length of the CGU registers.
- clocks : List of phandle & clock specifiers for clocks external to the CGU.
Two such external clocks should be specified - first the external crystal
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
new file mode 100644
index 000000000000..c5eb6694fda9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
@@ -0,0 +1,18 @@
+Qualcomm Camera Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,sdm845-camcc".
+- reg : shall contain base register location and length.
+- #clock-cells : from common clock binding, shall contain 1.
+- #reset-cells : from common reset binding, shall contain 1.
+- #power-domain-cells : from generic power domain binding, shall contain 1.
+
+Example:
+ camcc: clock-controller@ad00000 {
+ compatible = "qcom,sdm845-camcc";
+ reg = <0xad00000 0x10000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 664ea1fd6c76..52d9345c9927 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -19,6 +19,9 @@ Required properties :
"qcom,gcc-msm8996"
"qcom,gcc-msm8998"
"qcom,gcc-mdm9615"
+ "qcom,gcc-qcs404"
+ "qcom,gcc-sdm630"
+ "qcom,gcc-sdm660"
"qcom,gcc-sdm845"
- reg : shall contain base register location and length
diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
new file mode 100644
index 000000000000..ec02a024424c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
@@ -0,0 +1,60 @@
+High-Frequency PLL (HFPLL)
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>:
+ shall contain only one of the following. The generic
+ compatible "qcom,hfpll" should be also included.
+
+ "qcom,hfpll-ipq8064", "qcom,hfpll"
+ "qcom,hfpll-apq8064", "qcom,hfpll"
+ "qcom,hfpll-msm8974", "qcom,hfpll"
+ "qcom,hfpll-msm8960", "qcom,hfpll"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: address and size of HFPLL registers. An optional second
+ element specifies the address and size of the alias
+ register region.
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the xo clock.
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "xo".
+
+- clock-output-names:
+ Usage: required
+ Value type: <string>
+ Definition: Name of the PLL. Typically hfpllX where X is a CPU number
+ starting at 0. Otherwise hfpll_Y where Y is more specific
+ such as "l2".
+
+Example:
+
+1) An HFPLL for the L2 cache.
+
+ clock-controller@f9016000 {
+ compatible = "qcom,hfpll-ipq8064", "qcom,hfpll";
+ reg = <0xf9016000 0x30>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ clock-output-names = "hfpll_l2";
+ };
+
+2) An HFPLL for CPU0. This HFPLL has the alias register region.
+
+ clock-controller@f908a000 {
+ compatible = "qcom,hfpll-ipq8064", "qcom,hfpll";
+ reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ clock-output-names = "hfpll0";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
new file mode 100644
index 000000000000..030ba60dab08
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
@@ -0,0 +1,34 @@
+Krait Clock Controller
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,krait-cc-v1"
+ "qcom,krait-cc-v2"
+
+- #clock-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 1
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the clock parents of hfpll, secondary muxes.
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "hfpll0", "hfpll1", "acpu0_aux", "acpu1_aux", "qsb".
+
+Example:
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ clocks = <&hfpll0>, <&hfpll1>, <&acpu0_aux>, <&acpu1_aux>, <&qsb>;
+ clock-names = "hfpll0", "hfpll1", "acpu0_aux", "acpu1_aux", "qsb";
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index db542abadb75..916a601b76a7 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -13,9 +13,13 @@ They provide the following functionalities:
Required Properties:
- compatible: Must be one of:
+ - "renesas,r7s9210-cpg-mssr" for the r7s9210 SoC (RZ/A2)
- "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M)
+ - "renesas,r8a7744-cpg-mssr" for the r8a7744 SoC (RZ/G1N)
- "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
- "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
+ - "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M)
+ - "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E)
- "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
- "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
- "renesas,r8a7792-cpg-mssr" for the r8a7792 SoC (R-Car V2H)
@@ -35,12 +39,13 @@ Required Properties:
- clocks: References to external parent clocks, one entry for each entry in
clock-names
- clock-names: List of external parent clock names. Valid names are:
- - "extal" (r8a7743, r8a7745, r8a77470, r8a7790, r8a7791, r8a7792,
- r8a7793, r8a7794, r8a7795, r8a7796, r8a77965, r8a77970,
- r8a77980, r8a77990, r8a77995)
- - "extalr" (r8a7795, r8a7796, r8a77965, r8a77970, r8a77980)
- - "usb_extal" (r8a7743, r8a7745, r8a77470, r8a7790, r8a7791, r8a7793,
- r8a7794)
+ - "extal" (r7s9210, r8a7743, r8a7744, r8a7745, r8a77470, r8a774a1,
+ r8a774c0, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794,
+ r8a7795, r8a7796, r8a77965, r8a77970, r8a77980, r8a77990,
+ r8a77995)
+ - "extalr" (r8a774a1, r8a7795, r8a7796, r8a77965, r8a77970, r8a77980)
+ - "usb_extal" (r8a7743, r8a7744, r8a7745, r8a77470, r8a7790, r8a7791,
+ r8a7793, r8a7794)
- #clock-cells: Must be 2
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt
index a9b35265fa13..513f03466aba 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt
@@ -1,20 +1,22 @@
-Innolux TV123WAM 12.3 inch eDP 2K display panel
+Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
Required properties:
-- compatible: should be "innolux,tv123wam"
+- compatible: should be "innolux,p120zdg-bf1"
- power-supply: regulator to provide the supply voltage
Optional properties:
- enable-gpios: GPIO pin to enable or disable the panel
- backlight: phandle of the backlight device attached to the panel
+- no-hpd: If HPD isn't hooked up, add this property.
Example:
panel_edp: panel-edp {
- compatible = "innolux,tv123wam";
+ compatible = "innolux,p120zdg-bf1";
enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
power-supply = <&pm8916_l2>;
backlight = <&backlight>;
+ no-hpd;
};
diff --git a/Documentation/devicetree/bindings/display/panel/simple-panel.txt b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
index 45a457ad38f0..b2b872c710f2 100644
--- a/Documentation/devicetree/bindings/display/panel/simple-panel.txt
+++ b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
@@ -11,6 +11,9 @@ Optional properties:
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- enable-gpios: GPIO pin to enable or disable the panel
- backlight: phandle of the backlight device attached to the panel
+- no-hpd: This panel is supposed to communicate that it's ready via the HPD
+ (hot plug detect) signal, but the signal isn't hooked up, so we should
+ hardcode the maximum delay from the panel spec when powering up the panel.
Example:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
index 091c8dfd3229..b245363d6d60 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -3,6 +3,7 @@
Required properties:
- compatible :
- "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
+ - "fsl,imx8qxp-lpi2c" for LPI2C compatible with the one integrated on i.MX8QXP soc
- reg : address and length of the lpi2c master registers
- interrupts : lpi2c interrupt
- clocks : lpi2c clock specifier
diff --git a/Documentation/devicetree/bindings/media/cedrus.txt b/Documentation/devicetree/bindings/media/cedrus.txt
new file mode 100644
index 000000000000..a089a0c1ff05
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/cedrus.txt
@@ -0,0 +1,54 @@
+Device-tree bindings for the VPU found in Allwinner SoCs, referred to as the
+Video Engine (VE) in Allwinner literature.
+
+The VPU can only access the first 256 MiB of DRAM, which is DMA-mapped starting
+from the DRAM base. This requires specific memory allocation and handling.
+
+Required properties:
+- compatible : must be one of the following compatibles:
+ - "allwinner,sun4i-a10-video-engine"
+ - "allwinner,sun5i-a13-video-engine"
+ - "allwinner,sun7i-a20-video-engine"
+ - "allwinner,sun8i-a33-video-engine"
+ - "allwinner,sun8i-h3-video-engine"
+- reg : register base and length of VE;
+- clocks : list of clock specifiers, corresponding to entries in
+ the clock-names property;
+- clock-names : should contain "ahb", "mod" and "ram" entries;
+- resets : phandle for reset;
+- interrupts : VE interrupt number;
+- allwinner,sram : SRAM region to use with the VE.
+
+Optional properties:
+- memory-region : CMA pool to use for buffers allocation instead of the
+ default CMA pool.
+
+Example:
+
+reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* Address must be kept in the lower 256 MiBs of DRAM for VE. */
+ cma_pool: cma@4a000000 {
+ compatible = "shared-dma-pool";
+ size = <0x6000000>;
+ alloc-ranges = <0x4a000000 0x6000000>;
+ reusable;
+ linux,cma-default;
+ };
+};
+
+video-codec@1c0e000 {
+ compatible = "allwinner,sun7i-a20-video-engine";
+ reg = <0x01c0e000 0x1000>;
+
+ clocks = <&ccu CLK_AHB_VE>, <&ccu CLK_VE>,
+ <&ccu CLK_DRAM_VE>;
+ clock-names = "ahb", "mod", "ram";
+
+ resets = <&ccu RST_VE>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ allwinner,sram = <&ve_sram 1>;
+};
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.txt b/Documentation/devicetree/bindings/media/rockchip-vpu.txt
new file mode 100644
index 000000000000..35dc464ad7c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/rockchip-vpu.txt
@@ -0,0 +1,29 @@
+device-tree bindings for rockchip VPU codec
+
+The Rockchip VPU (Video Processing Unit) is present in various Rockchip
+platforms, such as RK3288 and RK3399.
+
+Required properties:
+- compatible: value should be one of the following
+ "rockchip,rk3288-vpu";
+ "rockchip,rk3399-vpu";
+- interrupts: encoding and decoding interrupt specifiers
+- interrupt-names: should be "vepu" and "vdpu"
+- clocks: phandle to VPU aclk, hclk clocks
+- clock-names: should be "aclk" and "hclk"
+- power-domains: phandle to power domain node
+- iommus: phandle to an iommu node
+
+Example:
+SoC-specific DT entry:
+ vpu: video-codec@ff9a0000 {
+ compatible = "rockchip,rk3288-vpu";
+ reg = <0x0 0xff9a0000 0x0 0x800>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vepu", "vdpu";
+ clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ clock-names = "aclk", "hclk";
+ power-domains = <&power RK3288_PD_VIDEO>;
+ iommus = <&vpu_mmu>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
index 06a363d9ccef..b9a1d7402128 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
@@ -7,6 +7,7 @@ Required properties:
for da850 - compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap";
for 66ak2g - compatible = "ti,k2g-ecap", "ti,am3352-ecap";
+ for am654 - compatible = "ti,am654-ecap", "ti,am3352-ecap";
- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
the cells format. The PWM channel index ranges from 0 to 4. The only third
cell flag supported by this binding is PWM_POLARITY_INVERTED.
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
index e1ef6afbe3a7..7f31fe7e2093 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
@@ -3,7 +3,9 @@
Required Properties:
- compatible: should be "renesas,pwm-rcar" and one of the following.
- "renesas,pwm-r8a7743": for RZ/G1M
+ - "renesas,pwm-r8a7744": for RZ/G1N
- "renesas,pwm-r8a7745": for RZ/G1E
+ - "renesas,pwm-r8a774a1": for RZ/G2M
- "renesas,pwm-r8a7778": for R-Car M1A
- "renesas,pwm-r8a7779": for R-Car H1
- "renesas,pwm-r8a7790": for R-Car H2
@@ -12,6 +14,8 @@ Required Properties:
- "renesas,pwm-r8a7795": for R-Car H3
- "renesas,pwm-r8a7796": for R-Car M3-W
- "renesas,pwm-r8a77965": for R-Car M3-N
+ - "renesas,pwm-r8a77970": for R-Car V3M
+ - "renesas,pwm-r8a77980": for R-Car V3H
- "renesas,pwm-r8a77990": for R-Car E3
- "renesas,pwm-r8a77995": for R-Car D3
- reg: base address and length of the registers block for the PWM.
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
index d53a16715da6..848a92b53d81 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
@@ -2,13 +2,19 @@
Required Properties:
- - compatible: should be one of the following.
+ - compatible: must contain one or more of the following:
- "renesas,tpu-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible PWM controller.
- "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller.
- "renesas,tpu-r8a7743": for R8A7743 (RZ/G1M) compatible PWM controller.
+ - "renesas,tpu-r8a7744": for R8A7744 (RZ/G1N) compatible PWM controller.
- "renesas,tpu-r8a7745": for R8A7745 (RZ/G1E) compatible PWM controller.
- "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller.
- - "renesas,tpu": for generic R-Car and RZ/G1 TPU PWM controller.
+ - "renesas,tpu-r8a77970": for R8A77970 (R-Car V3M) compatible PWM
+ controller.
+ - "renesas,tpu-r8a77980": for R8A77980 (R-Car V3H) compatible PWM
+ controller.
+ - "renesas,tpu": for the generic TPU PWM controller; this is a fallback for
+ the entries listed above.
- reg: Base address and length of each memory resource used by the PWM
controller hardware module.
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt
new file mode 100644
index 000000000000..a842a782b557
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp-pil.txt
@@ -0,0 +1,126 @@
+Qualcomm Technology Inc. ADSP Peripheral Image Loader
+
+This document defines the binding for a component that loads and boots firmware
+on the Qualcomm Technology Inc. ADSP Hexagon core.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,sdm845-adsp-pil"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the qdsp6ss register
+
+- interrupts-extended:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must list the watchdog, fatal, ready, handover and
+ stop-ack IRQs
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "wdog", "fatal", "ready", "handover", "stop-ack"
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: List of 8 phandle and clock specifier pairs for the adsp.
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: List of clock input name strings sorted in the same
+ order as the clocks property. Definition must have
+ "xo", "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr",
+ "lpass_ahbm_aon_cbcr", "qdsp6ss_xo", "qdsp6ss_sleep"
+ and "qdsp6ss_core".
+
+- power-domains:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to cx power domain node.
+
+- resets:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the list of 2 reset-controllers for the adsp.
+
+- reset-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "pdc_sync" and "cc_lpass"
+
+- qcom,halt-regs:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: a phandle reference to a syscon representing TCSR followed
+ by the offset within syscon for lpass halt register.
+
+- memory-region:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the reserved-memory for the ADSP
+
+- qcom,smem-states:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the smem state for requesting the ADSP to
+ shut down
+
+- qcom,smem-state-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "stop"
+
+
+= SUBNODES
+The adsp node may have a subnode named "glink-edge" that describes the
+communication edge, channels and devices related to the ADSP.
+See ../soc/qcom/qcom,glink.txt for details on how to describe these.
+
+= EXAMPLE
+The following example describes the resources needed to boot and control the
+ADSP, as it is found on SDM845 boards.
+
+ remoteproc@17300000 {
+ compatible = "qcom,sdm845-adsp-pil";
+ reg = <0x17300000 0x40c>;
+
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_LPASS_SWAY_CLK>,
+ <&lpasscc LPASS_AUDIO_WRAPPER_AON_CLK>,
+ <&lpasscc LPASS_Q6SS_AHBS_AON_CLK>,
+ <&lpasscc LPASS_Q6SS_AHBM_AON_CLK>,
+ <&lpasscc LPASS_QDSP6SS_XO_CLK>,
+ <&lpasscc LPASS_QDSP6SS_SLEEP_CLK>,
+ <&lpasscc LPASS_QDSP6SS_CORE_CLK>;
+ clock-names = "xo", "sway_cbcr", "lpass_aon",
+ "lpass_ahbs_aon_cbcr",
+ "lpass_ahbm_aon_cbcr", "qdsp6ss_xo",
+ "qdsp6ss_sleep", "qdsp6ss_core";
+
+ power-domains = <&rpmhpd SDM845_CX>;
+
+ resets = <&pdc_reset PDC_AUDIO_SYNC_RESET>,
+ <&aoss_reset AOSS_CC_LPASS_RESTART>;
+ reset-names = "pdc_sync", "cc_lpass";
+
+ qcom,halt-regs = <&tcsr_mutex_regs 0x22000>;
+
+ memory-region = <&pil_adsp_mem>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
index 728e4193f7a6..9c0cff3a5ed8 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
@@ -10,6 +10,11 @@ on the Qualcomm ADSP Hexagon core.
"qcom,msm8974-adsp-pil"
"qcom,msm8996-adsp-pil"
"qcom,msm8996-slpi-pil"
+ "qcom,qcs404-adsp-pas"
+ "qcom,qcs404-cdsp-pas"
+ "qcom,qcs404-wcss-pas"
+ "qcom,sdm845-adsp-pas"
+ "qcom,sdm845-cdsp-pas"
- interrupts-extended:
Usage: required
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index 601dd9f389aa..9ff5b0309417 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -53,13 +53,17 @@ on the Qualcomm Hexagon core.
Definition: reference to the reset-controller for the modem sub-system
reference to the list of 3 reset-controllers for the
wcss sub-system
+ reference to the list of 2 reset-controllers for the modem
+ sub-system on SDM845 SoCs
- reset-names:
Usage: required
Value type: <stringlist>
Definition: must be "mss_restart" for the modem sub-system
- Definition: must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
- for the wcss syb-system
+ must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
+ for the wcss sub-system
+ must be "mss_restart", "pdc_reset" for the modem
+ sub-system on SDM845 SoCs
- cx-supply:
- mss-supply:
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 8bf62240e10d..1177052701e1 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -151,6 +151,11 @@ Mount Options
Report overall filesystem usage in statfs instead of using the root
directory quota.
+ nocopyfrom
+ Don't use the RADOS 'copy-from' operation to perform remote object
+ copies. Currently, it's only used in copy_file_range, which will revert
+ to the default VFS implementation if this option is used.
+
More Information
================
diff --git a/Documentation/filesystems/nfs/rpc-cache.txt b/Documentation/filesystems/nfs/rpc-cache.txt
index ebcaaee21616..c4dac829db0f 100644
--- a/Documentation/filesystems/nfs/rpc-cache.txt
+++ b/Documentation/filesystems/nfs/rpc-cache.txt
@@ -84,7 +84,7 @@ Creating a Cache
A message from user space has arrived to fill out a
cache entry. It is in 'buf' of length 'len'.
cache_parse should parse this, find the item in the
- cache with sunrpc_cache_lookup, and update the item
+ cache with sunrpc_cache_lookup_rcu, and update the item
with sunrpc_cache_update.
@@ -95,7 +95,7 @@ Creating a Cache
Using a cache
-------------
-To find a value in a cache, call sunrpc_cache_lookup passing a pointer
+To find a value in a cache, call sunrpc_cache_lookup_rcu passing a pointer
to the cache_head in a sample item with the 'key' fields filled in.
This will be passed to ->match to identify the target entry. If no
entry is found, a new entry will be created, added to the cache, and
@@ -116,7 +116,7 @@ item does become valid, the deferred copy of the request will be
revisited (->revisit). It is expected that this method will
reschedule the request for processing.
-The value returned by sunrpc_cache_lookup can also be passed to
+The value returned by sunrpc_cache_lookup_rcu can also be passed to
sunrpc_cache_update to set the content for the item. A second item is
passed which should hold the content. If the item found by _lookup
has valid data, then it is discarded and a new item is created. This
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 51c136c821bf..eef7d9d259e8 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -286,6 +286,12 @@ pointed by REDIRECT. This should not be possible on local system as setting
"trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible
for untrusted layers like from a pen drive.
+Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and
+results in an error.
+
+(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
+given.
+
Sharing and copying layers
--------------------------
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 321d74b73937..cf43bc4dbf31 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -623,6 +623,11 @@ in your dentry operations instead.
On success you get a new struct file sharing the mount/dentry with the
original, on failure - ERR_PTR().
--
+[mandatory]
+ ->clone_file_range() and ->dedupe_file_range have been replaced with
+ ->remap_file_range(). See Documentation/filesystems/vfs.txt for more
+ information.
+--
[recommended]
->lookup() instances doing an equivalent of
if (IS_ERR(inode))
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index a6c6a8af48a2..5f71a252e2e0 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -883,8 +883,9 @@ struct file_operations {
unsigned (*mmap_capabilities)(struct file *);
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
- int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
- int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+ loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
};
@@ -960,11 +961,18 @@ otherwise noted.
copy_file_range: called by the copy_file_range(2) system call.
- clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
- FICLONE commands.
-
- dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
- command.
+ remap_file_range: called by the ioctl(2) system call for FICLONERANGE and
+ FICLONE and FIDEDUPERANGE commands to remap file ranges. An
+ implementation should remap len bytes at pos_in of the source file into
+ the dest file at pos_out. Implementations must handle callers passing
+ in len == 0; this means "remap to the end of the source file". The
+ return value should be the number of bytes remapped, or the usual
+ negative error code if errors occurred before any bytes were remapped.
+ The remap_flags parameter accepts REMAP_FILE_* flags. If
+ REMAP_FILE_DEDUP is set then the implementation must only remap if the
+ requested file ranges have identical contents. If REMAP_FILE_CAN_SHORTEN is
+ set, the caller is ok with the implementation shortening the request
+ length to satisfy alignment or EOF requirements (or any other reason).
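+
+	As an illustration only, a minimal sketch of wiring up the new hook
+	follows; myfs_remap_file_range() and myfs_do_share_blocks() are
+	hypothetical names used to show the expected semantics (handling
+	len == 0, rejecting unknown flags and returning the number of bytes
+	remapped), not an existing implementation:
+
+	static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
+					    struct file *file_out, loff_t pos_out,
+					    loff_t len, unsigned int remap_flags)
+	{
+		/* Reject flags this filesystem does not understand. */
+		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN))
+			return -EINVAL;
+
+		/* len == 0 means "remap to the end of the source file". */
+		if (len == 0)
+			len = i_size_read(file_inode(file_in)) - pos_in;
+
+		/*
+		 * The filesystem-specific helper verifies identical contents
+		 * when REMAP_FILE_DEDUP is set, shares the underlying blocks
+		 * and may shorten the request when REMAP_FILE_CAN_SHORTEN is
+		 * set. It returns the number of bytes remapped or a negative
+		 * error code.
+		 */
+		return myfs_do_share_blocks(file_in, pos_in, file_out, pos_out,
+					    len, remap_flags);
+	}
+
+	const struct file_operations myfs_file_operations = {
+		.remap_file_range = myfs_remap_file_range,
+	};
+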
fadvise: possibly called by the fadvise64() system call.
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 7b6a2b2bdc98..8da26c6dd886 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -537,21 +537,6 @@ more details, with real examples.
The third parameter may be a text as in this example, but it may also
be an expanded variable or a macro.
- cc-fullversion
- cc-fullversion is useful when the exact version of gcc is needed.
- One typical use-case is when a specific GCC version is broken.
- cc-fullversion points out a more specific version than cc-version does.
-
- Example:
- #arch/powerpc/Makefile
- $(Q)if test "$(cc-fullversion)" = "040200" ; then \
- echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
- false ; \
- fi
-
- In this example for a specific GCC version the build will error out
- explaining to the user why it stops.
-
cc-cross-prefix
cc-cross-prefix is used to check if there exists a $(CC) in path with
one of the listed prefixes. The first prefix where there exist a
diff --git a/Documentation/laptops/lg-laptop.rst b/Documentation/laptops/lg-laptop.rst
new file mode 100644
index 000000000000..e486fe7ddc35
--- /dev/null
+++ b/Documentation/laptops/lg-laptop.rst
@@ -0,0 +1,81 @@
+.. SPDX-License-Identifier: GPL-2.0+
+LG Gram laptop extra features
+=============================
+
+By Matan Ziv-Av <matan@svgalib.org>
+
+
+Hotkeys
+-------
+
+The following FN keys are ignored by the kernel without this driver:
+- FN-F1 (LG control panel) - Generates F15
+- FN-F5 (Touchpad toggle) - Generates F13
+- FN-F6 (Airplane mode) - Generates RFKILL
+- FN-F8 (Keyboard backlight) - Generates F16.
+ This key also changes keyboard backlight mode.
+- FN-F9 (Reader mode) - Generates F14
+
+The rest of the FN keys work without the need for a special driver.
+
+
+Reader mode
+-----------
+
+Writing 0/1 to /sys/devices/platform/lg-laptop/reader_mode disables/enables
+reader mode. In this mode the screen colors change (blue color reduced),
+and the reader mode indicator LED (on F9 key) turns on.
+
+
+FN Lock
+-------
+
+Writing 0/1 to /sys/devices/platform/lg-laptop/fn_lock disables/enables
+FN lock.
+
+
+Battery care limit
+------------------
+
+Writing 80/100 to /sys/devices/platform/lg-laptop/battery_care_limit
+sets the maximum capacity to charge the battery. Limiting the charge
+reduces battery capacity loss over time.
+
+This value is reset to 100 when the kernel boots.
+
+
+Fan mode
+--------
+
+Writing 1/0 to /sys/devices/platform/lg-laptop/fan_mode disables/enables
+the fan silent mode.
+
+
+USB charge
+----------
+
+Writing 0/1 to /sys/devices/platform/lg-laptop/usb_charge disables/enables
+charging another device from the USB port while the device is turned off.
+
+This value is reset to 0 when the kernel boots.
+
+
+LEDs
+~~~~
+
+There are two LED devices supported by the driver:
+
+Keyboard backlight
+------------------
+
+An LED device named kbd_led controls the keyboard backlight. There are three
+lighting levels: off (0), low (127) and high (255).
+
+The keyboard backlight is also controlled by the key combination FN-F8
+which cycles through those levels.
+
+
+Touchpad indicator LED
+----------------------
+
+On the F5 key. Controlled by the LED device named tpad_led.
diff --git a/Documentation/media/kapi/mc-core.rst b/Documentation/media/kapi/mc-core.rst
index 0c05503eaf1f..69362b3135c2 100644
--- a/Documentation/media/kapi/mc-core.rst
+++ b/Documentation/media/kapi/mc-core.rst
@@ -262,3 +262,5 @@ in the end provide a way to use driver-specific callbacks.
.. kernel-doc:: include/media/media-devnode.h
.. kernel-doc:: include/media/media-entity.h
+
+.. kernel-doc:: include/media/media-request.h
diff --git a/Documentation/media/uapi/mediactl/media-controller.rst b/Documentation/media/uapi/mediactl/media-controller.rst
index 0eea4f9a07d5..66aff38cd499 100644
--- a/Documentation/media/uapi/mediactl/media-controller.rst
+++ b/Documentation/media/uapi/mediactl/media-controller.rst
@@ -21,6 +21,7 @@ Part IV - Media Controller API
media-controller-intro
media-controller-model
media-types
+ request-api
media-funcs
media-header
diff --git a/Documentation/media/uapi/mediactl/media-funcs.rst b/Documentation/media/uapi/mediactl/media-funcs.rst
index 076856501cdb..260f9dcadcde 100644
--- a/Documentation/media/uapi/mediactl/media-funcs.rst
+++ b/Documentation/media/uapi/mediactl/media-funcs.rst
@@ -16,3 +16,9 @@ Function Reference
media-ioc-enum-entities
media-ioc-enum-links
media-ioc-setup-link
+ media-ioc-request-alloc
+ request-func-close
+ request-func-ioctl
+ request-func-poll
+ media-request-ioc-queue
+ media-request-ioc-reinit
diff --git a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
new file mode 100644
index 000000000000..0f8b31874002
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
@@ -0,0 +1,66 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _media_ioc_request_alloc:
+
+*****************************
+ioctl MEDIA_IOC_REQUEST_ALLOC
+*****************************
+
+Name
+====
+
+MEDIA_IOC_REQUEST_ALLOC - Allocate a request
+
+
+Synopsis
+========
+
+.. c:function:: int ioctl( int fd, MEDIA_IOC_REQUEST_ALLOC, int *argp )
+ :name: MEDIA_IOC_REQUEST_ALLOC
+
+
+Arguments
+=========
+
+``fd``
+ File descriptor returned by :ref:`open() <media-func-open>`.
+
+``argp``
+ Pointer to an integer.
+
+
+Description
+===========
+
+If the media device supports :ref:`requests <media-request-api>`, then
+this ioctl can be used to allocate a request. If it is not supported, then
+``errno`` is set to ``ENOTTY``. A request is accessed through a file descriptor
+that is returned in ``*argp``.
+
+If the request was successfully allocated, then the request file descriptor
+can be passed to the :ref:`VIDIOC_QBUF <VIDIOC_QBUF>`,
+:ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`,
+:ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` and
+:ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctls.
+
+In addition, the request can be queued by calling
+:ref:`MEDIA_REQUEST_IOC_QUEUE` and re-initialized by calling
+:ref:`MEDIA_REQUEST_IOC_REINIT`.
+
+Finally, the file descriptor can be :ref:`polled <request-func-poll>` to wait
+for the request to complete.
+
+The request will remain allocated until all the file descriptors associated
+with it are closed by :ref:`close() <request-func-close>` and the driver no
+longer uses the request internally. See also
+:ref:`here <media-request-life-time>` for more information.
+
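+A minimal usage sketch (``media_fd`` is assumed to be an open media device
+file descriptor; error handling is reduced to returning ``errno``):
+
+.. code-block:: c
+
+    int req_fd;
+
+    if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
+        return errno;
+    /* req_fd now refers to a newly allocated, empty request. */
+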
+Return Value
+============
+
+On success 0 is returned, on error -1 and the ``errno`` variable is set
+appropriately. The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
+
+ENOTTY
+ The driver has no support for requests.
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
new file mode 100644
index 000000000000..6dd2d7fea714
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
@@ -0,0 +1,78 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _media_request_ioc_queue:
+
+*****************************
+ioctl MEDIA_REQUEST_IOC_QUEUE
+*****************************
+
+Name
+====
+
+MEDIA_REQUEST_IOC_QUEUE - Queue a request
+
+
+Synopsis
+========
+
+.. c:function:: int ioctl( int request_fd, MEDIA_REQUEST_IOC_QUEUE )
+ :name: MEDIA_REQUEST_IOC_QUEUE
+
+
+Arguments
+=========
+
+``request_fd``
+ File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`.
+
+
+Description
+===========
+
+If the media device supports :ref:`requests <media-request-api>`, then
+this request ioctl can be used to queue a previously allocated request.
+
+If the request was successfully queued, then the file descriptor can be
+:ref:`polled <request-func-poll>` to wait for the request to complete.
+
+If the request was already queued before, then ``EBUSY`` is returned.
+Other errors can be returned if the contents of the request contained
+invalid or inconsistent data; see the next section for a list of
+common error codes. On error, both the request and driver state are unchanged.
+
+Once a request is queued, then the driver is required to gracefully handle
+errors that occur when the request is applied to the hardware. The
+exception is the ``EIO`` error which signals a fatal error that requires
+the application to stop streaming to reset the hardware state.
+
+It is not allowed to mix queuing requests with queuing buffers directly
+(without a request). ``EBUSY`` will be returned if the first buffer was
+queued directly and you next try to queue a request, or vice versa.
+
+A request must contain at least one buffer, otherwise this ioctl will
+return an ``ENOENT`` error.
+
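+A minimal usage sketch, assuming ``req_fd`` was returned by
+:ref:`MEDIA_IOC_REQUEST_ALLOC` and already has controls and at least one
+buffer associated with it:
+
+.. code-block:: c
+
+    struct pollfd pfd = { .events = POLLPRI, .fd = req_fd };
+
+    if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE))
+        return errno;
+    /* Wait until the request (its buffers and controls) has completed. */
+    poll(&pfd, 1, -1);
+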
+Return Value
+============
+
+On success 0 is returned, on error -1 and the ``errno`` variable is set
+appropriately. The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
+
+EBUSY
+ The request was already queued or the application queued the first
+ buffer directly, but later attempted to use a request. It is not permitted
+ to mix the two APIs.
+ENOENT
+ The request did not contain any buffers. All requests are required
+ to have at least one buffer. This can also be returned if some required
+ configuration is missing in the request.
+ENOMEM
+ Out of memory when allocating internal data structures for this
+ request.
+EINVAL
+ The request has invalid data.
+EIO
+ The hardware is in a bad state. To recover, the application needs to
+ stop streaming to reset the hardware state and then try to restart
+ streaming.
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
new file mode 100644
index 000000000000..febe888494c8
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _media_request_ioc_reinit:
+
+******************************
+ioctl MEDIA_REQUEST_IOC_REINIT
+******************************
+
+Name
+====
+
+MEDIA_REQUEST_IOC_REINIT - Re-initialize a request
+
+
+Synopsis
+========
+
+.. c:function:: int ioctl( int request_fd, MEDIA_REQUEST_IOC_REINIT )
+ :name: MEDIA_REQUEST_IOC_REINIT
+
+
+Arguments
+=========
+
+``request_fd``
+ File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`.
+
+Description
+===========
+
+If the media device supports :ref:`requests <media-request-api>`, then
+this request ioctl can be used to re-initialize a previously allocated
+request.
+
+Re-initializing a request will clear any existing data from the request.
+This avoids having to :ref:`close() <request-func-close>` a completed
+request and allocate a new request. Instead the completed request can just
+be re-initialized and it is ready to be used again.
+
+A request can only be re-initialized if it either has not been queued
+yet, or if it was queued and completed. Otherwise it will set ``errno``
+to ``EBUSY``. No other error codes can be returned.
+
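+A minimal usage sketch, assuming ``req_fd`` refers to a request that has
+already completed: instead of closing it and allocating a new request, it
+can simply be recycled:
+
+.. code-block:: c
+
+    if (ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT))
+        return errno;
+    /* req_fd is now empty again and ready to be reused. */
+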
+Return Value
+============
+
+On success 0 is returned, on error -1 and the ``errno`` variable is set
+appropriately.
+
+EBUSY
+ The request is queued but not yet completed.
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
new file mode 100644
index 000000000000..5f4a23029c48
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -0,0 +1,252 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _media-request-api:
+
+Request API
+===========
+
+The Request API has been designed to allow V4L2 to deal with requirements of
+modern devices (stateless codecs, complex camera pipelines, ...) and APIs
+(Android Codec v2). One such requirement is the ability for devices belonging to
+the same pipeline to reconfigure and collaborate closely on a per-frame basis.
+Another is support of stateless codecs, which require controls to be applied
+to specific frames (aka 'per-frame controls') in order to be used efficiently.
+
+While the initial use-case was V4L2, it can be extended to other subsystems
+as well, as long as they use the media controller.
+
+Supporting these features without the Request API is not always possible and if
+it is, it is terribly inefficient: user-space would have to flush all activity
+on the media pipeline, reconfigure it for the next frame, queue the buffers to
+be processed with that configuration, and wait until they are all available for
+dequeuing before considering the next frame. This defeats the purpose of having
+buffer queues since in practice only one buffer would be queued at a time.
+
+The Request API allows a specific configuration of the pipeline (media
+controller topology + configuration for each media entity) to be associated with
+specific buffers. This allows user-space to schedule several tasks ("requests")
+with different configurations in advance, knowing that the configuration will be
+applied when needed to get the expected result. Configuration values at the time
+of request completion are also available for reading.
+
+Usage
+=====
+
+The Request API extends the Media Controller API and cooperates with
+subsystem-specific APIs to support request usage. At the Media Controller
+level, requests are allocated from the supporting Media Controller device
+node. Their life cycle is then managed through the request file descriptors in
+an opaque way. Configuration data, buffer handles and processing results
+stored in requests are accessed through subsystem-specific APIs extended for
+request support, such as V4L2 APIs that take an explicit ``request_fd``
+parameter.
+
+Request Allocation
+------------------
+
+User-space allocates requests using :ref:`MEDIA_IOC_REQUEST_ALLOC`
+for the media device node. This returns a file descriptor representing the
+request. Typically, several such requests will be allocated.
+
+Request Preparation
+-------------------
+
+Standard V4L2 ioctls can then receive a request file descriptor to express the
+fact that the ioctl is part of said request, and is not to be applied
+immediately. See :ref:`MEDIA_IOC_REQUEST_ALLOC` for a list of ioctls that
+support this. Configurations set with a ``request_fd`` parameter are stored
+instead of being immediately applied, and buffers queued to a request do not
+enter the regular buffer queue until the request itself is queued.
+
+Request Submission
+------------------
+
+Once the configuration and buffers of the request are specified, it can be
+queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor.
+A request must contain at least one buffer, otherwise ``ENOENT`` is returned.
+A queued request cannot be modified anymore.
+
+.. caution::
+ For :ref:`memory-to-memory devices <codec>` you can use requests only for
+ output buffers, not for capture buffers. Attempting to add a capture buffer
+ to a request will result in an ``EACCES`` error.
+
+If the request contains configurations for multiple entities, individual drivers
+may synchronize so the requested pipeline's topology is applied before the
+buffers are processed. Media controller drivers do a best effort implementation
+since perfect atomicity may not be possible due to hardware limitations.
+
+.. caution::
+
+ It is not allowed to mix queuing requests with directly queuing buffers:
+ whichever method is used first locks this in place until
+ :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` is called or the device is
+ :ref:`closed <func-close>`. Attempts to directly queue a buffer when earlier
+ a buffer was queued via a request or vice versa will result in an ``EBUSY``
+ error.
+
+Controls can still be set without a request and are applied immediately,
+regardless of whether a request is in use or not.
+
+.. caution::
+
+ Setting the same control through a request and also directly can lead to
+ undefined behavior!
+
+User-space can :ref:`poll() <request-func-poll>` a request file descriptor in
+order to wait until the request completes. A request is considered complete
+once all its associated buffers are available for dequeuing and all the
+associated controls have been updated with the values at the time of completion.
+Note that user-space does not need to wait for the request to complete to
+dequeue its buffers: buffers that are available halfway through a request can
+be dequeued independently of the request's state.
+
+A completed request contains the state of the device after the request was
+executed. User-space can query that state by calling
+:ref:`ioctl VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` with the request file
+descriptor. Calling :ref:`ioctl VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` for a
+request that has been queued but not yet completed will return ``EBUSY``
+since the control values might be changed at any time by the driver while the
+request is in flight.
+
+.. _media-request-life-time:
+
+Recycling and Destruction
+-------------------------
+
+Finally, a completed request can either be discarded or be reused. Calling
+:ref:`close() <request-func-close>` on a request file descriptor will make
+that file descriptor unusable and the request will be freed once it is no
+longer in use by the kernel. That is, if the request is queued and then the
+file descriptor is closed, then it won't be freed until the driver has
+completed the request.
+
+The :ref:`MEDIA_REQUEST_IOC_REINIT` will clear a request's state and make it
+available again. No state is retained by this operation: the request is as
+if it had just been allocated.
+
+Example for a Codec Device
+--------------------------
+
+For use-cases such as :ref:`codecs <codec>`, the request API can be used
+to associate specific controls to
+be applied by the driver for the OUTPUT buffer, allowing user-space
+to queue many such buffers in advance. It can also take advantage of requests'
+ability to capture the state of controls when the request completes to read back
+information that may be subject to change.
+
+Put into code, after obtaining a request, user-space can assign controls and one
+OUTPUT buffer to it:
+
+.. code-block:: c
+
+ struct v4l2_buffer buf;
+ struct v4l2_ext_controls ctrls;
+ int req_fd;
+ ...
+ if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
+ return errno;
+ ...
+ ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
+ ctrls.request_fd = req_fd;
+ if (ioctl(codec_fd, VIDIOC_S_EXT_CTRLS, &ctrls))
+ return errno;
+ ...
+ buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ buf.flags |= V4L2_BUF_FLAG_REQUEST_FD;
+ buf.request_fd = req_fd;
+ if (ioctl(codec_fd, VIDIOC_QBUF, &buf))
+ return errno;
+
+Note that it is not allowed to use the Request API for CAPTURE buffers
+since there are no per-frame settings to report there.
+
+Once the request is fully prepared, it can be queued to the driver:
+
+.. code-block:: c
+
+ if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE))
+ return errno;
+
+User-space can then either wait for the request to complete by calling poll() on
+its file descriptor, or start dequeuing CAPTURE buffers. Most likely, it will
+want to get CAPTURE buffers as soon as possible and this can be done using a
+regular :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`:
+
+.. code-block:: c
+
+ struct v4l2_buffer buf;
+
+ memset(&buf, 0, sizeof(buf));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (ioctl(codec_fd, VIDIOC_DQBUF, &buf))
+ return errno;
+
+Note that this example assumes for simplicity that for every OUTPUT buffer
+there will be one CAPTURE buffer, but this does not have to be the case.
+
+We can then, after ensuring that the request is completed via polling the
+request file descriptor, query control values at the time of its completion via
+a call to :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`.
+This is particularly useful for volatile controls for which we want to
+query values as soon as the capture buffer is produced.
+
+.. code-block:: c
+
+ struct pollfd pfd = { .events = POLLPRI, .fd = req_fd };
+ poll(&pfd, 1, -1);
+ ...
+ ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
+ ctrls.request_fd = req_fd;
+ if (ioctl(codec_fd, VIDIOC_G_EXT_CTRLS, &ctrls))
+ return errno;
+
+Once we don't need the request anymore, we can either recycle it for reuse with
+:ref:`MEDIA_REQUEST_IOC_REINIT`...
+
+.. code-block:: c
+
+ if (ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT))
+ return errno;
+
+... or close its file descriptor to completely dispose of it.
+
+.. code-block:: c
+
+ close(req_fd);
+
+Example for a Simple Capture Device
+-----------------------------------
+
+With a simple capture device, requests can be used to specify controls to apply
+for a given CAPTURE buffer.
+
+.. code-block:: c
+
+ struct v4l2_buffer buf;
+ struct v4l2_ext_controls ctrls;
+ int req_fd;
+ ...
+ if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
+ return errno;
+ ...
+ ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
+ ctrls.request_fd = req_fd;
+ if (ioctl(camera_fd, VIDIOC_S_EXT_CTRLS, &ctrls))
+ return errno;
+ ...
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.flags |= V4L2_BUF_FLAG_REQUEST_FD;
+ buf.request_fd = req_fd;
+ if (ioctl(camera_fd, VIDIOC_QBUF, &buf))
+ return errno;
+
+Once the request is fully prepared, it can be queued to the driver:
+
+.. code-block:: c
+
+ if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE))
+ return errno;
+
+User-space can then dequeue buffers, wait for the request completion, query
+controls and recycle the request as in the M2M example above.
diff --git a/Documentation/media/uapi/mediactl/request-func-close.rst b/Documentation/media/uapi/mediactl/request-func-close.rst
new file mode 100644
index 000000000000..098d7f2b9548
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/request-func-close.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _request-func-close:
+
+***************
+request close()
+***************
+
+Name
+====
+
+request-close - Close a request file descriptor
+
+
+Synopsis
+========
+
+.. code-block:: c
+
+ #include <unistd.h>
+
+
+.. c:function:: int close( int fd )
+ :name: req-close
+
+Arguments
+=========
+
+``fd``
+ File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`.
+
+
+Description
+===========
+
+Closes the request file descriptor. Resources associated with the request
+are freed once all file descriptors associated with the request are closed
+and the driver has completed the request.
+See :ref:`here <media-request-life-time>` for more information.
+
+
+Return Value
+============
+
+:ref:`close() <request-func-close>` returns 0 on success. On error, -1 is
+returned, and ``errno`` is set appropriately. Possible error codes are:
+
+EBADF
+ ``fd`` is not a valid open file descriptor.
diff --git a/Documentation/media/uapi/mediactl/request-func-ioctl.rst b/Documentation/media/uapi/mediactl/request-func-ioctl.rst
new file mode 100644
index 000000000000..ff7b072a6999
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/request-func-ioctl.rst
@@ -0,0 +1,67 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _request-func-ioctl:
+
+***************
+request ioctl()
+***************
+
+Name
+====
+
+request-ioctl - Control a request file descriptor
+
+
+Synopsis
+========
+
+.. code-block:: c
+
+ #include <sys/ioctl.h>
+
+
+.. c:function:: int ioctl( int fd, int cmd, void *argp )
+ :name: req-ioctl
+
+Arguments
+=========
+
+``fd``
+ File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`.
+
+``cmd``
+ The request ioctl command code as defined in the media.h header file, for
+ example :ref:`MEDIA_REQUEST_IOC_QUEUE`.
+
+``argp``
+ Pointer to a request-specific structure.
+
+
+Description
+===========
+
+The :ref:`ioctl() <request-func-ioctl>` function manipulates request
+parameters. The argument ``fd`` must be an open file descriptor.
+
+The ioctl ``cmd`` code specifies the request function to be called. It
+has encoded in it whether the argument is an input, output or read/write
+parameter, and the size of the argument ``argp`` in bytes.
+
+Macros and structures definitions specifying request ioctl commands and
+their parameters are located in the media.h header file. All request ioctl
+commands, their respective function and parameters are specified in
+:ref:`media-user-func`.
+
+
+Return Value
+============
+
+On success 0 is returned, on error -1 and the ``errno`` variable is set
+appropriately. The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
+
+Command-specific error codes are listed in the individual command
+descriptions.
+
+When an ioctl that takes an output or read/write parameter fails, the
+parameter remains unmodified.
diff --git a/Documentation/media/uapi/mediactl/request-func-poll.rst b/Documentation/media/uapi/mediactl/request-func-poll.rst
new file mode 100644
index 000000000000..85191254f381
--- /dev/null
+++ b/Documentation/media/uapi/mediactl/request-func-poll.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _request-func-poll:
+
+**************
+request poll()
+**************
+
+Name
+====
+
+request-poll - Wait for some event on a file descriptor
+
+
+Synopsis
+========
+
+.. code-block:: c
+
+ #include <sys/poll.h>
+
+
+.. c:function:: int poll( struct pollfd *ufds, unsigned int nfds, int timeout )
+ :name: request-poll
+
+Arguments
+=========
+
+``ufds``
+ List of file descriptor events to be watched
+
+``nfds``
+ Number of file descriptor events at the \*ufds array
+
+``timeout``
+ Timeout to wait for events
+
+
+Description
+===========
+
+With the :c:func:`poll() <request-func-poll>` function applications can wait
+for a request to complete.
+
+On success :c:func:`poll() <request-func-poll>` returns the number of file
+descriptors that have been selected (that is, file descriptors for which the
+``revents`` field of the respective struct :c:type:`pollfd`
+is non-zero). Request file descriptors set the ``POLLPRI`` flag in ``revents``
+when the request was completed. When the function times out it returns
+a value of zero, on failure it returns -1 and the ``errno`` variable is
+set appropriately.
+
+Attempting to poll for a request that is not yet queued will
+set the ``POLLERR`` flag in ``revents``.
+
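+A minimal sketch of waiting for a single queued request to complete
+(``req_fd`` is assumed to have been returned by
+:ref:`MEDIA_IOC_REQUEST_ALLOC` and already queued):
+
+.. code-block:: c
+
+    struct pollfd pfd = { .events = POLLPRI, .fd = req_fd };
+
+    /* Blocks until the request completes or an error occurs. */
+    if (poll(&pfd, 1, -1) == -1)
+        return errno;
+    if (pfd.revents & POLLPRI) {
+        /* The request has completed: its buffers and controls are final. */
+    }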
+
+Return Value
+============
+
+On success, :c:func:`poll() <request-func-poll>` returns the number of
+structures which have non-zero ``revents`` fields, or zero if the call
+timed out. On error -1 is returned, and the ``errno`` variable is set
+appropriately:
+
+``EBADF``
+ One or more of the ``ufds`` members specify an invalid file
+ descriptor.
+
+``EFAULT``
+ ``ufds`` references an inaccessible memory area.
+
+``EINTR``
+ The call was interrupted by a signal.
+
+``EINVAL``
+ The ``nfds`` value exceeds the ``RLIMIT_NOFILE`` value. Use
+ ``getrlimit()`` to obtain this value.
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index e2c85ddc990b..2e266d32470a 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -306,10 +306,23 @@ struct v4l2_buffer
- A place holder for future extensions. Drivers and applications
must set this to 0.
* - __u32
- - ``reserved``
+ - ``request_fd``
-
- - A place holder for future extensions. Drivers and applications
- must set this to 0.
+ - The file descriptor of the request to queue the buffer to. If the flag
+ ``V4L2_BUF_FLAG_REQUEST_FD`` is set, then the buffer will be
+ queued to this request. If the flag is not set, then this field will
+ be ignored.
+
+ The ``V4L2_BUF_FLAG_REQUEST_FD`` flag and this field are only used by
+ :ref:`ioctl VIDIOC_QBUF <VIDIOC_QBUF>` and ignored by other ioctls that
+ take a :c:type:`v4l2_buffer` as argument.
+
+ Applications should not set ``V4L2_BUF_FLAG_REQUEST_FD`` for any ioctls
+ other than :ref:`VIDIOC_QBUF <VIDIOC_QBUF>`.
+
+ If the device does not support requests, then ``EACCES`` will be returned.
+ If requests are supported but an invalid request file descriptor is
+ given, then ``EINVAL`` will be returned.
@@ -514,6 +527,11 @@ Buffer Flags
streaming may continue as normal and the buffer may be reused
normally. Drivers set this flag when the ``VIDIOC_DQBUF`` ioctl is
called.
+ * .. _`V4L2-BUF-FLAG-IN-REQUEST`:
+
+ - ``V4L2_BUF_FLAG_IN_REQUEST``
+ - 0x00000080
+ - This buffer is part of a request that hasn't been queued yet.
* .. _`V4L2-BUF-FLAG-KEYFRAME`:
- ``V4L2_BUF_FLAG_KEYFRAME``
@@ -589,6 +607,11 @@ Buffer Flags
the format. Any subsequent call to the
:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore,
but return an ``EPIPE`` error code.
+ * .. _`V4L2-BUF-FLAG-REQUEST-FD`:
+
+ - ``V4L2_BUF_FLAG_REQUEST_FD``
+ - 0x00800000
+ - The ``request_fd`` field contains a valid file descriptor.
* .. _`V4L2-BUF-FLAG-TIMESTAMP-MASK`:
- ``V4L2_BUF_FLAG_TIMESTAMP_MASK``
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 9f7312bf3365..65a1d873196b 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -1497,6 +1497,182 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
+.. _v4l2-mpeg-mpeg2:
+
+``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)``
+ Specifies the slice parameters (as extracted from the bitstream) for the
+ associated MPEG-2 slice data. This includes the necessary parameters for
+ configuring a stateless hardware decoding pipeline for MPEG-2.
+ The bitstream parameters are defined according to :ref:`mpeg2part2`.
+
+.. c:type:: v4l2_ctrl_mpeg2_slice_params
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``bit_size``
+ - Size (in bits) of the current slice data.
+ * - __u32
+ - ``data_bit_offset``
+ - Offset (in bits) to the video data in the current slice data.
+ * - struct :c:type:`v4l2_mpeg2_sequence`
+ - ``sequence``
+ - Structure with MPEG-2 sequence metadata, merging relevant fields from
+ the sequence header and sequence extension parts of the bitstream.
+ * - struct :c:type:`v4l2_mpeg2_picture`
+ - ``picture``
+ - Structure with MPEG-2 picture metadata, merging relevant fields from
+ the picture header and picture coding extension parts of the bitstream.
+ * - __u8
+ - ``quantiser_scale_code``
+ - Code used to determine the quantization scale to use for the IDCT.
+ * - __u8
+ - ``backward_ref_index``
+ - Index for the V4L2 buffer to use as backward reference, used with
+ B-coded and P-coded frames.
+ * - __u8
+ - ``forward_ref_index``
+ - Index for the V4L2 buffer to use as forward reference, used with
+ B-coded frames.
+
+.. c:type:: v4l2_mpeg2_sequence
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_mpeg2_sequence
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u16
+ - ``horizontal_size``
+ - The width of the displayable part of the frame's luminance component.
+ * - __u16
+ - ``vertical_size``
+ - The height of the displayable part of the frame's luminance component.
+ * - __u32
+ - ``vbv_buffer_size``
+ - Used to calculate the required size of the video buffering verifier,
+ defined (in bits) as: 16 * 1024 * vbv_buffer_size.
+ * - __u8
+ - ``profile_and_level_indication``
+ - The current profile and level indication as extracted from the
+ bitstream.
+ * - __u8
+ - ``progressive_sequence``
+ - Indication that all the frames for the sequence are progressive instead
+ of interlaced.
+ * - __u8
+ - ``chroma_format``
+ - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4).
+
+.. c:type:: v4l2_mpeg2_picture
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_mpeg2_picture
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``picture_coding_type``
+ - Picture coding type for the frame covered by the current slice
+ (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or
+ V4L2_MPEG2_PICTURE_CODING_TYPE_B).
+ * - __u8
+ - ``f_code[2][2]``
+ - Motion vector codes.
+ * - __u8
+ - ``intra_dc_precision``
+ - Precision of Discrete Cosine transform (0: 8 bits precision,
+ 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision).
+ * - __u8
+ - ``picture_structure``
+ - Picture structure (1: interlaced top field, 2: interlaced bottom field,
+ 3: progressive frame).
+ * - __u8
+ - ``top_field_first``
+ - If set to 1 and interlaced stream, top field is output first.
+ * - __u8
+ - ``frame_pred_frame_dct``
+ - If set to 1, only frame-DCT and frame prediction are used.
+ * - __u8
+ - ``concealment_motion_vectors``
+ - If set to 1, motion vectors are coded for intra macroblocks.
+ * - __u8
+ - ``q_scale_type``
+ - This flag affects the inverse quantization process.
+ * - __u8
+ - ``intra_vlc_format``
+ - This flag affects the decoding of transform coefficient data.
+ * - __u8
+ - ``alternate_scan``
+ - This flag affects the decoding of transform coefficient data.
+ * - __u8
+ - ``repeat_first_field``
+ - This flag affects the decoding process of progressive frames.
+ * - __u8
+ - ``progressive_frame``
+ - Indicates whether the current frame is progressive.
+
+``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)``
+ Specifies quantization matrices (as extracted from the bitstream) for the
+ associated MPEG-2 slice data.
+
+.. c:type:: v4l2_ctrl_mpeg2_quantization
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_mpeg2_quantization
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``load_intra_quantiser_matrix``
+ - One bit to indicate whether to load the ``intra_quantiser_matrix`` data.
+ * - __u8
+ - ``load_non_intra_quantiser_matrix``
+ - One bit to indicate whether to load the ``non_intra_quantiser_matrix``
+ data.
+ * - __u8
+ - ``load_chroma_intra_quantiser_matrix``
+ - One bit to indicate whether to load the
+ ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV
+ formats.
+ * - __u8
+ - ``load_chroma_non_intra_quantiser_matrix``
+ - One bit to indicate whether to load the
+ ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0
+ YUV formats.
+ * - __u8
+ - ``intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for intra-coded frames, in zigzag
+ scanning order. It is relevant for both luma and chroma components,
+ although it can be superseded by the chroma-specific matrix for
+ non-4:2:0 YUV formats.
+ * - __u8
+ - ``non_intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for non-intra-coded frames, in
+ zigzag scanning order. It is relevant for both luma and chroma
+ components, although it can be superseded by the chroma-specific matrix
+ for non-4:2:0 YUV formats.
+ * - __u8
+ - ``chroma_intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for the chrominance component of
+ intra-coded frames, in zigzag scanning order. Only relevant for
+ non-4:2:0 YUV formats.
+ * - __u8
+ - ``chroma_non_intra_quantiser_matrix[64]``
+ - The quantization matrix coefficients for the chrominance component of
+ non-intra-coded frames, in zigzag scanning order. Only relevant for
+ non-4:2:0 YUV formats.
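+
+As an illustrative sketch only, these compound controls are typically passed
+to a stateless decoder together with a :ref:`request <media-request-api>`;
+``codec_fd``, ``req_fd`` and the parsed bitstream values (``slice_size_in_bits``,
+``slice_data_bit_offset``) are assumptions made for the example:
+
+.. code-block:: c
+
+    struct v4l2_ctrl_mpeg2_slice_params slice_params = {};
+    struct v4l2_ext_control ctrl = {};
+    struct v4l2_ext_controls ctrls = {};
+
+    /* Filled in from the parsed MPEG-2 bitstream. */
+    slice_params.bit_size = slice_size_in_bits;
+    slice_params.data_bit_offset = slice_data_bit_offset;
+
+    ctrl.id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS;
+    ctrl.ptr = &slice_params;
+    ctrl.size = sizeof(slice_params);
+
+    ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
+    ctrls.request_fd = req_fd;
+    ctrls.count = 1;
+    ctrls.controls = &ctrl;
+
+    if (ioctl(codec_fd, VIDIOC_S_EXT_CTRLS, &ctrls))
+        return errno;
+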
MFC 5.1 MPEG Controls
---------------------
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index d04b18adac33..ba0f6c49d9bf 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -60,6 +60,22 @@ Compressed Formats
- ``V4L2_PIX_FMT_MPEG2``
- 'MPG2'
- MPEG2 video elementary stream.
+ * .. _V4L2-PIX-FMT-MPEG2-SLICE:
+
+ - ``V4L2_PIX_FMT_MPEG2_SLICE``
+ - 'MG2S'
+ - MPEG-2 parsed slice data, as extracted from the MPEG-2 bitstream.
+ This format is adapted for stateless video decoders that implement a
+ MPEG-2 pipeline (using the :ref:`codec` and :ref:`media-request-api`).
+ Metadata associated with the frame to decode is required to be passed
+ through the ``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS`` control and
+ quantization matrices can optionally be specified through the
+ ``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION`` control.
+ See the :ref:`associated Codec Control IDs <v4l2-mpeg-mpeg2>`.
+ Exactly one output and one capture buffer must be provided for use with
+ this pixel format. The output buffer must contain the appropriate number
+ of macroblocks to decode a full corresponding frame to the matching
+ capture buffer.
* .. _V4L2-PIX-FMT-MPEG4:
- ``V4L2_PIX_FMT_MPEG4``
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index 38af1472a4b4..0c399858bda2 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -243,7 +243,20 @@ please make a proposal on the linux-media mailing list.
It is an opaque intermediate format and the MDP hardware must be
used to convert ``V4L2_PIX_FMT_MT21C`` to ``V4L2_PIX_FMT_NV12M``,
``V4L2_PIX_FMT_YUV420M`` or ``V4L2_PIX_FMT_YVU420``.
-
+ * .. _V4L2-PIX-FMT-SUNXI-TILED-NV12:
+
+ - ``V4L2_PIX_FMT_SUNXI_TILED_NV12``
+ - 'ST12'
+ - Two-planar NV12-based format used by the video engine found on Allwinner
+ (codenamed sunxi) platforms, with 32x32 tiles for the luminance plane
+ and 32x64 tiles for the chrominance plane. The data in each tile is
+ stored in linear order, within the tile bounds. Each tile follows the
+ previous one linearly in memory (from left to right, top to bottom).
+
+ The associated buffer dimensions are aligned to match an integer number
+ of tiles, resulting in 32-aligned resolutions for the luminance plane
+ and 16-aligned resolutions for the chrominance plane (with 2x2
+ subsampling).
.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
diff --git a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
index a39e18d69511..eadf6f757fbf 100644
--- a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
@@ -102,7 +102,19 @@ than the number requested.
- ``format``
- Filled in by the application, preserved by the driver.
* - __u32
- - ``reserved``\ [8]
+ - ``capabilities``
+ - Set by the driver. If 0, then the driver doesn't support
+ capabilities. In that case all you know is that the driver is
+ guaranteed to support ``V4L2_MEMORY_MMAP`` and *might* support
+ other :c:type:`v4l2_memory` types. It will not support any other
+ capabilities. See :ref:`here <v4l2-buf-capabilities>` for a list of the
+ capabilities.
+
+ If you want to just query the capabilities without making any
+ other changes, then set ``count`` to 0, ``memory`` to
+ ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type.
+ * - __u32
+ - ``reserved``\ [7]
- A place holder for future extensions. Drivers and applications
must set the array to zero.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
index 2011c2b2ee67..d9930fe776cf 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
@@ -95,6 +95,25 @@ appropriate. In the first case the new value is set in struct
is inappropriate (e.g. the given menu index is not supported by the menu
control), then this will also result in an ``EINVAL`` error code error.
+If ``request_fd`` is set to a not-yet-queued :ref:`request <media-request-api>`
+file descriptor and ``which`` is set to ``V4L2_CTRL_WHICH_REQUEST_VAL``,
+then the controls are not applied immediately when calling
+:ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, but instead are applied by
+the driver for the buffer associated with the same request.
+If the device does not support requests, then ``EACCES`` will be returned.
+If requests are supported but an invalid request file descriptor is given,
+then ``EINVAL`` will be returned.
+
+An attempt to call :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` for a
+request that has already been queued will result in an ``EBUSY`` error.
+
+If ``request_fd`` is specified and ``which`` is set to
+``V4L2_CTRL_WHICH_REQUEST_VAL`` during a call to
+:ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, then it will return the
+values of the controls at the time of request completion.
+If the request is not yet completed, then this will result in an
+``EACCES`` error.
+
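+A minimal sketch of reading back a control value from a completed request
+(``req_fd`` and ``fd`` are assumed to be the completed request file
+descriptor and the corresponding video device file descriptor;
+``V4L2_CID_BRIGHTNESS`` is just a stand-in for any control in the request):
+
+.. code-block:: c
+
+    struct v4l2_ext_control ctrl = { .id = V4L2_CID_BRIGHTNESS };
+    struct v4l2_ext_controls ctrls = {
+        .which = V4L2_CTRL_WHICH_REQUEST_VAL,
+        .request_fd = req_fd,
+        .count = 1,
+        .controls = &ctrl,
+    };
+
+    /* Returns the value the control had when the request completed. */
+    if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls))
+        return errno;
+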
The driver will only set/get these controls if all control values are
correct. This prevents the situation where only some of the controls
were set/get. Only low-level errors (e. g. a failed i2c command) can
@@ -209,13 +228,17 @@ still cause this situation.
- ``which``
- Which value of the control to get/set/try.
``V4L2_CTRL_WHICH_CUR_VAL`` will return the current value of the
- control and ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default
- value of the control.
+ control, ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default
+ value of the control and ``V4L2_CTRL_WHICH_REQUEST_VAL`` indicates that
+ these controls have to be retrieved from a request or tried/set for
+ a request. In the latter case the ``request_fd`` field contains the
+ file descriptor of the request that should be used. If the device
+ does not support requests, then ``EACCES`` will be returned.
.. note::
- You can only get the default value of the control,
- you cannot set or try it.
+ When using ``V4L2_CTRL_WHICH_DEF_VAL`` be aware that you can only
+ get the default value of the control, you cannot set or try it.
For backwards compatibility you can also use a control class here
(see :ref:`ctrl-class`). In that case all controls have to
@@ -272,8 +295,15 @@ still cause this situation.
then you can call :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` to try to discover the
actual control that failed the validation step. Unfortunately,
there is no ``TRY`` equivalent for :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`.
+ * - __s32
+ - ``request_fd``
+ - File descriptor of the request to be used by this operation. Only
+ valid if ``which`` is set to ``V4L2_CTRL_WHICH_REQUEST_VAL``.
+ If the device does not support requests, then ``EACCES`` will be returned.
+ If requests are supported but an invalid request file descriptor is
+ given, then ``EINVAL`` will be returned.
* - __u32
- - ``reserved``\ [2]
+ - ``reserved``\ [1]
- Reserved for future extensions.
Drivers and applications must set the array to zero.
@@ -347,11 +377,14 @@ appropriately. The generic error codes are described at the
EINVAL
The struct :c:type:`v4l2_ext_control` ``id`` is
- invalid, the struct :c:type:`v4l2_ext_controls`
+ invalid, or the struct :c:type:`v4l2_ext_controls`
``which`` is invalid, or the struct
:c:type:`v4l2_ext_control` ``value`` was
inappropriate (e.g. the given menu index is not supported by the
- driver). This error code is also returned by the
+ driver), or the ``which`` field was set to ``V4L2_CTRL_WHICH_REQUEST_VAL``
+ but the given ``request_fd`` was invalid or ``V4L2_CTRL_WHICH_REQUEST_VAL``
+ is not supported by the kernel.
+ This error code is also returned by the
:ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` and :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctls if two or
more control values are in conflict.
@@ -362,7 +395,9 @@ ERANGE
EBUSY
The control is temporarily not changeable, possibly because another
application took over control of the device function this control
- belongs to.
+ belongs to, or (if the ``which`` field was set to
+ ``V4L2_CTRL_WHICH_REQUEST_VAL``) the request was queued but not yet
+ completed.
ENOSPC
The space reserved for the control's payload is insufficient. The
@@ -370,5 +405,9 @@ ENOSPC
and this error code is returned.
EACCES
- Attempt to try or set a read-only control or to get a write-only
- control.
+ Attempt to try or set a read-only control, or to get a write-only
+ control, or to get a control from a request that has not yet been
+ completed.
+
+ Or the ``which`` field was set to ``V4L2_CTRL_WHICH_REQUEST_VAL`` but the
+ device does not support requests.
diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
index 9e448a4aa3aa..753b3b5946b1 100644
--- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
@@ -65,7 +65,7 @@ To enqueue a :ref:`memory mapped <mmap>` buffer applications set the
with a pointer to this structure the driver sets the
``V4L2_BUF_FLAG_MAPPED`` and ``V4L2_BUF_FLAG_QUEUED`` flags and clears
the ``V4L2_BUF_FLAG_DONE`` flag in the ``flags`` field, or it returns an
-EINVAL error code.
+``EINVAL`` error code.
To enqueue a :ref:`user pointer <userp>` buffer applications set the
``memory`` field to ``V4L2_MEMORY_USERPTR``, the ``m.userptr`` field to
@@ -98,6 +98,28 @@ dequeued, until the :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` or
:ref:`VIDIOC_REQBUFS` ioctl is called, or until the
device is closed.
+The ``request_fd`` field can be used with the ``VIDIOC_QBUF`` ioctl to specify
+the file descriptor of a :ref:`request <media-request-api>`, if requests are
+in use. Setting it means that the buffer will not be passed to the driver
+until the request itself is queued. Also, the driver will apply any
+settings associated with the request for this buffer. This field will
+be ignored unless the ``V4L2_BUF_FLAG_REQUEST_FD`` flag is set.
+If the device does not support requests, then ``EACCES`` will be returned.
+If requests are supported but an invalid request file descriptor is given,
+then ``EINVAL`` will be returned.
+
+.. caution::
+ It is not allowed to mix queuing requests with queuing buffers directly.
+ ``EBUSY`` will be returned if the first buffer was queued directly and
+ then the application tries to queue a request, or vice versa. This
+ check is reset once the file descriptor is closed, or after calling
+ :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` or calling
+ :ref:`VIDIOC_REQBUFS`.
+
+ For :ref:`memory-to-memory devices <codec>` you can specify the
+ ``request_fd`` only for output buffers, not for capture buffers. Attempting
+ to specify this for a capture buffer will result in an ``EACCES`` error.
+
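+A minimal sketch of queueing an output buffer to a request (``req_fd`` is
+assumed to come from :ref:`MEDIA_IOC_REQUEST_ALLOC` and ``buf`` to be an
+otherwise fully prepared struct :c:type:`v4l2_buffer`):
+
+.. code-block:: c
+
+    buf.flags |= V4L2_BUF_FLAG_REQUEST_FD;
+    buf.request_fd = req_fd;
+    /* The buffer stays with the request until the request itself is queued. */
+    if (ioctl(fd, VIDIOC_QBUF, &buf))
+        return errno;
+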
Applications call the ``VIDIOC_DQBUF`` ioctl to dequeue a filled
(capturing) or displayed (output) buffer from the driver's outgoing
queue. They just set the ``type``, ``memory`` and ``reserved`` fields of
@@ -133,7 +155,9 @@ EAGAIN
EINVAL
The buffer ``type`` is not supported, or the ``index`` is out of
bounds, or no buffers have been allocated yet, or the ``userptr`` or
- ``length`` are invalid.
+ ``length`` are invalid, or the ``V4L2_BUF_FLAG_REQUEST_FD`` flag was
+ set but the given ``request_fd`` was invalid, or ``m.fd`` was
+ an invalid DMABUF file descriptor.
EIO
``VIDIOC_DQBUF`` failed due to an internal error. Can also indicate
@@ -153,3 +177,12 @@ EPIPE
``VIDIOC_DQBUF`` returns this on an empty capture queue for mem2mem
codecs if a buffer with the ``V4L2_BUF_FLAG_LAST`` was already
dequeued and no new buffers are expected to become available.
+
+EACCES
+ The ``V4L2_BUF_FLAG_REQUEST_FD`` flag was set but the device does not
+ support requests for the given buffer type.
+
+EBUSY
+ The first buffer was queued via a request, but the application now tries
+ to queue it directly, or vice versa (it is not permitted to mix the two
+ APIs).
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index 5bd26e8c9a1a..258f5813f281 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -424,8 +424,18 @@ See also the examples in :ref:`control`.
- any
- An unsigned 32-bit valued control ranging from minimum to maximum
inclusive. The step value indicates the increment between values.
-
-
+ * - ``V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_mpeg2_slice_params`, containing MPEG-2
+ slice parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_MPEG2_QUANTIZATION``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_mpeg2_quantization`, containing MPEG-2
+ quantization matrices for stateless video decoders.
.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
index 316f52c8a310..d4bbbb0c60e8 100644
--- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
@@ -88,10 +88,50 @@ any DMA in progress, an implicit
``V4L2_MEMORY_DMABUF`` or ``V4L2_MEMORY_USERPTR``. See
:c:type:`v4l2_memory`.
* - __u32
- - ``reserved``\ [2]
+ - ``capabilities``
+ - Set by the driver. If 0, then the driver doesn't support
+ capabilities. In that case all you know is that the driver is
+ guaranteed to support ``V4L2_MEMORY_MMAP`` and *might* support
+ other :c:type:`v4l2_memory` types. It will not support any other
+ capabilities.
+
+ If you want to query the capabilities with a minimum of side-effects,
+ then this can be called with ``count`` set to 0, ``memory`` set to
+ ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type, as shown in
+ the sketch after the capabilities table below. This will
+ free any previously allocated buffers, so this is typically something
+ that will be done at the start of the application.
+ * - __u32
+ - ``reserved``\ [1]
- A place holder for future extensions. Drivers and applications
must set the array to zero.
+.. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}|
+
+.. _v4l2-buf-capabilities:
+.. _V4L2-BUF-CAP-SUPPORTS-MMAP:
+.. _V4L2-BUF-CAP-SUPPORTS-USERPTR:
+.. _V4L2-BUF-CAP-SUPPORTS-DMABUF:
+.. _V4L2-BUF-CAP-SUPPORTS-REQUESTS:
+
+.. cssclass:: longtable
+
+.. flat-table:: V4L2 Buffer Capabilities Flags
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 4
+
+ * - ``V4L2_BUF_CAP_SUPPORTS_MMAP``
+ - 0x00000001
+ - This buffer type supports the ``V4L2_MEMORY_MMAP`` streaming mode.
+ * - ``V4L2_BUF_CAP_SUPPORTS_USERPTR``
+ - 0x00000002
+ - This buffer type supports the ``V4L2_MEMORY_USERPTR`` streaming mode.
+ * - ``V4L2_BUF_CAP_SUPPORTS_DMABUF``
+ - 0x00000004
+ - This buffer type supports the ``V4L2_MEMORY_DMABUF`` streaming mode.
+ * - ``V4L2_BUF_CAP_SUPPORTS_REQUESTS``
+ - 0x00000008
+ - This buffer type supports :ref:`requests <media-request-api>`.
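A minimal sketch of the probe described in the ``capabilities`` cell above, assuming only the standard videodev2.h definitions; with count set to 0 no buffers remain allocated afterwards:

/*
 * Sketch: query the per-queue capabilities with VIDIOC_REQBUFS and
 * count = 0, then test for request support.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_supports_requests(int vid_fd, __u32 buf_type)
{
	struct v4l2_requestbuffers reqbufs;

	memset(&reqbufs, 0, sizeof(reqbufs));
	reqbufs.count = 0;
	reqbufs.type = buf_type;
	reqbufs.memory = V4L2_MEMORY_MMAP;

	if (ioctl(vid_fd, VIDIOC_REQBUFS, &reqbufs) < 0)
		return -1;

	/* A zero capabilities field means the driver predates this API. */
	return !!(reqbufs.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS);
}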
Return Value
============
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index 1f4340dd9a37..1ec425a7c364 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -131,6 +131,8 @@ replace symbol V4L2_CTRL_TYPE_STRING :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_U16 :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_U32 :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_U8 :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_MPEG2_QUANTIZATION :c:type:`v4l2_ctrl_type`
# V4L2 capability defines
replace define V4L2_CAP_VIDEO_CAPTURE device-capabilities
@@ -517,6 +519,7 @@ ignore define V4L2_CTRL_DRIVER_PRIV
ignore define V4L2_CTRL_MAX_DIMS
ignore define V4L2_CTRL_WHICH_CUR_VAL
ignore define V4L2_CTRL_WHICH_DEF_VAL
+ignore define V4L2_CTRL_WHICH_REQUEST_VAL
ignore define V4L2_OUT_CAP_CUSTOM_TIMINGS
ignore define V4L2_CID_MAX_CTRLS
diff --git a/Documentation/networking/ice.rst b/Documentation/networking/ice.rst
index 1e4948c9e989..4d118b827bbb 100644
--- a/Documentation/networking/ice.rst
+++ b/Documentation/networking/ice.rst
@@ -20,7 +20,7 @@ Enabling the driver
The driver is enabled via the standard kernel configuration system,
using the make command::
- make oldconfig/silentoldconfig/menuconfig/etc.
+ make oldconfig/menuconfig/etc.
The driver is located in the menu structure at:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 163b5ff1073c..32b21571adfe 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -316,6 +316,17 @@ tcp_frto - INTEGER
By default it's enabled with a non-zero value. 0 disables F-RTO.
+tcp_fwmark_accept - BOOLEAN
+ If set, incoming connections to listening sockets that do not have a
+ socket mark will set the mark of the accepting socket to the fwmark of
+ the incoming SYN packet. This will cause all packets on that connection
+ (starting from the first SYNACK) to be sent with that fwmark. The
+ listening socket's mark is unchanged. Listening sockets that already
+ have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
+ unaffected.
+
+ Default: 0
+
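A small user-space sketch of the observable effect, assuming the sysctl is enabled and the listener has no SO_MARK of its own; the mark inherited from the SYN can be read back on the accepted socket:

/*
 * Sketch: with net.ipv4.tcp_fwmark_accept = 1, an accepted connection
 * carries the fwmark of the incoming SYN. listen_fd is assumed to be a
 * bound, listening TCP socket.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

static void show_accepted_mark(int listen_fd)
{
	unsigned int mark = 0;
	socklen_t len = sizeof(mark);
	int conn_fd = accept(listen_fd, NULL, NULL);

	if (conn_fd < 0)
		return;
	if (getsockopt(conn_fd, SOL_SOCKET, SO_MARK, &mark, &len) == 0)
		printf("accepted connection carries fwmark %u\n", mark);
	close(conn_fd);
}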
tcp_invalid_ratelimit - INTEGER
Limit the maximal rate for sending duplicate acknowledgments
in response to incoming TCP packets that are for an existing
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 757808526d9a..878ebfda7eef 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
code-of-conduct-interpretation
development-process
submitting-patches
+ programming-language
coding-style
maintainer-pgp-guide
email-clients
diff --git a/Documentation/process/programming-language.rst b/Documentation/process/programming-language.rst
new file mode 100644
index 000000000000..e5f5f065dc24
--- /dev/null
+++ b/Documentation/process/programming-language.rst
@@ -0,0 +1,45 @@
+.. _programming_language:
+
+Programming Language
+====================
+
+The kernel is written in the C programming language [c-language]_.
+More precisely, the kernel is typically compiled with ``gcc`` [gcc]_
+under ``-std=gnu89`` [gcc-c-dialect-options]_: the GNU dialect of ISO C90
+(including some C99 features).
+
+This dialect contains many extensions to the language [gnu-extensions]_,
+and many of them are used within the kernel as a matter of course.
+
+There is some support for compiling the kernel with ``clang`` [clang]_
+and ``icc`` [icc]_ for several of the architectures, although at the time
+of writing it is not complete and requires third-party patches.
+
+Attributes
+----------
+
+One of the common extensions used throughout the kernel is attributes
+[gcc-attribute-syntax]_. Attributes allow introducing
+implementation-defined semantics to language entities (like variables,
+functions or types) without having to make significant syntactic changes
+to the language (e.g. adding a new keyword) [n2049]_.
+
+In some cases, attributes are optional (i.e. a compiler not supporting them
+should still produce proper code, even if it is slower or does not perform
+as many compile-time checks/diagnostics).
+
+The kernel defines pseudo-keywords (e.g. ``__pure``) instead of using
+the GNU attribute syntax (e.g. ``__attribute__((__pure__))``) directly,
+in order to feature-detect which attributes can be used and/or to shorten the code.
+
+Please refer to ``include/linux/compiler_attributes.h`` for more information.
+
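A short sketch, assuming ordinary kernel code where linux/compiler.h is in scope, of the raw attribute syntax next to the pseudo-keyword spelling; both carry the same attribute:

/*
 * Sketch: raw GNU attribute syntax versus the kernel pseudo-keyword.
 * Both mark the function as pure; the pseudo-keyword is feature-detected
 * centrally in include/linux/compiler_attributes.h.
 */
#include <linux/compiler.h>

/* Raw GNU syntax. */
static int __attribute__((__pure__)) sum_raw(const int *v, int n)
{
	int i, s = 0;

	for (i = 0; i < n; i++)
		s += v[i];
	return s;
}

/* Kernel pseudo-keyword, same semantics. */
static __pure int sum_kernel(const int *v, int n)
{
	int i, s = 0;

	for (i = 0; i < n; i++)
		s += v[i];
	return s;
}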
+.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
+.. [gcc] https://gcc.gnu.org
+.. [clang] https://clang.llvm.org
+.. [icc] https://software.intel.com/en-us/c-compilers
+.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
+.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
+.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
+.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
+
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index 9ce7256c6edb..9521c4207f01 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -859,6 +859,7 @@ The keyctl syscall functions are:
and either the buffer length or the OtherInfo length exceeds the
allowed length.
+
* Restrict keyring linkage::
long keyctl(KEYCTL_RESTRICT_KEYRING, key_serial_t keyring,
@@ -890,6 +891,116 @@ The keyctl syscall functions are:
applicable to the asymmetric key type.
+ * Query an asymmetric key::
+
+ long keyctl(KEYCTL_PKEY_QUERY,
+ key_serial_t key_id, unsigned long reserved,
+ struct keyctl_pkey_query *info);
+
+ Get information about an asymmetric key. The information is returned in
+ the keyctl_pkey_query struct::
+
+ __u32 supported_ops;
+ __u32 key_size;
+ __u16 max_data_size;
+ __u16 max_sig_size;
+ __u16 max_enc_size;
+ __u16 max_dec_size;
+ __u32 __spare[10];
+
+ ``supported_ops`` contains a bit mask of flags indicating which ops are
+ supported. This is constructed from a bitwise-OR of::
+
+ KEYCTL_SUPPORTS_{ENCRYPT,DECRYPT,SIGN,VERIFY}
+
+  ``key_size`` indicates the size of the key in bits.
+
+ ``max_*_size`` indicate the maximum sizes in bytes of a blob of data to be
+ signed, a signature blob, a blob to be encrypted and a blob to be
+ decrypted.
+
+  ``__spare[]`` must be set to 0. This is intended for future use to hand
+  over one or more passphrases needed to unlock a key.
+
+ If successful, 0 is returned. If the key is not an asymmetric key,
+ EOPNOTSUPP is returned.
+
+
+ * Encrypt, decrypt, sign or verify a blob using an asymmetric key::
+
+ long keyctl(KEYCTL_PKEY_ENCRYPT,
+ const struct keyctl_pkey_params *params,
+ const char *info,
+ const void *in,
+ void *out);
+
+ long keyctl(KEYCTL_PKEY_DECRYPT,
+ const struct keyctl_pkey_params *params,
+ const char *info,
+ const void *in,
+ void *out);
+
+ long keyctl(KEYCTL_PKEY_SIGN,
+ const struct keyctl_pkey_params *params,
+ const char *info,
+ const void *in,
+ void *out);
+
+ long keyctl(KEYCTL_PKEY_VERIFY,
+ const struct keyctl_pkey_params *params,
+ const char *info,
+ const void *in,
+ const void *in2);
+
+  Use an asymmetric key to perform a public-key cryptographic operation on a
+  blob of data. For encryption and verification, the asymmetric key may
+  only need the public parts to be available, but for decryption and signing
+  the private parts are also required.
+
+ The parameter block pointed to by params contains a number of integer
+ values::
+
+ __s32 key_id;
+ __u32 in_len;
+ __u32 out_len;
+ __u32 in2_len;
+
+ ``key_id`` is the ID of the asymmetric key to be used. ``in_len`` and
+ ``in2_len`` indicate the amount of data in the in and in2 buffers and
+ ``out_len`` indicates the size of the out buffer as appropriate for the
+ above operations.
+
+ For a given operation, the in and out buffers are used as follows::
+
+ Operation ID in,in_len out,out_len in2,in2_len
+ ======================= =============== =============== ===============
+ KEYCTL_PKEY_ENCRYPT Raw data Encrypted data -
+ KEYCTL_PKEY_DECRYPT Encrypted data Raw data -
+ KEYCTL_PKEY_SIGN Raw data Signature -
+ KEYCTL_PKEY_VERIFY Raw data - Signature
+
+ ``info`` is a string of key=value pairs that supply supplementary
+ information. These include:
+
+ ``enc=<encoding>`` The encoding of the encrypted/signature blob. This
+ can be "pkcs1" for RSASSA-PKCS1-v1.5 or
+ RSAES-PKCS1-v1.5; "pss" for "RSASSA-PSS"; "oaep" for
+			"RSAES-OAEP". If omitted or set to "raw", the raw output
+			of the encryption function is used.
+
+ ``hash=<algo>`` If the data buffer contains the output of a hash
+ function and the encoding includes some indication of
+ which hash function was used, the hash function can be
+			specified with this, e.g. "hash=sha256".
+
+ The ``__spare[]`` space in the parameter block must be set to 0. This is
+ intended, amongst other things, to allow the passing of passphrases
+ required to unlock a key.
+
+ If successful, encrypt, decrypt and sign all return the amount of data
+ written into the output buffer. Verification returns 0 on success.
+
+
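A hedged user-space sketch of the sign operation, following the KEYCTL_PKEY_SIGN prototype and parameter block above; raw syscall(2) is used only for illustration, real code would normally go through the libkeyutils wrappers and the exact argument layout should be checked against linux/keyctl.h:

/*
 * Sketch: sign a SHA-256 digest with an asymmetric key. "key" is assumed
 * to reference a key whose private parts are available.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

static long pkey_sign_sha256(int32_t key, const void *digest, uint32_t dlen,
			     void *sig, uint32_t slen)
{
	struct keyctl_pkey_params params;

	memset(&params, 0, sizeof(params));	/* also clears __spare[] */
	params.key_id = key;
	params.in_len = dlen;			/* digest to be signed */
	params.out_len = slen;			/* room for the signature */

	/* Encoding and digest name go in the info key=value string. */
	return syscall(SYS_keyctl, KEYCTL_PKEY_SIGN, &params,
		       "enc=pkcs1 hash=sha256", digest, sig);
}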
Kernel Services
===============
@@ -1483,6 +1594,112 @@ The structure has a number of fields, some of which are mandatory:
attempted key link operation. If there is no match, -EINVAL is returned.
+ * ``int (*asym_eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);``
+ ``int (*asym_verify_signature)(struct kernel_pkey_params *params,
+ const void *in, const void *in2);``
+
+     These methods are optional. If provided, the first allows a key to be
+ used to encrypt, decrypt or sign a blob of data, and the second allows a
+ key to verify a signature.
+
+ In all cases, the following information is provided in the params block::
+
+ struct kernel_pkey_params {
+ struct key *key;
+ const char *encoding;
+ const char *hash_algo;
+ char *info;
+ __u32 in_len;
+ union {
+ __u32 out_len;
+ __u32 in2_len;
+ };
+ enum kernel_pkey_operation op : 8;
+ };
+
+ This includes the key to be used; a string indicating the encoding to use
+ (for instance, "pkcs1" may be used with an RSA key to indicate
+ RSASSA-PKCS1-v1.5 or RSAES-PKCS1-v1.5 encoding or "raw" if no encoding);
+ the name of the hash algorithm used to generate the data for a signature
+ (if appropriate); the sizes of the input and output (or second input)
+ buffers; and the ID of the operation to be performed.
+
+ For a given operation ID, the input and output buffers are used as
+ follows::
+
+ Operation ID in,in_len out,out_len in2,in2_len
+ ======================= =============== =============== ===============
+ kernel_pkey_encrypt Raw data Encrypted data -
+ kernel_pkey_decrypt Encrypted data Raw data -
+ kernel_pkey_sign Raw data Signature -
+ kernel_pkey_verify Raw data - Signature
+
+ asym_eds_op() deals with encryption, decryption and signature creation as
+ specified by params->op. Note that params->op is also set for
+ asym_verify_signature().
+
+     Encryption and signature creation both take raw data in the input buffer
+ and return the encrypted result in the output buffer. Padding may have
+ been added if an encoding was set. In the case of signature creation,
+ depending on the encoding, the padding created may need to indicate the
+ digest algorithm - the name of which should be supplied in hash_algo.
+
+ Decryption takes encrypted data in the input buffer and returns the raw
+ data in the output buffer. Padding will get checked and stripped off if
+ an encoding was set.
+
+ Verification takes raw data in the input buffer and the signature in the
+ second input buffer and checks that the one matches the other. Padding
+ will be validated. Depending on the encoding, the digest algorithm used
+ to generate the raw data may need to be indicated in hash_algo.
+
+ If successful, asym_eds_op() should return the number of bytes written
+ into the output buffer. asym_verify_signature() should return 0.
+
+ A variety of errors may be returned, including EOPNOTSUPP if the operation
+ is not supported; EKEYREJECTED if verification fails; ENOPKG if the
+ required crypto isn't available.
+
+
+ * ``int (*asym_query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);``
+
+     This method is optional. If provided, it allows information about the
+ public or asymmetric key held in the key to be determined.
+
+ The parameter block is as for asym_eds_op() and co. but in_len and out_len
+ are unused. The encoding and hash_algo fields should be used to reduce
+ the returned buffer/data sizes as appropriate.
+
+ If successful, the following information is filled in::
+
+ struct kernel_pkey_query {
+ __u32 supported_ops;
+ __u32 key_size;
+ __u16 max_data_size;
+ __u16 max_sig_size;
+ __u16 max_enc_size;
+ __u16 max_dec_size;
+ };
+
+ The supported_ops field will contain a bitmask indicating what operations
+ are supported by the key, including encryption of a blob, decryption of a
+ blob, signing a blob and verifying the signature on a blob. The following
+ constants are defined for this::
+
+ KEYCTL_SUPPORTS_{ENCRYPT,DECRYPT,SIGN,VERIFY}
+
+ The key_size field is the size of the key in bits. max_data_size and
+ max_sig_size are the maximum raw data and signature sizes for creation and
+ verification of a signature; max_enc_size and max_dec_size are the maximum
+ raw data and signature sizes for encryption and decryption. The
+ max_*_size fields are measured in bytes.
+
+ If successful, 0 will be returned. If the key doesn't support this,
+ EOPNOTSUPP will be returned.
+
+
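A rough kernel-side sketch of an ->asym_query() implementation for a hypothetical 2048-bit RSA-style key type; all sizes are illustrative and a real implementation derives them from the key material:

/*
 * Sketch: fill in kernel_pkey_query for a hypothetical 2048-bit key.
 * This function would be pointed to by a key type's .asym_query member.
 */
#include <linux/keyctl.h>

static int example_asym_query(const struct kernel_pkey_params *params,
			      struct kernel_pkey_query *info)
{
	info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT |
			      KEYCTL_SUPPORTS_DECRYPT |
			      KEYCTL_SUPPORTS_SIGN |
			      KEYCTL_SUPPORTS_VERIFY;
	info->key_size      = 2048;		/* bits */
	info->max_data_size = 2048 / 8;		/* bytes */
	info->max_sig_size  = 2048 / 8;
	info->max_enc_size  = 2048 / 8;
	info->max_dec_size  = 2048 / 8;
	return 0;
}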
Request-Key Callback Service
============================
diff --git a/Documentation/security/self-protection.rst b/Documentation/security/self-protection.rst
index e1ca698e0006..f584fb74b4ff 100644
--- a/Documentation/security/self-protection.rst
+++ b/Documentation/security/self-protection.rst
@@ -302,11 +302,11 @@ sure structure holes are cleared.
Memory poisoning
----------------
-When releasing memory, it is best to poison the contents (clear stack on
-syscall return, wipe heap memory on a free), to avoid reuse attacks that
-rely on the old contents of memory. This frustrates many uninitialized
-variable attacks, stack content exposures, heap content exposures, and
-use-after-free attacks.
+When releasing memory, it is best to poison the contents, to avoid reuse
+attacks that rely on the old contents of memory. E.g., clear stack on a
+syscall return (``CONFIG_GCC_PLUGIN_STACKLEAK``), wipe heap memory on a
+free. This frustrates many uninitialized variable attacks, stack content
+exposures, heap content exposures, and use-after-free attacks.
Destination tracking
--------------------
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 37a679501ddc..1b8775298cf7 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -89,6 +89,7 @@ show up in /proc/sys/kernel:
- shmmni
- softlockup_all_cpu_backtrace
- soft_watchdog
+- stack_erasing
- stop-a [ SPARC only ]
- sysrq ==> Documentation/admin-guide/sysrq.rst
- sysctl_writes_strict
@@ -987,6 +988,23 @@ detect a hard lockup condition.
==============================================================
+stack_erasing
+
+This parameter can be used to control kernel stack erasing at the end
+of syscalls for kernels built with CONFIG_GCC_PLUGIN_STACKLEAK.
+
+That erasing reduces the information which kernel stack leak bugs
+can reveal and blocks some uninitialized stack variable attacks.
+The tradeoff is the performance impact: on a single-CPU system, kernel
+compilation sees a 1% slowdown; other systems and workloads may vary.
+
+  0: kernel stack erasing is disabled; STACKLEAK_METRICS are not updated.
+
+  1: kernel stack erasing is enabled (default); it is performed before
+     returning to userspace at the end of syscalls.
+
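A tiny user-space sketch of flipping the knob, assuming a STACKLEAK-enabled kernel and sufficient privileges; the sysctl is exposed as /proc/sys/kernel/stack_erasing:

/*
 * Sketch: enable or disable kernel stack erasing at run time.
 */
#include <stdio.h>

static int set_stack_erasing(int enable)
{
	FILE *f = fopen("/proc/sys/kernel/stack_erasing", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}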
+==============================================================
+
tainted:
Non-zero if the kernel has been tainted. Numeric values, which can be
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index 8bfc75c90806..47e765c2f2c3 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -45,16 +45,18 @@ Synopsis of kprobe_events
@SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol)
$stackN : Fetch Nth entry of stack (N >= 0)
$stack : Fetch stack address.
- $retval : Fetch return value.(*)
+ $argN : Fetch the Nth function argument. (N >= 1) (\*1)
+ $retval : Fetch return value.(\*2)
$comm : Fetch current task comm.
- +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
+ +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(\*3)
NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
(u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
(x8/x16/x32/x64), "string" and bitfield are supported.
- (*) only for return probe.
- (**) this is useful for fetching a field of data structures.
+ (\*1) only for the probe on function entry (offs == 0).
+ (\*2) only for return probe.
+ (\*3) this is useful for fetching a field of data structures.
Types
-----
@@ -64,14 +66,27 @@ respectively. 'x' prefix implies it is unsigned. Traced arguments are shown
in decimal ('s' and 'u') or hexadecimal ('x'). Without type casting, 'x32'
or 'x64' is used depends on the architecture (e.g. x86-32 uses x32, and
x86-64 uses x64).
+These value types can be an array. To record array data, you can add '[N]'
+(where N is a fixed number, less than 64) to the base type.
+E.g. 'x16[4]' means an array of x16 (2-byte hex) with 4 elements.
+Note that the array can be applied only to memory type fetchargs; you cannot
+apply it to registers/stack-entries etc. (for example, '$stack1:x8[8]' is
+wrong, but '+8($stack):x8[8]' is OK.)
String type is a special type, which fetches a "null-terminated" string from
kernel space. This means it will fail and store NULL if the string container
has been paged out.
+The string array type is a bit different from other types. For other base
+types, <base-type>[1] is equal to <base-type> (e.g. +0(%di):x32[1] is the same
+as +0(%di):x32.) But string[1] is not equal to string. The string type itself
+represents a "char array", but the string array type represents a "char * array".
+So, for example, +0(%di):string[1] is equal to +0(+0(%di)):string.
Bitfield is another special type, which takes 3 parameters, bit-width, bit-
offset, and container-size (usually 32). The syntax is::
b<bit-width>@<bit-offset>/<container-size>
+Symbol type ('symbol') is an alias of the u32 or u64 type (depending on
+BITS_PER_LONG) which shows the given pointer in "symbol+offset" style.
For $comm, the default type is "string"; any other type is invalid.
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 702898633b00..73aaaa3da436 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -146,3 +146,6 @@ Their order is preserved but their base will be offset early at boot time.
Be very careful vs. KASLR when changing anything here. The KASLR address
range must not overlap with anything except the KASAN shadow area, which is
correct as KASAN disables KASLR.
+
+For both 4- and 5-level layouts, the STACKLEAK_POISON value lives in the last
+2MB hole: ffffffffffff4111
diff --git a/MAINTAINERS b/MAINTAINERS
index 930241eb6e2d..f4855974f325 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -376,7 +376,7 @@ F: drivers/platform/x86/i2c-multi-instantiate.c
ACPI PMIC DRIVERS
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Len Brown <lenb@kernel.org>
-R: Andy Shevchenko <andy@infradead.org>
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Mika Westerberg <mika.westerberg@linux.intel.com>
L: linux-acpi@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-acpi/list/
@@ -671,6 +671,13 @@ L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/sunxi-ss/
+ALLWINNER VPU DRIVER
+M: Maxime Ripard <maxime.ripard@bootlin.com>
+M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/staging/media/sunxi/cedrus/
+
ALPHA PORT
M: Richard Henderson <rth@twiddle.net>
M: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
@@ -3160,7 +3167,7 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
-M: Josef Bacik <jbacik@fb.com>
+M: Josef Bacik <josef@toxicpanda.com>
M: David Sterba <dsterba@suse.com>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
@@ -3730,6 +3737,11 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/compal-laptop.c
+COMPILER ATTRIBUTES
+M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+S: Maintained
+F: include/linux/compiler_attributes.h
+
CONEXANT ACCESSRUNNER USB DRIVER
L: accessrunner-general@lists.sourceforge.net
W: http://accessrunner.sourceforge.net/
@@ -3823,7 +3835,6 @@ W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
S: Maintained
F: drivers/cpufreq/arm_big_little.h
F: drivers/cpufreq/arm_big_little.c
-F: drivers/cpufreq/arm_big_little_dt.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
@@ -4201,6 +4212,12 @@ M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
F: drivers/platform/x86/dell-rbtn.*
+DELL REMOTE BIOS UPDATE DRIVER
+M: Stuart Hayes <stuart.w.hayes@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/dell_rbu.c
+
DELL LAPTOP SMM DRIVER
M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
@@ -4208,10 +4225,11 @@ F: drivers/hwmon/dell-smm-hwmon.c
F: include/uapi/linux/i8k.h
DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas)
-M: Doug Warzecha <Douglas_Warzecha@dell.com>
+M: Stuart Hayes <stuart.w.hayes@gmail.com>
+L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/dcdbas.txt
-F: drivers/firmware/dcdbas.*
+F: drivers/platform/x86/dcdbas.*
DELL WMI NOTIFICATIONS DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
@@ -5865,6 +5883,14 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-cpm.c
+FREESCALE IMX LPI2C DRIVER
+M: Dong Aisheng <aisheng.dong@nxp.com>
+L: linux-i2c@vger.kernel.org
+L: linux-imx@nxp.com
+S: Maintained
+F: drivers/i2c/busses/i2c-imx-lpi2c.c
+F: Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+
FREESCALE IMX / MXC FEC DRIVER
M: Fugang Duan <fugang.duan@nxp.com>
L: netdev@vger.kernel.org
@@ -7341,6 +7367,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/intel/
+INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/intel_atomisp2_pm.c
+
INTEL C600 SERIES SAS CONTROLLER DRIVER
M: Intel SCU Linux support <intel-linux-scu@intel.com>
M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
@@ -7527,7 +7559,6 @@ M: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
M: Vishwanath Somayaji <vishwanath.somayaji@intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: arch/x86/include/asm/pmc_core.h
F: drivers/platform/x86/intel_pmc_core*
INTEL PMC/P-Unit IPC DRIVER
@@ -7571,7 +7602,8 @@ F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
INTEL TELEMETRY DRIVER
-M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
+M: Rajneesh Bhardwaj <rajneesh.bhardwaj@linux.intel.com>
+M: "David E. Box" <david.e.box@linux.intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: arch/x86/include/asm/intel_telemetry.h
@@ -8304,6 +8336,14 @@ W: http://legousb.sourceforge.net/
S: Maintained
F: drivers/usb/misc/legousbtower.c
+LG LAPTOP EXTRAS
+M: Matan Ziv-Av <matan@svgalib.org>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-platform-lg-laptop
+F: Documentation/laptops/lg-laptop.rst
+F: drivers/platform/x86/lg-laptop.c
+
LG2160 MEDIA DRIVER
M: Michael Krufky <mkrufky@linuxtv.org>
L: linux-media@vger.kernel.org
@@ -15823,7 +15863,6 @@ F: net/vmw_vsock/virtio_transport_common.c
F: net/vmw_vsock/virtio_transport.c
F: drivers/net/vsockmon.c
F: drivers/vhost/vsock.c
-F: drivers/vhost/vsock.h
F: tools/testing/vsock/
VIRTIO CONSOLE DRIVER
@@ -16318,6 +16357,7 @@ F: arch/arm64/include/asm/xen/
XEN HYPERVISOR INTERFACE
M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
M: Juergen Gross <jgross@suse.com>
+R: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
S: Supported
diff --git a/Makefile b/Makefile
index 9aa352b38815..bce41f4180fc 100644
--- a/Makefile
+++ b/Makefile
@@ -485,7 +485,7 @@ ifneq ($(KBUILD_SRC),)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
endif
-ifeq ($(cc-name),clang)
+ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
@@ -702,7 +702,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
diff --git a/arch/Kconfig b/arch/Kconfig
index 9d329608913e..e1e540ffa979 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -290,6 +290,13 @@ config HAVE_RSEQ
This symbol should be selected by an architecture if it
supports an implementation of restartable sequences.
+config HAVE_FUNCTION_ARG_ACCESS_API
+ bool
+ help
+	  This symbol should be selected by an architecture if it supports
+ the API needed to access function arguments from pt_regs,
+ declared in asm/ptrace.h
+
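As a hedged sketch of what this API enables on a supporting architecture: a kprobe entry handler can read the Nth function argument straight from pt_regs via regs_get_kernel_argument(), the accessor behind the $argN fetcharg used by kprobe events.

/*
 * Sketch: read the first argument of a probed function. Only valid on
 * architectures selecting HAVE_FUNCTION_ARG_ACCESS_API, and only at
 * function entry.
 */
#include <linux/kprobes.h>
#include <linux/printk.h>
#include <asm/ptrace.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg1 = regs_get_kernel_argument(regs, 0);

	pr_info("%s: first argument = 0x%lx\n", p->symbol_name, arg1);
	return 0;
}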
config HAVE_CLK
bool
help
@@ -422,6 +429,13 @@ config SECCOMP_FILTER
See Documentation/userspace-api/seccomp_filter.rst for details.
+config HAVE_ARCH_STACKLEAK
+ bool
+ help
+ An architecture should select this if it has the code which
+ fills the used part of the kernel stack with the STACKLEAK_POISON
+ value before returning from system calls.
+
config HAVE_STACKPROTECTOR
bool
help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 620b0a711ee4..5b4f88363453 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -31,8 +31,6 @@ config ALPHA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index cb05d045efe3..6100431da07a 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -11,12 +11,6 @@
#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
/*
- * Returns current instruction pointer ("program counter").
- */
-#define current_text_addr() \
- ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
-
-/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c
index 1bf3eef34c22..6df765ff2b10 100644
--- a/arch/alpha/kernel/core_apecs.c
+++ b/arch/alpha/kernel/core_apecs.c
@@ -346,7 +346,8 @@ apecs_init_arch(void)
* Window 1 is direct access 1GB at 1GB
* Window 2 is scatter-gather 8MB at 8MB (for isa)
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_pci = NULL;
__direct_map_base = 0x40000000;
__direct_map_size = 0x40000000;
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 4b38386f6e62..867e8730b0c5 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -21,7 +21,7 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/mce.h>
@@ -331,7 +331,7 @@ cia_prepare_tbia_workaround(int window)
long i;
/* Use minimal 1K map. */
- ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+ ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index f70986683fc6..a9fd133a7fb2 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/ptrace.h>
@@ -234,8 +233,7 @@ albacore_init_arch(void)
unsigned long size;
size = initrd_end - initrd_start;
- free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
- PAGE_ALIGN(size));
+ memblock_free(__pa(initrd_start), PAGE_ALIGN(size));
if (!move_initrd(pci_mem))
printk("irongate_init_arch: initrd too big "
"(%ldK)\ndisabling initrd\n",
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c
index 81c0c43635b0..57e0750419f2 100644
--- a/arch/alpha/kernel/core_lca.c
+++ b/arch/alpha/kernel/core_lca.c
@@ -275,7 +275,8 @@ lca_init_arch(void)
* Note that we do not try to save any of the DMA window CSRs
* before setting them, since we cannot read those CSRs on LCA.
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_pci = NULL;
__direct_map_base = 0x40000000;
__direct_map_size = 0x40000000;
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index bdebb8c206f1..c1d0c18c71ca 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -18,7 +18,7 @@
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@@ -82,7 +82,7 @@ mk_resource_name(int pe, int port, char *str)
char *name;
sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
- name = alloc_bootmem(strlen(tmp) + 1);
+ name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
strcpy(name, tmp);
return name;
@@ -117,7 +117,7 @@ alloc_io7(unsigned int pe)
return NULL;
}
- io7 = alloc_bootmem(sizeof(*io7));
+ io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
io7->pe = pe;
raw_spin_lock_init(&io7->irq_lock);
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index b1549db54260..74b1d018124c 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -364,9 +364,11 @@ mcpcia_startup_hose(struct pci_controller *hose)
* Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
* Window 2 is direct access 2GB at 2GB
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_pci = iommu_arena_new(hose, 0x40000000,
- size_for_memory(0x40000000), 0);
+ size_for_memory(0x40000000),
+ SMP_CACHE_BYTES);
__direct_map_base = 0x80000000;
__direct_map_size = 0x80000000;
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index 2c00b61ca379..98d5b6ff8a76 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -351,7 +351,7 @@ t2_sg_map_window2(struct pci_controller *hose,
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
- hose->sg_isa = iommu_arena_new(hose, base, length, 0);
+ hose->sg_isa = iommu_arena_new(hose, base, length, SMP_CACHE_BYTES);
hose->sg_pci = NULL;
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 132b06bdf903..2a2820fb1be6 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -16,7 +16,7 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@@ -316,10 +316,12 @@ titan_init_one_pachip_port(titan_pachip_port *port, int index)
* Window 1 is direct access 1GB at 2GB
* Window 2 is scatter-gather 1GB at 3GB
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
hose->sg_isa->align_entry = 8; /* 64KB for ISA */
- hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000,
+ SMP_CACHE_BYTES);
hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */
port->wsba[0].csr = hose->sg_isa->dma_base | 3;
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index e7c956ea46b6..fc1ab73f23de 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -17,7 +17,7 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
@@ -319,12 +319,14 @@ tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
* NOTE: we need the align_entry settings for Acer devices on ES40,
* specifically floppy and IDE when memory is larger than 2GB.
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
/* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */
hose->sg_isa->align_entry = 4;
hose->sg_pci = iommu_arena_new(hose, 0x40000000,
- size_for_memory(0x40000000), 0);
+ size_for_memory(0x40000000),
+ SMP_CACHE_BYTES);
hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */
__direct_map_base = 0x80000000;
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
index cad36fc6ed7d..353c03d15442 100644
--- a/arch/alpha/kernel/core_wildfire.c
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -111,8 +111,10 @@ wildfire_init_hose(int qbbno, int hoseno)
* ??? We ought to scale window 3 memory.
*
*/
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
- hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
+ SMP_CACHE_BYTES);
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000,
+ SMP_CACHE_BYTES);
pci = WILDFIRE_pci(qbbno, hoseno);
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index c7c5879869d3..091cff3c68fd 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -7,7 +7,7 @@
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/capability.h>
#include <linux/mm.h>
@@ -33,7 +33,7 @@ alloc_pci_controller(void)
{
struct pci_controller *hose;
- hose = alloc_bootmem(sizeof(*hose));
+ hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
*hose_tail = hose;
hose_tail = &hose->next;
@@ -44,7 +44,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return alloc_bootmem(sizeof(struct resource));
+ return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
}
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index c668c3b7a167..97098127df83 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
@@ -392,7 +392,7 @@ alloc_pci_controller(void)
{
struct pci_controller *hose;
- hose = alloc_bootmem(sizeof(*hose));
+ hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
*hose_tail = hose;
hose_tail = &hose->next;
@@ -403,7 +403,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return alloc_bootmem(sizeof(struct resource));
+ return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 6923b0d9c1e1..46e08e0d9181 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -7,7 +7,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
@@ -74,26 +74,26 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
#ifdef CONFIG_DISCONTIGMEM
- arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
+ arena = memblock_alloc_node(sizeof(*arena), align, nid);
if (!NODE_DATA(nid) || !arena) {
printk("%s: couldn't allocate arena from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
- arena = alloc_bootmem(sizeof(*arena));
+ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
}
- arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
+ arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
if (!NODE_DATA(nid) || !arena->ptes) {
printk("%s: couldn't allocate arena ptes from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ arena->ptes = memblock_alloc_from(mem_size, align, 0);
}
#else /* CONFIG_DISCONTIGMEM */
- arena = alloc_bootmem(sizeof(*arena));
- arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+ arena->ptes = memblock_alloc_from(mem_size, align, 0);
#endif /* CONFIG_DISCONTIGMEM */
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 4f0d94471bc9..a37fd990bd55 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -29,7 +29,6 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
@@ -294,7 +293,7 @@ move_initrd(unsigned long mem_limit)
unsigned long size;
size = initrd_end - initrd_start;
- start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+ start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
if (!start || __pa(start) + size > mem_limit) {
initrd_start = initrd_end = 0;
return NULL;
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index ff4f54b86c7f..cd9a112d67ff 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -32,7 +32,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 9d74520298ab..a42fc5c4db89 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -19,7 +19,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h> /* max_low_pfn */
+#include <linux/memblock.h> /* max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/gfp.h>
@@ -282,7 +282,7 @@ mem_init(void)
{
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 26cd925d19b1..74846553e3f1 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/initrd.h>
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e98c6b8e6186..c9e2a1323536 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -37,14 +37,12 @@ config ARC
select HAVE_KERNEL_LZMA
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 8ee41e988169..10346d6cf926 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -98,14 +98,6 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc,
extern unsigned int get_wchan(struct task_struct *p);
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- * Should the PC register be read instead ? This macro does not seem to
- * be used in many places so this wont be all that bad.
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 183391d4d33a..d34f69eb1a95 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -15,7 +15,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
@@ -181,8 +181,8 @@ static void init_unwind_hdr(struct unwind_table *table,
*/
static void *__init unw_hdr_alloc_early(unsigned long sz)
{
- return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
- MAX_DMA_ADDRESS);
+ return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
+ MAX_DMA_ADDRESS);
}
static void *unw_hdr_alloc(unsigned long sz)
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 77ff64a874a1..48e700151810 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -7,7 +7,7 @@
*
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/processor.h>
@@ -123,7 +123,7 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
pud_k = pud_offset(pgd_k, kvaddr);
pmd_k = pmd_offset(pud_k, kvaddr);
- pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd_k, pte_k);
return pte_k;
}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ba145065c579..f8fe5668b30f 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
@@ -218,7 +217,7 @@ void __init mem_init(void)
free_highmem_page(pfn_to_page(tmp));
#endif
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b8c6062ca0c1..91be74d8df65 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -82,7 +82,6 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if (HAVE_KPROBES)
- select HAVE_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
@@ -100,7 +99,6 @@ config ARM
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
- select NO_BOOTMEM
select OF_EARLY_FLATTREE if OF
select OF_RESERVED_MEM if OF
select OLD_SIGACTION
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index e5ad0708849a..c8e198631d41 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -7,6 +7,9 @@ config DMABOUNCE
bool
select ZONE_DMA
+config KRAIT_L2_ACCESSORS
+ bool
+
config SHARP_LOCOMO
bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 3157be413297..219a260bbe5f 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -7,6 +7,7 @@ obj-y += firmware.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
+obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
diff --git a/arch/arm/common/krait-l2-accessors.c b/arch/arm/common/krait-l2-accessors.c
new file mode 100644
index 000000000000..9a97ddadecd6
--- /dev/null
+++ b/arch/arm/common/krait-l2-accessors.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/barrier.h>
+#include <asm/krait-l2-accessors.h>
+
+static DEFINE_RAW_SPINLOCK(krait_l2_lock);
+
+void krait_set_l2_indirect_reg(u32 addr, u32 val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&krait_l2_lock, flags);
+ /*
+ * Select the L2 window by poking l2cpselr, then write to the window
+ * via l2cpdr.
+ */
+ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
+ isb();
+ asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val));
+ isb();
+
+ raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
+}
+EXPORT_SYMBOL(krait_set_l2_indirect_reg);
+
+u32 krait_get_l2_indirect_reg(u32 addr)
+{
+ u32 val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&krait_l2_lock, flags);
+ /*
+ * Select the L2 window by poking l2cpselr, then read from the window
+ * via l2cpdr.
+ */
+ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
+ isb();
+ asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val));
+
+ raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(krait_get_l2_indirect_reg);
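A brief usage sketch for these accessors; the indirect register selector below is purely hypothetical, and a real caller (such as the Krait clock code elsewhere in this series) uses the selectors defined for its hardware block:

/*
 * Sketch: read-modify-write an L2 indirect register. EXAMPLE_L2_REG is a
 * made-up selector value used only for illustration.
 */
#include <linux/types.h>
#include <asm/krait-l2-accessors.h>

#define EXAMPLE_L2_REG	0x500

static void example_set_l2_bits(u32 set_bits)
{
	u32 val = krait_get_l2_indirect_reg(EXAMPLE_L2_REG);

	krait_set_l2_indirect_reg(EXAMPLE_L2_REG, val | set_bits);
}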
diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h
new file mode 100644
index 000000000000..a5f2cdd6445f
--- /dev/null
+++ b/arch/arm/include/asm/krait-l2-accessors.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H
+#define __ASMARM_KRAIT_L2_ACCESSORS_H
+
+extern void krait_set_l2_indirect_reg(u32 addr, u32 val);
+extern u32 krait_get_l2_indirect_reg(u32 addr);
+
+#endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 1bf65b47808a..120f4c9bbfde 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -11,12 +11,6 @@
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <asm/hw_breakpoint.h>
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 13bcd3b867cb..e3057c1b55b9 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -12,7 +12,6 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 4c249cb261f3..ac7e08886863 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
@@ -857,7 +856,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
*/
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(end);
@@ -865,7 +864,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
request_resource(&iomem_resource, res);
}
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = start;
res->end = end;
diff --git a/arch/arm/mach-davinci/include/mach/clock.h b/arch/arm/mach-davinci/include/mach/clock.h
deleted file mode 100644
index 42ed4f2f5ce4..000000000000
--- a/arch/arm/mach-davinci/include/mach/clock.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-davinci/include/mach/clock.h
- *
- * Clock control driver for DaVinci - header file
- *
- * Authors: Vladimir Barinov <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASM_ARCH_DAVINCI_CLOCK_H
-#define __ASM_ARCH_DAVINCI_CLOCK_H
-
-struct clk;
-
-int davinci_clk_reset_assert(struct clk *c);
-int davinci_clk_reset_deassert(struct clk *c);
-
-#endif
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 56a1fe90d394..083dcd9942ce 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -141,7 +141,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/platform_data/ti-sysc.h>
@@ -726,7 +726,7 @@ static int __init _setup_clkctrl_provider(struct device_node *np)
u64 size;
int i;
- provider = memblock_virt_alloc(sizeof(*provider), 0);
+ provider = memblock_alloc(sizeof(*provider), SMP_CACHE_BYTES);
if (!provider)
return -ENOMEM;
@@ -736,12 +736,14 @@ static int __init _setup_clkctrl_provider(struct device_node *np)
of_property_count_elems_of_size(np, "reg", sizeof(u32)) / 2;
provider->addr =
- memblock_virt_alloc(sizeof(void *) * provider->num_addrs, 0);
+ memblock_alloc(sizeof(void *) * provider->num_addrs,
+ SMP_CACHE_BYTES);
if (!provider->addr)
return -ENOMEM;
provider->size =
- memblock_virt_alloc(sizeof(u32) * provider->num_addrs, 0);
+ memblock_alloc(sizeof(u32) * provider->num_addrs,
+ SMP_CACHE_BYTES);
if (!provider->size)
return -ENOMEM;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 66566472c153..661fe48ab78d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,7 +9,6 @@
*
* DMA uncached mapping support.
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0cc8e04295a4..32e4845af2b6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -11,7 +11,6 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
@@ -508,7 +507,7 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
free_unused_memmap();
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e46a6a446cdd..f5cc1ccfea3d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -721,7 +721,7 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
- void *ptr = __va(memblock_alloc(sz, align));
+ void *ptr = __va(memblock_phys_alloc(sz, align));
memset(ptr, 0, sz);
return ptr;
}
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 785d2a562a23..cb44aa290e73 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,5 @@
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 0641ba54ab62..e70a49fc8dcd 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -1,4 +1,4 @@
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 964f682a2b7b..787d7850e064 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -139,7 +139,6 @@ config ARM64
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
select HAVE_NMI
select HAVE_PATA_PLATFORM
@@ -161,7 +160,6 @@ config ARM64
select MULTI_IRQ_HANDLER
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b4e994cd3a42..6cb9fc7e9382 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -134,6 +134,7 @@ vdso_install:
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
@@ -143,6 +144,7 @@ archclean:
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+endif
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 9234013e759e..21a81b59a0cc 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr, \
: [val] "Ir" (val)); \
break; \
default: \
+ ret = 0; \
BUILD_BUG(); \
} \
\
@@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
ret = READ_ONCE(*(u64 *)ptr);
break;
default:
+ ret = 0;
BUILD_BUG();
}
@@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
: [val] "r" (val));
break;
default:
+ ret = 0;
BUILD_BUG();
}
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 2bf6691371c2..3e2091708b8e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -25,13 +25,6 @@
#define USER_DS (TASK_SIZE_64 - 1)
#ifndef __ASSEMBLY__
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <linux/build_bug.h>
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index ed46dc188b22..44e3c351e1ea 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -16,7 +16,6 @@
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index 4f4f1815e047..eac1d0cc595c 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -18,7 +18,6 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index f46d57c31443..6b5037ed15b2 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -58,7 +58,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
/**
* elfcorehdr_read - read from ELF core header
* @buf: buffer where the data is placed
- * @csize: number of bytes to read
+ * @count: number of bytes to read
* @ppos: address in the memory
*
* This function reads @count bytes from elf core header which exists
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 9b65132e789a..2a5b338b2542 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -23,7 +23,9 @@
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
+#include <linux/set_memory.h>
#include <linux/stringify.h>
+#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
@@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+ void *addrs[1];
+ u32 insns[1];
+
+ addrs[0] = addr;
+ insns[0] = opcode;
+
+ return aarch64_insn_patch_text(addrs, insns, 1);
+}
+
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
/* prepare insn slot */
- p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
+ patch_text(p->ainsn.api.insn, p->opcode);
flush_icache_range((uintptr_t) (p->ainsn.api.insn),
(uintptr_t) (p->ainsn.api.insn) +
@@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+void *alloc_insn_page(void)
{
- void *addrs[1];
- u32 insns[1];
+ void *page;
- addrs[0] = (void *)addr;
- insns[0] = (u32)opcode;
+ page = vmalloc_exec(PAGE_SIZE);
+ if (page)
+ set_memory_ro((unsigned long)page, 1);
- return aarch64_insn_patch_text(addrs, insns, 1);
+ return page;
}
/* arm kprobe: install breakpoint in text */
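If I read the two kprobes hunks above correctly, the slot page returned by alloc_insn_page() is now executable but read-only, so arch_prepare_ss_slot() can no longer store the opcode directly and instead goes through aarch64_insn_patch_text() via the relocated patch_text() helper. A hedged sketch of the underlying allocate-executable/mark-read-only idea; alloc_ro_exec_page() is an illustrative name, not a kernel API:

#include <linux/vmalloc.h>
#include <linux/set_memory.h>

static void *alloc_ro_exec_page(void)
{
	void *page = vmalloc_exec(PAGE_SIZE);	/* executable mapping */

	if (page)
		set_memory_ro((unsigned long)page, 1);	/* one page, now RO */
	return page;	/* later writes must go through the text-patching helper */
}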
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ce99c58cd1f1..d9a4c2d6dd8b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -497,25 +497,3 @@ void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}
-
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-void __used stackleak_check_alloca(unsigned long size)
-{
- unsigned long stack_left;
- unsigned long current_sp = current_stack_pointer;
- struct stack_info info;
-
- BUG_ON(!on_accessible_stack(current, current_sp, &info));
-
- stack_left = current_sp - info.low;
-
- /*
- * There's a good chance we're almost out of stack space if this
- * is true. Using panic() over BUG() is more likely to give
- * reliable debugging output.
- */
- if (size >= stack_left)
- panic("alloca() over the kernel stack boundary\n");
-}
-EXPORT_SYMBOL(stackleak_check_alloca);
-#endif
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d0f62dd24c90..953e316521fc 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -26,7 +26,6 @@
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
-#include <linux/bootmem.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -217,8 +216,9 @@ static void __init request_standard_resources(void)
kernel_data.end = __pa_symbol(_end - 1);
num_standard_resources = memblock.memory.cnt;
- standard_resources = alloc_bootmem_low(num_standard_resources *
- sizeof(*standard_resources));
+ standard_resources = memblock_alloc_low(num_standard_resources *
+ sizeof(*standard_resources),
+ SMP_CACHE_BYTES);
for_each_memblock(memory, region) {
res = &standard_resources[i++];
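The conversion pattern here repeats throughout the rest of this diff: bootmem calls are replaced one-for-one by memblock calls, the main visible difference being that memblock_alloc() and friends take an explicit alignment (SMP_CACHE_BYTES, or PAGE_SIZE where the old call implied page alignment), and the Kconfig hunks drop HAVE_MEMBLOCK/NO_BOOTMEM since memblock becomes the only early allocator. A minimal sketch of the new call shapes, in a hypothetical example_early_allocations() rather than any real call site:

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/init.h>
#include <linux/memblock.h>

static void __init example_early_allocations(void)
{
	/* roughly the old alloc_bootmem(size): cache-line aligned */
	void *buf = memblock_alloc(128, SMP_CACHE_BYTES);

	/* roughly the old alloc_bootmem_pages()/alloc_bootmem_low_pages() */
	void *page     = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	void *low_page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	(void)buf; (void)page; (void)low_page;
}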
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d190612b8f33..a3ac26284845 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -19,7 +19,7 @@
#include <linux/gfp.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -160,6 +160,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
+#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
struct page *page, size_t size)
{
@@ -188,6 +189,7 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
return ret;
}
+#endif /* CONFIG_IOMMU_DMA */
static int __init atomic_pool_init(void)
{
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3cf87341859f..9d9582cac6c4 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -22,7 +22,6 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -536,7 +535,7 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
* memmap array.
*/
if (pg < pgend)
- free_bootmem(pg, pgend - pg);
+ memblock_free(pg, pgend - pg);
}
/*
@@ -599,7 +598,7 @@ void __init mem_init(void)
free_unused_memmap();
#endif
/* this will put all unused low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
kexec_reserve_crashkres_pages();
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index fccb1a6f8c6f..63527e585aac 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -11,7 +11,6 @@
*/
#define pr_fmt(fmt) "kasan: " fmt
-#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
@@ -38,7 +37,7 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
- void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_ACCESSIBLE, node);
return __pa(p);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9498c15b847b..394b8d554def 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -101,7 +101,7 @@ static phys_addr_t __init early_pgtable_alloc(void)
phys_addr_t phys;
void *ptr;
- phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
/*
* The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index d7b66fc5e1c5..27a31efd9e8e 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -20,7 +20,6 @@
#define pr_fmt(fmt) "NUMA: " fmt
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -168,7 +167,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
{
int nid = early_cpu_to_node(cpu);
- return memblock_virt_alloc_try_nid(size, align,
+ return memblock_alloc_try_nid(size, align,
__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
@@ -237,7 +236,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
- nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
/* report and initialize */
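Two flavours of memblock allocation appear in the arm64 mm hunks above: memblock_alloc()/memblock_alloc_try_nid() return a ready-to-use (and, as far as I know, zeroed) virtual pointer, while memblock_phys_alloc()/memblock_phys_alloc_try_nid() return a phys_addr_t for callers such as the early page-table and node-data code that want the physical address itself. A small sketch, with example_phys_vs_virt() as an invented name:

#include <linux/memblock.h>

static void __init example_phys_vs_virt(int nid)
{
	/* physical address; the caller __va()s or maps it as needed */
	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	/* zeroed virtual pointer, allocated above MAX_DMA_ADDRESS and on
	 * the requested node if possible */
	void *va = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					  __pa(MAX_DMA_ADDRESS),
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	(void)pa; (void)va;
}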
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index f65a084607fd..84420109113d 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -13,7 +13,6 @@ config C6X
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
- select HAVE_MEMBLOCK
select SPARSE_IRQ
select IRQ_DOMAIN
select OF
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index 8f7cce829f8e..a8581f5b27f6 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -18,17 +18,6 @@
#include <asm/current.h>
/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() \
-({ \
- void *__pc; \
- asm("mvc .S2 pce1,%0\n" : "=b"(__pc)); \
- __pc; \
-})
-
-/*
* User space process size. This is mostly meaningless for NOMMU
* but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
* Since calls like mmap() can return an address or an error, we
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 05d96a9541b5..e9d6824ae94d 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -11,7 +11,6 @@
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
#include <linux/clkdev.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
@@ -291,7 +290,6 @@ notrace void __init machine_init(unsigned long dt_ptr)
void __init setup_arch(char **cmdline_p)
{
- int bootmap_size;
struct memblock_region *reg;
printk(KERN_INFO "Initializing kernel\n");
@@ -348,16 +346,6 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = memory_start;
init_mm.brk = memory_start;
- /*
- * Give all the memory to the bootmap allocator, tell it to put the
- * boot mem_map at the start of memory
- */
- bootmap_size = init_bootmem_node(NODE_DATA(0),
- memory_start >> PAGE_SHIFT,
- PAGE_OFFSET >> PAGE_SHIFT,
- memory_end >> PAGE_SHIFT);
- memblock_reserve(memory_start, bootmap_size);
-
unflatten_and_copy_device_tree();
c6x_cache_init();
@@ -392,22 +380,9 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the coherent memory allocator */
coherent_mem_init(dma_start, dma_size);
- /*
- * Free all memory as a starting point.
- */
- free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);
-
- /*
- * Then reserve memory which is already being used.
- */
- for_each_memblock(reserved, reg) {
- pr_debug("reserved - 0x%08x-0x%08x\n",
- (u32) reg->base, (u32) reg->size);
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
- }
-
max_low_pfn = PFN_DOWN(memory_end);
min_low_pfn = PFN_UP(memory_start);
+ max_pfn = max_low_pfn;
max_mapnr = max_low_pfn - min_low_pfn;
/* Get kmalloc into gear */
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index d0a8e0c4b27e..01305c787201 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -135,8 +135,8 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
if (dma_size & (PAGE_SIZE - 1))
++dma_pages;
- bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
- sizeof(long));
+ bitmap_phys = memblock_phys_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
+ sizeof(long));
dma_bitmap = phys_to_virt(bitmap_phys);
memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 4cc72b0d1c1d..af5ada0520be 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -11,7 +11,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blkdev.h>
#endif
@@ -38,7 +38,8 @@ void __init paging_init(void)
struct pglist_data *pgdat = NODE_DATA(0);
unsigned long zones_size[MAX_NR_ZONES] = {0, };
- empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
+ PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/*
@@ -61,7 +62,7 @@ void __init mem_init(void)
high_memory = (void *)(memory_end & PAGE_MASK);
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
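With the bootmem bitmap gone, c6x (like the other architectures in this diff) keeps its early reservations only in memblock, and mem_init() hands everything to the page allocator through memblock_free_all() instead of free_all_bootmem(). A minimal sketch of that late-boot step; example_mem_init() is illustrative, not a real call site:

#include <linux/memblock.h>
#include <linux/mm.h>

static void __init example_mem_init(void)
{
	/* regions recorded earlier with memblock_reserve() stay reserved;
	 * the rest is released to the buddy allocator */
	memblock_free_all();
	mem_init_print_info(NULL);
}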
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 0a0558567eaa..cb64f8dacd08 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -36,10 +36,8 @@ config CSKY
select HAVE_C_RECORDMCOUNT
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
- select HAVE_MEMBLOCK
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/csky/Kconfig.debug b/arch/csky/Kconfig.debug
index 48cf6ff9df4a..22a162cd99e8 100644
--- a/arch/csky/Kconfig.debug
+++ b/arch/csky/Kconfig.debug
@@ -1,9 +1 @@
-menu "C-SKY Debug Options"
-config CSKY_BUILTIN_DTB
- string "Use kernel builtin dtb"
- help
- User could define the dtb instead of the one which is passed from
- bootloader.
- Sometimes for debug, we want to use a built-in dtb and then we needn't
- modify bootloader at all.
-endmenu
+# dummy file, do not delete
diff --git a/arch/csky/Makefile b/arch/csky/Makefile
index 67a4ae1fba2b..c639fc167895 100644
--- a/arch/csky/Makefile
+++ b/arch/csky/Makefile
@@ -65,26 +65,15 @@ libs-y += arch/csky/lib/ \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
boot := arch/csky/boot
-ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""'
core-y += $(boot)/dts/
-endif
all: zImage
-
-dtbs: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts
-
-%.dtb %.dtb.S %.dtb.o: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
-
-zImage Image uImage: vmlinux dtbs
+zImage Image uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
- $(Q)$(MAKE) $(clean)=$(boot)/dts
- rm -rf arch/csky/include/generated
define archhelp
echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile
index 305e81a5e91e..c57ad3c880bf 100644
--- a/arch/csky/boot/dts/Makefile
+++ b/arch/csky/boot/dts/Makefile
@@ -1,13 +1,3 @@
dtstree := $(srctree)/$(src)
-ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""'
-builtindtb-y := $(patsubst "%",%,$(CONFIG_CSKY_BUILTIN_DTB))
-dtb-y += $(builtindtb-y).dtb
-obj-y += $(builtindtb-y).dtb.o
-.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
-else
dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-endif
-
-always += $(dtb-y)
-clean-files += *.dtb *.dtb.S
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 5ad4f0b83092..b1748659b2e9 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -4,12 +4,6 @@
#ifndef __ASM_CSKY_PROCESSOR_H
#define __ASM_CSKY_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#include <linux/bitops.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index a5e3ab1d5360..dff8b89444ec 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -3,7 +3,6 @@
#include <linux/console.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index e168ac087ccb..53b1bfa4c462 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -4,7 +4,7 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -140,7 +140,7 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index ce2711e050ad..dc07c078f9b8 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -14,7 +14,6 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/swap.h>
@@ -47,7 +46,7 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 0b334b671e90..d19c6b16cd5d 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -15,8 +15,6 @@ config H8300
select OF
select OF_IRQ
select OF_EARLY_FLATTREE
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select TIMER_OF
select H8300_TMR8
select HAVE_KERNEL_GZIP
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 985346393e4a..a060b41b2d31 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -12,12 +12,6 @@
#ifndef __ASM_H8300_PROCESSOR_H
#define __ASM_H8300_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#include <linux/compiler.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 34e2df5c0d6d..b32bfa1fe99e 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -18,7 +18,6 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 015287ac8ce8..6519252ac4db 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -67,7 +67,7 @@ void __init paging_init(void)
* Initialize the bad page table and bad page to point
* to a couple of allocated pages.
*/
- empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/*
@@ -96,7 +96,7 @@ void __init mem_init(void)
max_mapnr = MAP_NR(high_memory);
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 7b25d7c8fa49..2b688af379e6 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -21,9 +21,7 @@ config HEXAGON
select GENERIC_IRQ_SHOW
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
- select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
- select NO_BOOTMEM
select NEED_SG_DMA_LENGTH
select NO_IOPORT_MAP
select GENERIC_IOMAP
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index ce67940860a5..227bcb9cfdac 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -27,9 +27,6 @@
#include <asm/registers.h>
#include <asm/hexagon_vm.h>
-/* must be a macro */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
/* task_struct, defined elsewhere, is the "process descriptor" */
struct task_struct;
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 706699374444..38eaa7b703e7 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -19,7 +19,7 @@
*/
#include <linux/dma-noncoherent.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/page.h>
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index dc8c7e75b5d1..b3c3e04d4e57 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -20,7 +20,7 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index d789b9cc0189..1719ede9e9bd 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
@@ -68,7 +67,7 @@ unsigned long long kmap_generation;
void __init mem_init(void)
{
/* No idea where this is actually declared. Seems to evade LXR. */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
/*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8b4a0c1748c0..36773def6920 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,9 +26,7 @@ config IA64
select HAVE_FUNCTION_TRACER
select TTY
select HAVE_ARCH_TRACEHOOK
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select NO_BOOTMEM
select HAVE_VIRT_CPU_ACCOUNTING
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_SG_CHAIN
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 10061ccf0440..c91ef98ed6bf 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,12 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
*unat = (*unat & ~mask) | (nat << bit);
}
-/*
- * Get the current instruction/program counter value.
- */
-#define current_text_addr() \
- ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
-
static inline __u64
ia64_get_ivr (void)
{
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 39f4433a6f0e..bec762a9b418 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -12,7 +12,7 @@
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
#include <linux/sysctl.h>
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f77d80edddfe..8f106638913c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -23,7 +23,7 @@
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 6b51c88e3578..b49fe6f618ed 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -6,7 +6,7 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/compiler.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */
EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
#endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 550243a94b5d..fe6e4946672e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -90,7 +90,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6115464d5f03..91bd1e129379 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -77,7 +77,7 @@
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
@@ -361,9 +361,9 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
#define IA64_LOG_ALLOCATE(it, size) \
{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size); \
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- (ia64_err_rec_t *)alloc_bootmem(size);}
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -1835,8 +1835,8 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
/* Caller prevents this from being called after init */
static void * __ref mca_bootmem(void)
{
- return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
+ return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
+ KERNEL_STACK_SIZE, 0);
}
/* Do per-CPU MCA-related initialization. */
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index dfe40cbdf3b3..45f956ad715a 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 0e6c2d9fb498..583a3746d70b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/cpu.h>
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 9a960829a01d..99099f73b207 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -344,10 +344,10 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
get_signal(&ksig);
/*
- * get_signal_to_deliver() may have run a debugger (via notify_parent())
+ * get_signal() may have run a debugger (via notify_parent())
* and the debugger may have modified the state (e.g., to arrange for an
* inferior call), thus it's important to check for restarting _after_
- * get_signal_to_deliver().
+ * get_signal().
*/
if ((long) scr->pt.r10 != -1)
/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 74fe317477e6..51ec944b036c 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 9b820f7a6a98..e311ee13e61d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -19,7 +19,7 @@
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index e04efa088902..7601fe0622d2 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -28,7 +28,7 @@
* acquired, then the read-write lock must be acquired first.
*/
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e2e40bbd391c..6e447234205c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -14,7 +14,6 @@
* Routines used by ia64 machines with contiguous (or virtually contiguous)
* memory.
*/
-#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
@@ -85,8 +84,9 @@ skip:
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
+ PERCPU_PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
}
/**
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 1928d5719e41..8a965784340c 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -19,7 +19,6 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
@@ -451,8 +450,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
if (bestnode == -1)
bestnode = anynode;
- ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+ ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ bestnode);
return ptr;
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3b85c3ecac38..d5e12ff1d73c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -447,19 +446,19 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
pud = pud_offset(pgd, address);
if (pud_none(*pud))
- pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
pte = pte_offset_kernel(pmd, address);
if (pte_none(*pte))
- set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+ set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
PAGE_KERNEL));
}
return 0;
@@ -627,7 +626,7 @@ mem_init (void)
set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
/*
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index aa19b7ac8222..3861d6e32d5f 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -15,7 +15,7 @@
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index acf10eb9da15..9340bcb4f29c 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -21,7 +21,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/delay.h>
@@ -59,8 +59,10 @@ struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
void __init
mmu_context_init (void)
{
- ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
- ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+ ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
+ ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
+ SMP_CACHE_BYTES);
}
/*
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 5d71800df431..196a0dd7ff97 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -20,7 +20,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/machvec.h>
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 9146192b86f5..9900e6d4add6 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -16,7 +16,7 @@
#include <asm/nodedata.h>
#include <asm/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 102aabad6d20..8df13d0d96fa 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -6,7 +6,7 @@
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/sn/types.h>
@@ -385,16 +385,15 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
struct hubdev_info *hubdev_info;
int size;
- pg_data_t *pg;
size = sizeof(struct hubdev_info);
if (node >= num_online_nodes()) /* Headless/memless IO nodes */
- pg = NODE_DATA(0);
- else
- pg = NODE_DATA(node);
+ node = 0;
- hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+ hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
+ SMP_CACHE_BYTES,
+ node);
npda->pdinfo = (void *)hubdev_info;
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 5f6b6b48c1d5..a6d40a2c5bff 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -20,7 +20,7 @@
#include <linux/mm.h>
#include <linux/serial.h>
#include <linux/irq.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
@@ -511,7 +511,8 @@ static void __init sn_init_pdas(char **cmdline_p)
*/
for_each_online_node(cnode) {
nodepdaindr[cnode] =
- alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
+ memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
+ cnode);
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -522,7 +523,7 @@ static void __init sn_init_pdas(char **cmdline_p)
*/
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
nodepdaindr[cnode] =
- alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
+ memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
/*
* Now copy the array of nodepda pointers to each nodepda.
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index c7b2a8d60a41..1bc9f1ba759a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -27,9 +27,7 @@ config M68K
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select DMA_DIRECT_OPS if HAS_DMA
- select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
- select NO_BOOTMEM
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index c83d66442612..6ffc204eb07d 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/module.h>
@@ -95,7 +95,8 @@ void __init atari_stram_reserve_pages(void *start_mem)
{
if (kernel_in_stram) {
pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
- stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size);
+ stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
+ PAGE_SIZE);
stram_pool.end = stram_pool.start + pool_size - 1;
request_resource(&iomem_resource, &stram_pool);
stram_virt_offset = 0;
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index adad03ca6e11..360c723c0ae6 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/clk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 464e9f5f50ee..3750819ac5a1 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -8,12 +8,6 @@
#ifndef __ASM_M68K_PROCESSOR_H
#define __ASM_M68K_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#include <linux/thread_info.h>
#include <asm/segment.h>
#include <asm/fpu.h>
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 5d3596c180f9..a1a3eaeaf58c 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index cfd5475bfc31..3c5def10d486 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -27,7 +27,6 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index 107082877064..1b4c562753da 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -16,7 +16,7 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 38e2b272c220..933c33e76a48 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -17,7 +17,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <asm/setup.h>
@@ -93,7 +93,7 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
@@ -140,7 +140,7 @@ static inline void init_pointer_tables(void)
void __init mem_init(void)
{
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
init_pointer_tables();
mem_init_print_info(NULL);
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index f5453d944ff5..0de4999a3810 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -13,7 +13,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/setup.h>
@@ -44,7 +43,7 @@ void __init paging_init(void)
enum zone_type zone;
int i;
- empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
memset((void *) empty_zero_page, 0, PAGE_SIZE);
pg_dir = swapper_pg_dir;
@@ -52,7 +51,7 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+ next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
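One behavioural note on the empty_zero_page allocations above: as far as I know, memblock_alloc() returns already-zeroed memory (only the *_raw variants skip the clearing), so the memset() these call sites keep is redundant but harmless. A tiny sketch under that assumption, with an invented function name:

#include <linux/memblock.h>
#include <linux/string.h>

static void __init example_zero_page(void)
{
	/* zeroed by memblock_alloc() itself, if the assumption above holds */
	void *zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	memset(zero_page, 0, PAGE_SIZE);	/* belt and braces */
	(void)zero_page;
}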
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 4e17ecb5928a..7497cf30bf1c 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
@@ -55,7 +54,7 @@ static pte_t * __init kernel_page_table(void)
{
pte_t *ptablep;
- ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
clear_page(ptablep);
__flush_page_to_ram(ptablep);
@@ -95,7 +94,8 @@ static pmd_t * __init kernel_ptr_table(void)
last_pgtable += PTRS_PER_PMD;
if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
- last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
clear_page(last_pgtable);
__flush_page_to_ram(last_pgtable);
@@ -275,7 +275,7 @@ void __init paging_init(void)
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 4a9979908357..f736db48a2e1 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -16,7 +16,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
@@ -45,7 +45,7 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long size;
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -55,7 +55,7 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
- next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+ next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 79a2bb857906..542c4404861c 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -15,7 +15,7 @@
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/platform_device.h>
#include <asm/oplib.h>
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index 5f92c72b05c3..a2c1c9304895 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/list.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index d30da12a1702..582a1284059a 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 8546922adb47..4d64711d3d47 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -7,7 +7,7 @@
* Contains common routines for sun3/sun3x DVMA management.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -267,7 +267,8 @@ void __init dvma_init(void)
list_add(&(hole->list), &hole_list);
- iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
+ iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
+ SMP_CACHE_BYTES);
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index b2acbc862f60..89e630e66555 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <asm/sun3x.h>
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 164a4857737a..effed2efd306 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,8 +28,6 @@ config MICROBLAZE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select NO_BOOTMEM
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE
select IRQ_DOMAIN
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 330d556860ba..66b537b8d138 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -46,12 +46,6 @@ extern void ret_from_kernel_thread(void);
# define TASK_SIZE (0x81000000 - 0x80000000)
/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-# define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
-/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's. We won't be using it
*/
@@ -92,12 +86,6 @@ extern unsigned long get_wchan(struct task_struct *p);
# ifndef __ASSEMBLY__
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-# define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
/* If you change this, you must change the associated assembly-languages
* constants defined below, THREAD_*.
*/
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index d801cc5f5b95..45e0a1aa9357 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -28,7 +28,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index df6de7ccdc2e..b17fd8aafd64 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -7,10 +7,9 @@
* for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/memblock.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
@@ -204,7 +203,7 @@ void __init mem_init(void)
high_memory = (void *)__va(memory_start + lowmem_size - 1);
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_HIGHMEM
highmem_setup();
#endif
@@ -377,7 +376,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
if (mem_init_done)
p = kzalloc(size, mask);
else {
- p = alloc_bootmem(size);
+ p = memblock_alloc(size, SMP_CACHE_BYTES);
if (p)
memset(p, 0, size);
}
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 2ffd171af8b6..6b89a66ec1a5 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -20,7 +20,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 80778b40f8fa..8272ea4c7264 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -60,7 +60,6 @@ config MIPS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KPROBES
select HAVE_KRETPROBES
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
@@ -78,7 +77,6 @@ config MIPS
select RTC_LIB
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
- select NO_BOOTMEM
menu "Machine selection"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 15a84cfd0719..68410490e12f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -128,7 +128,7 @@ cflags-y += -ffreestanding
# clang's output will be based upon the build machine. So for clang we simply
# unconditionally specify -EB or -EL as appropriate.
#
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -EL
else
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 0332f0514d05..80390a9ec264 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pfn.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 4c7a93f4039a..9728abcb18fa 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 7019e2967009..77a836e661c9 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -7,7 +7,7 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/bmips.h>
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 2be9caaa2085..e28ee9a7cc7e 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 6329c5f780d6..1738a06396f9 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/bitops.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 236833be6fbe..e8eb60ed99f2 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -11,7 +11,7 @@
* Copyright (C) 2010 Cavium Networks, Inc.
*/
#include <linux/dma-direct.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -244,7 +244,7 @@ void __init plat_swiotlb_setup(void)
swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
- octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
+ octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index a2acc6454cf3..5073d2ed78bb 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/types.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/emma/common/prom.c b/arch/mips/emma/common/prom.c
index cae42259d6da..675337b8a4a0 100644
--- a/arch/mips/emma/common/prom.c
+++ b/arch/mips/emma/common/prom.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index dd9496f26e6a..429b7f8d2aeb 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swap.h>
#include <asm/sgialib.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index c373eb605040..ce3ed4d17813 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -24,11 +24,6 @@
#include <asm/prefetch.h>
/*
- * Return current * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-/*
* System setup and hardware flags..
*/
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 0a0aaf39fd16..4c41ed0a637e 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 2c7288041a99..81845ba04835 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -3,7 +3,7 @@
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index a8657d29c62e..01b2bd95ba1f 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 89950b7bf536..93b8e0b4332f 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -12,7 +12,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 01a5ff4c41ff..ea09ed6a80a9 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -15,7 +15,6 @@
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
@@ -561,7 +560,7 @@ static void __init bootmem_init(void)
extern void show_kernel_relocation(const char *level);
offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
- free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
+ memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
/*
@@ -859,7 +858,7 @@ static void __init arch_mem_init(char **cmdline_p)
* Prevent memblock from allocating high memory.
* This cannot be done before max_low_pfn is detected, so up
* to this point is possible to only reserve physical memory
- * with memblock_reserve; memblock_virt_alloc* can be used
+ * with memblock_reserve; memblock_alloc* can be used
* only after this point
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
@@ -917,7 +916,7 @@ static void __init resource_init(void)
if (end >= HIGHMEM_START)
end = HIGHMEM_START - 1;
- res = alloc_bootmem(sizeof(struct resource));
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
res->start = start;
res->end = end;
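A recurring replacement nearby (ia64's mca_bootmem() earlier, and mips trap_init() just below) is memblock_alloc_from(), which stands in for the old __alloc_bootmem(size, align, goal): the third argument is the minimum physical address for the allocation. A short sketch with an invented example_alloc_from():

#include <linux/memblock.h>

static void __init example_alloc_from(void)
{
	/* PAGE_SIZE bytes, PAGE_SIZE aligned, placed at or above
	 * __pa(MAX_DMA_ADDRESS) -- the old "goal" parameter */
	void *p = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
				      __pa(MAX_DMA_ADDRESS));
	(void)p;
}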
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5feef28deac8..0f852e1b5891 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -28,7 +28,6 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
@@ -2263,7 +2262,7 @@ void __init trap_init(void)
memblock_set_bottom_up(true);
ebase = (unsigned long)
- __alloc_bootmem(size, 1 << fls(size), 0);
+ memblock_alloc_from(size, 1 << fls(size), 0);
memblock_set_bottom_up(false);
/*
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 0bef238d2c0c..6176b9acba95 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -26,7 +26,7 @@
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index f43629979a0e..5812e6145801 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index f8e772564d74..d77b61b3d6ee 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -16,7 +16,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include "commpage.h"
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4144bfaef137..ec9ed23bca7f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -15,7 +15,7 @@
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index aa0a1a00faf6..7257e8b6f5a9 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f7ea8e21656b..1fcc4d149054 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -18,7 +18,7 @@
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/fpu.h>
#include <asm/page.h>
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index d984bd5c2ec5..14d4c5e2b42f 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,7 +8,7 @@
#include <linux/export.h>
#include <linux/clk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 37b8fc5b9ac9..5ce1407de2d5 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -8,7 +8,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
diff --git a/arch/mips/loongson64/common/init.c b/arch/mips/loongson64/common/init.c
index 6ef17120722f..c073fbcb9805 100644
--- a/arch/mips/loongson64/common/init.c
+++ b/arch/mips/loongson64/common/init.c
@@ -8,7 +8,7 @@
* option) any later version.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/bootinfo.h>
#include <asm/traps.h>
#include <asm/smp-ops.h>
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index c1e6ec52c614..622761878cd1 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -18,7 +18,6 @@
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
@@ -272,7 +271,7 @@ void __init paging_init(void)
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
mem_init_print_info(NULL);
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 15cae0f11880..b521d8e2d359 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -22,7 +22,7 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
@@ -243,7 +243,8 @@ void __init fixrange_init(unsigned long start, unsigned long end,
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -462,7 +463,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
maar_init();
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_free_highmem();
mem_init_print_info(NULL);
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index b19a3c506b1e..e2a33adc0f29 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -7,7 +7,7 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index a47556723b85..868921adef1d 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -12,7 +12,7 @@
* Steven J. Hill <sjhill@mips.com>
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
index b5ba83f4c646..c856f2a3ea42 100644
--- a/arch/mips/netlogic/xlp/dt.c
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -33,7 +33,7 @@
*/
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 3c3b1e6abb53..687513880fbf 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -11,7 +11,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index c2e94cf5ecda..e68b44b27c0d 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -11,7 +11,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 1ada8492733b..d544e7b07f7a 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -14,7 +14,7 @@
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index 6484e4a4597b..361a690facbf 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -29,7 +29,7 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 6f7bef052b7f..d8b8444d6795 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -18,7 +18,6 @@
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
-#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
@@ -475,7 +474,7 @@ void __init paging_init(void)
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
mem_init_print_info(NULL);
}
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 092fb2a6ec4a..12a780f251e1 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -21,7 +21,7 @@
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pm.h>
#include <linux/smp.h>
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 152ca71cc2d7..3b034b7178d6 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/mips/txx9/rbtx4938/prom.c b/arch/mips/txx9/rbtx4938/prom.c
index bcb469247e8c..2b36a2ee744c 100644
--- a/arch/mips/txx9/rbtx4938/prom.c
+++ b/arch/mips/txx9/rbtx4938/prom.c
@@ -11,7 +11,7 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/bootinfo.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/rbtx4938.h>
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 34605ca21498..58a0315ad743 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -10,7 +10,7 @@ ccflags-vdso := \
$(filter -march=%,$(KBUILD_CFLAGS)) \
-D__VDSO__
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
endif
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 56992330026a..7a04adacb2f0 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -29,14 +29,12 @@ config NDS32
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_KMEMLEAK
- select HAVE_MEMBLOCK
select HAVE_REGS_AND_STACK_ACCESS_API
select IRQ_DOMAIN
select LOCKDEP_SUPPORT
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
- select NO_BOOTMEM
select NO_IOPORT_MAP
select RTC_LIB
select THREAD_INFO_IN_TASK
diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h
index 9c83caf4269f..c2660f566bac 100644
--- a/arch/nds32/include/asm/processor.h
+++ b/arch/nds32/include/asm/processor.h
@@ -4,12 +4,6 @@
#ifndef __ASM_NDS32_PROCESSOR_H
#define __ASM_NDS32_PROCESSOR_H
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
#ifdef __KERNEL__
#include <asm/ptrace.h>
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 63a1a5ef5219..eacc79024879 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -2,9 +2,8 @@
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/cpu.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
#include <linux/memblock.h>
+#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c
index e17cb8a69315..022779af6148 100644
--- a/arch/nds32/mm/highmem.c
+++ b/arch/nds32/mm/highmem.c
@@ -6,7 +6,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index c713d2ad55dc..131104bd2538 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -7,12 +7,11 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
-#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -81,7 +80,7 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(pte, 0, PAGE_SIZE);
set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
@@ -114,7 +113,7 @@ static void __init fixedrange_init(void)
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
- fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ fixmap_pmd_p = (pmd_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(fixmap_pmd_p, 0, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
@@ -127,7 +126,7 @@ static void __init fixedrange_init(void)
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(pte, 0, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
pkmap_page_table = pte;
@@ -153,7 +152,7 @@ void __init paging_init(void)
fixedrange_init();
/* allocate space for empty_zero_page */
- zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ zero_page = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
memset(zero_page, 0, PAGE_SIZE);
zone_sizes_init();
@@ -192,7 +191,7 @@ void __init mem_init(void)
free_highmem();
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 2df0c57f2833..7e95506e957a 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -23,9 +23,7 @@ config NIOS2
select SPARSE_IRQ
select USB_ARCH_HAS_HCD if USB_SUPPORT
select CPU_NO_EFFICIENT_FFS
- select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
- select NO_BOOTMEM
config GENERIC_CSUM
def_bool y
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 4944e2e1d8b0..94bcb86f679f 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -38,12 +38,6 @@
#define KUSER_SIZE (PAGE_SIZE)
#ifndef __ASSEMBLY__
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
# define TASK_SIZE 0x7FFF0000UL
# define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index a6d4f7530247..232a36b511aa 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 2d0011ddd4d5..6bbd4ae2beb0 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index c92fe4234009..16cea5776b87 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -23,7 +23,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
@@ -73,7 +73,7 @@ void __init mem_init(void)
high_memory = __va(end_mem);
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index a655ae280637..285f7d05c8ed 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -12,7 +12,6 @@ config OPENRISC
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
- select HAVE_MEMBLOCK
select GPIOLIB
select HAVE_ARCH_TRACEHOOK
select SPARSE_IRQ
@@ -32,7 +31,6 @@ config OPENRISC
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
- select NO_BOOTMEM
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_USE_QUEUED_RWLOCKS
select OMPIC if SMP
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index af31a9fe736a..351d3aed7a06 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -30,11 +30,6 @@
| SPR_SR_DCE | SPR_SR_SM)
#define USER_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
| SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
/*
* User space process size. This is hardcoded into a few places,
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index e17fcd83120f..c605bdad1746 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -30,13 +30,12 @@
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
-#include <linux/memblock.h>
#include <linux/device.h>
#include <asm/sections.h>
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 6972d5d6f23f..d157310eb377 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -26,12 +26,11 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h> /* for initrd_* */
#include <linux/pagemap.h>
-#include <linux/memblock.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
@@ -106,7 +105,7 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
@@ -213,7 +212,7 @@ void __init mem_init(void)
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 2175e4bfd9fc..c9697529b3f0 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -126,7 +126,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
if (likely(mem_init_done)) {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
} else {
- pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
}
if (pte)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index f1cd12afd943..92a339ee28b3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -15,8 +15,6 @@ config PARISC
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select BUG
select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2bd5e695bdad..6e2a8176b0dd 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -20,17 +20,6 @@
#include <asm/percpu.h>
#endif /* __ASSEMBLY__ */
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#ifdef CONFIG_PA20
-#define current_ia(x) __asm__("mfia %0" : "=r"(x))
-#else /* mfia added in pa2.0 */
-#define current_ia(x) __asm__("blr 0,%0\n\tnop" : "=r"(x))
-#endif
-#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
-
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index f88a52b8531c..2d7cffcaa476 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
@@ -621,7 +620,7 @@ void __init mem_init(void)
high_memory = __va((max_pfn << PAGE_SHIFT));
set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_PA11
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e84943d24e5c..8be31261aec8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -206,7 +206,6 @@ config PPC
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
@@ -231,7 +230,6 @@ config PPC
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
select NEED_SG_DMA_LENGTH
- select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
@@ -932,10 +930,6 @@ config FSL_GTM
help
Freescale General-purpose Timers support
-# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
-config MCA
- bool
-
# Platforms that want PCI turned on unconditionally just do select PCI
# in their config node. Platforms that want to choose at config
# time should select PPC_PCI_CHOICE
@@ -946,7 +940,6 @@ config PCI
bool "PCI support" if PPC_PCI_CHOICE
default y if !40x && !CPM2 && !PPC_8xx && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
- default PCI_QSPAN if PPC_8xx
select GENERIC_PCI_IOMAP
help
Find out whether your system includes a PCI bus. PCI is the name of
@@ -960,14 +953,6 @@ config PCI_DOMAINS
config PCI_SYSCALL
def_bool PCI
-config PCI_QSPAN
- bool "QSpan PCI"
- depends on PPC_8xx
- select PPC_I8259
- help
- Say Y here if you have a system based on a Motorola 8xx-series
- embedded processor with a QSPAN PCI interface, otherwise say N.
-
config PCI_8260
bool
depends on PCI && 8260
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 17be664dafa2..8a2ce14d68d0 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -96,7 +96,7 @@ aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
endif
-ifneq ($(cc-name),clang)
+ifndef CONFIG_CC_IS_CLANG
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
endif
@@ -175,7 +175,7 @@ endif
# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
-ifneq ($(cc-name),clang)
+ifndef CONFIG_CC_IS_CLANG
CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
endif
endif
diff --git a/arch/powerpc/boot/dts/fsl/t2080rdb.dts b/arch/powerpc/boot/dts/fsl/t2080rdb.dts
index 55c0210a771d..092a400740f8 100644
--- a/arch/powerpc/boot/dts/fsl/t2080rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t2080rdb.dts
@@ -77,12 +77,12 @@
};
ethernet@f0000 {
- phy-handle = <&xg_cs4315_phy1>;
+ phy-handle = <&xg_cs4315_phy2>;
phy-connection-type = "xgmii";
};
ethernet@f2000 {
- phy-handle = <&xg_cs4315_phy2>;
+ phy-handle = <&xg_cs4315_phy1>;
phy-connection-type = "xgmii";
};
diff --git a/arch/powerpc/boot/dts/mpc885ads.dts b/arch/powerpc/boot/dts/mpc885ads.dts
index 5b037f51741d..3aa300afbbca 100644
--- a/arch/powerpc/boot/dts/mpc885ads.dts
+++ b/arch/powerpc/boot/dts/mpc885ads.dts
@@ -72,7 +72,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
- ranges = <0x0 0xff000000 0x4000>;
+ ranges = <0x0 0xff000000 0x28000>;
bus-frequency = <0>;
// Temporary -- will go away once kernel uses ranges for get_immrbase().
@@ -224,6 +224,17 @@
#size-cells = <0>;
};
};
+
+ crypto@20000 {
+ compatible = "fsl,sec1.2", "fsl,sec1.0";
+ reg = <0x20000 0x8000>;
+ interrupts = <1 1>;
+ interrupt-parent = <&PIC>;
+ fsl,num-channels = <1>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0x4c>;
+ fsl,descriptor-types-mask = <0x05000154>;
+ };
};
chosen {
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 31733a95bbd0..3d5acd2b113a 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -36,6 +36,11 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr);
int patch_instruction_site(s32 *addr, unsigned int instr);
int patch_branch_site(s32 *site, unsigned long target, int flags);
+static inline unsigned long patch_site_addr(s32 *site)
+{
+ return (unsigned long)site + *site;
+}
+
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
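
For reference, the new patch_site_addr() above decodes a self-relative patch site: the s32 entry stores the distance from the entry itself to the instruction it names, so the absolute address is simply the entry's own address plus its value. A standalone userspace sketch of that arithmetic; every name and value below is invented for the demo, none of it is kernel code:

#include <stdint.h>
#include <stdio.h>

static char target_insn[4];	/* stands in for the instruction being named */
static int32_t demo_site;	/* stands in for an s32 patch-site entry */

static uintptr_t demo_site_addr(const int32_t *site)
{
	/* same arithmetic as patch_site_addr(): entry address + stored offset */
	return (uintptr_t)site + *site;
}

int main(void)
{
	/* the assembler records this offset at build time; fake it here */
	demo_site = (int32_t)((intptr_t)target_insn - (intptr_t)&demo_site);

	printf("site resolves to %p, target is at %p\n",
	       (void *)demo_site_addr(&demo_site), (void *)target_insn);
	return 0;
}
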
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 4f547752ae79..fa05aa566ece 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -34,20 +34,12 @@
* respectively NA for All or X for Supervisor and no access for User.
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
- * When that bit is not set access is done iaw "all user"
- * which means no access iaw page rules.
- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
- * 0x => No access => 11 (all accesses performed as user iaw page definition)
- * 10 => No user => 01 (all accesses performed according to page definition)
- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
* We define all 16 groups so that all other bits of APG can take any value
*/
-#ifdef CONFIG_SWAP
-#define MI_APG_INIT 0xf4f4f4f4
-#else
#define MI_APG_INIT 0x44444444
-#endif
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -115,20 +107,12 @@
* Supervisor and no access for user and NA for ALL.
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
- * When that bit is not set access is done iaw "all user"
- * which means no access iaw page rules.
- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
- * 0x => No access => 11 (all accesses performed as user iaw page definition)
- * 10 => No user => 01 (all accesses performed according to page definition)
- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
* We define all 16 groups so that all other bits of APG can take any value
*/
-#ifdef CONFIG_SWAP
-#define MD_APG_INIT 0xf4f4f4f4
-#else
#define MD_APG_INIT 0x44444444
-#endif
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -180,12 +164,6 @@
*/
#define SPRN_M_TW 799
-/* APGs */
-#define M_APG0 0x00000000
-#define M_APG1 0x00000020
-#define M_APG2 0x00000040
-#define M_APG3 0x00000060
-
#ifdef CONFIG_PPC_MM_SLICES
#include <asm/nohash/32/slice.h>
#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
@@ -251,6 +229,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+/* patch sites */
+extern s32 patch__itlbmiss_linmem_top;
+extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
+extern s32 patch__fixupdar_linmem_top;
+
+extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
+extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
+
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC_4K_PAGES)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7d04d60a39c9..ee58526cb6c2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -67,12 +67,6 @@ extern int _chrp_type;
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
#define HMT_low() asm volatile("or 1,1,1 # low priority")
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index bb38dd67d47d..1b06add4f092 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <asm/page.h>
#include <linux/time.h>
+#include <linux/cpumask.h>
/*
* Definitions for talking to the RTAS on CHRP machines.
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index f432054234a4..8be3721d9302 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -1008,9 +1008,7 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
- dt_cpu_features = __va(
- memblock_alloc(sizeof(struct dt_cpu_feature)*
- nr_dt_cpu_features, PAGE_SIZE));
+ dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE));
cpufeatures_setup_start(isa);
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 134a573a9f2d..3b67b9533c82 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -31,6 +31,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/code-patching-asm.h>
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
@@ -318,8 +319,8 @@ InstructionTLBMiss:
cmpli cr0, r11, PAGE_OFFSET@h
#ifndef CONFIG_PIN_TLB_TEXT
/* It is assumed that kernel code fits into the first 8M page */
-_ENTRY(ITLBMiss_cmp)
- cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+0: cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+ patch_site 0b, patch__itlbmiss_linmem_top
#endif
#endif
#endif
@@ -353,13 +354,14 @@ _ENTRY(ITLBMiss_cmp)
#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mtcr r12
#endif
-
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1
-#endif
/* Load the MI_TWC with the attributes for this "segment." */
mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
+#ifdef CONFIG_SWAP
+ rlwinm r11, r10, 32-5, _PAGE_PRESENT
+ and r11, r11, r10
+ rlwimi r10, r11, 0, _PAGE_PRESENT
+#endif
li r11, RPN_PATTERN | 0x200
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 20 and 23 must be clear.
@@ -372,16 +374,17 @@ _ENTRY(ITLBMiss_cmp)
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
/* Restore registers */
-_ENTRY(itlb_miss_exit_1)
- mfspr r10, SPRN_SPRG_SCRATCH0
+0: mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mfspr r12, SPRN_SPRG_SCRATCH2
#endif
rfi
+ patch_site 0b, patch__itlbmiss_exit_1
+
#ifdef CONFIG_PERF_EVENTS
-_ENTRY(itlb_miss_perf)
- lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha
+ patch_site 0f, patch__itlbmiss_perf
+0: lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha
lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
addi r11, r11, 1
stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -435,11 +438,11 @@ DataStoreTLBMiss:
#ifndef CONFIG_PIN_TLB_IMMR
cmpli cr0, r11, VIRT_IMMR_BASE@h
#endif
-_ENTRY(DTLBMiss_cmp)
- cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+ patch_site 0b, patch__dtlbmiss_linmem_top
#ifndef CONFIG_PIN_TLB_IMMR
-_ENTRY(DTLBMiss_jmp)
- beq- DTLBMissIMMR
+0: beq- DTLBMissIMMR
+ patch_site 0b, patch__dtlbmiss_immr_jmp
#endif
blt cr7, DTLBMissLinear
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
@@ -470,14 +473,22 @@ _ENTRY(DTLBMiss_jmp)
* above.
*/
rlwimi r11, r10, 0, _PAGE_GUARDED
-#ifdef CONFIG_SWAP
- /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0
- * on that bit will represent a Non Access group
- */
- rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1
-#endif
mtspr SPRN_MD_TWC, r11
+ /* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
+ * We also need to know if the insn is a load/store, so:
+ * Clear _PAGE_PRESENT and load that which will
+ * trap into DTLB Error with the store bit set accordingly.
+ */
+ /* PRESENT=0x1, ACCESSED=0x20
+ * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+ * r10 = (r10 & ~PRESENT) | r11;
+ */
+#ifdef CONFIG_SWAP
+ rlwinm r11, r10, 32-5, _PAGE_PRESENT
+ and r11, r11, r10
+ rlwimi r10, r11, 0, _PAGE_PRESENT
+#endif
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
@@ -489,14 +500,16 @@ _ENTRY(DTLBMiss_jmp)
/* Restore registers */
mtspr SPRN_DAR, r11 /* Tag DAR */
-_ENTRY(dtlb_miss_exit_1)
- mfspr r10, SPRN_SPRG_SCRATCH0
+
+0: mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r12, SPRN_SPRG_SCRATCH2
rfi
+ patch_site 0b, patch__dtlbmiss_exit_1
+
#ifdef CONFIG_PERF_EVENTS
-_ENTRY(dtlb_miss_perf)
- lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
+ patch_site 0f, patch__dtlbmiss_perf
+0: lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
addi r11, r11, 1
stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -637,8 +650,8 @@ InstructionBreakpoint:
*/
DTLBMissIMMR:
mtcr r12
- /* Set 512k byte guarded page and mark it valid and accessed */
- li r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2
+ /* Set 512k byte guarded page and mark it valid */
+ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
mtspr SPRN_MD_TWC, r10
mfspr r10, SPRN_IMMR /* Get current IMMR */
rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
@@ -648,16 +661,17 @@ DTLBMissIMMR:
li r11, RPN_PATTERN
mtspr SPRN_DAR, r11 /* Tag DAR */
-_ENTRY(dtlb_miss_exit_2)
- mfspr r10, SPRN_SPRG_SCRATCH0
+
+0: mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r12, SPRN_SPRG_SCRATCH2
rfi
+ patch_site 0b, patch__dtlbmiss_exit_2
DTLBMissLinear:
mtcr r12
- /* Set 8M byte page and mark it valid and accessed */
- li r11, MD_PS8MEG | MD_SVALID | M_APG2
+ /* Set 8M byte page and mark it valid */
+ li r11, MD_PS8MEG | MD_SVALID
mtspr SPRN_MD_TWC, r11
rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
@@ -666,28 +680,29 @@ DTLBMissLinear:
li r11, RPN_PATTERN
mtspr SPRN_DAR, r11 /* Tag DAR */
-_ENTRY(dtlb_miss_exit_3)
- mfspr r10, SPRN_SPRG_SCRATCH0
+
+0: mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r12, SPRN_SPRG_SCRATCH2
rfi
+ patch_site 0b, patch__dtlbmiss_exit_3
#ifndef CONFIG_PIN_TLB_TEXT
ITLBMissLinear:
mtcr r12
- /* Set 8M byte page and mark it valid,accessed */
- li r11, MI_PS8MEG | MI_SVALID | M_APG2
+ /* Set 8M byte page and mark it valid */
+ li r11, MI_PS8MEG | MI_SVALID
mtspr SPRN_MI_TWC, r11
rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
-_ENTRY(itlb_miss_exit_2)
- mfspr r10, SPRN_SPRG_SCRATCH0
+0: mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r12, SPRN_SPRG_SCRATCH2
rfi
+ patch_site 0b, patch__itlbmiss_exit_2
#endif
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
@@ -705,8 +720,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
mfspr r11, SPRN_M_TW /* Get level 1 table */
blt+ 3f
rlwinm r11, r10, 16, 0xfff8
-_ENTRY(FixupDAR_cmp)
- cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+
+0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+ patch_site 0b, patch__fixupdar_linmem_top
+
/* create physical page address from effective address */
tophys(r11, r10)
blt- cr7, 201f
@@ -960,7 +977,7 @@ initial_mmu:
ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r8
li r8, MI_PS8MEG /* Set 8M byte page */
- ori r8, r8, MI_SVALID | M_APG2 /* Make it valid, APG 2 */
+ ori r8, r8, MI_SVALID /* Make it valid */
mtspr SPRN_MI_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
@@ -987,7 +1004,7 @@ initial_mmu:
ori r8, r8, MD_EVALID /* Mark it valid */
mtspr SPRN_MD_EPN, r8
li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
- ori r8, r8, MD_SVALID | M_APG2 /* Make it valid and accessed */
+ ori r8, r8, MD_SVALID /* Make it valid */
mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
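
The reworked ITLB/DTLB miss paths above replace the old APG trick with the two-line bit recipe spelled out in the comment (r11 = (r10 & PRESENT) & ((r10 & ACCESSED) >> 5); r10 = (r10 & ~PRESENT) | r11), i.e. _PAGE_PRESENT survives only when _PAGE_ACCESSED is also set. A throwaway userspace check of that recipe, using the PRESENT=0x1 / ACCESSED=0x20 values quoted in the comment (the DEMO_ names are mine):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_PRESENT	0x001u
#define DEMO_PAGE_ACCESSED	0x020u

int main(void)
{
	uint32_t ptes[] = { 0, DEMO_PAGE_PRESENT, DEMO_PAGE_ACCESSED,
			    DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED };

	for (int i = 0; i < 4; i++) {
		uint32_t r10 = ptes[i];
		uint32_t r11 = (r10 & DEMO_PAGE_PRESENT) &
			       ((r10 & DEMO_PAGE_ACCESSED) >> 5);

		/* PRESENT is kept only when ACCESSED was also set */
		r10 = (r10 & ~DEMO_PAGE_PRESENT) | r11;
		printf("pte 0x%03x -> 0x%03x\n", (unsigned)ptes[i], (unsigned)r10);
	}
	return 0;
}
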
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0ee3e6d50f28..913bfca09c4f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -198,7 +198,7 @@ void __init allocate_paca_ptrs(void)
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
- paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0));
+ paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES));
memset(paca_ptrs, 0x88, paca_ptrs_size);
}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 4da8ed576229..d3f04f2d8249 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -10,7 +10,7 @@
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/list.h>
@@ -203,7 +203,8 @@ pci_create_OF_bus_map(void)
struct property* of_prop;
struct device_node *dn;
- of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0);
+ of_prop = memblock_alloc(sizeof(struct property) + 256,
+ SMP_CACHE_BYTES);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4d5322cfad25..96f34730010f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -590,12 +590,11 @@ void flush_all_to_thread(struct task_struct *tsk)
if (tsk->thread.regs) {
preempt_disable();
BUG_ON(tsk != current);
- save_all(tsk);
-
#ifdef CONFIG_SPE
if (tsk->thread.regs->msr & MSR_SPE)
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
+ save_all(tsk);
preempt_enable();
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c4d7078e5295..fe758cedb93f 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -126,7 +126,7 @@ static void __init move_device_tree(void)
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
overlaps_crashkernel(start, size) ||
overlaps_initrd(start, size)) {
- p = __va(memblock_alloc(size, PAGE_SIZE));
+ p = __va(memblock_phys_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
DBG("Moved device tree to 0x%p\n", p);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9ca9db707bcb..93ee3703b42f 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,7 +33,6 @@
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <asm/debugfs.h>
@@ -460,8 +459,7 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- cpu_to_phys_id = __va(memblock_alloc(nr_cpu_ids * sizeof(u32),
- __alignof__(u32)));
+ cpu_to_phys_id = __va(memblock_phys_alloc(nr_cpu_ids * sizeof(u32), __alignof__(u32)));
memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32));
for_each_node_by_type(dn, "cpu") {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 8c507be12c3c..81909600013a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -206,9 +206,9 @@ void __init irqstack_early_init(void)
* as the memblock is limited to lowmem by default */
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
hardirq_ctx[i] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
}
}
@@ -227,12 +227,12 @@ void __init exc_lvl_early_init(void)
#endif
critirq_ctx[hw_cpu] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
dbgirq_ctx[hw_cpu] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
mcheckirq_ctx[hw_cpu] = (struct thread_info *)
- __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+ __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
}
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index faf00222b324..2a51e4cc8246 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -29,10 +29,9 @@
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
-#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>
@@ -763,13 +762,15 @@ void __init emergency_stack_init(void)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
- __pa(MAX_DMA_ADDRESS));
+ return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ early_cpu_to_node(cpu));
+
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index bf8def2159c3..d65b961661fb 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2337,8 +2337,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
return;
}
- dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
- / tb_ticks_per_sec;
+ dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
vcpu->arch.timer_running = 1;
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index fa888bfc347e..9f5b8c01c4e1 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -61,11 +61,10 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
dec_time = vcpu->arch.dec;
/*
- * Guest timebase ticks at the same frequency as host decrementer.
- * So use the host decrementer calculations for decrementer emulation.
+ * Guest timebase ticks at the same frequency as host timebase.
+ * So use the host timebase calculations for decrementer emulation.
*/
- dec_time = dec_time << decrementer_clockevent.shift;
- do_div(dec_time, decrementer_clockevent.mult);
+ dec_time = tb_to_ns(dec_time);
dec_nsec = do_div(dec_time, NSEC_PER_SEC);
hrtimer_start(&vcpu->arch.dec_timer,
ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
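
A rough userspace illustration of the conversion above, not kernel code: guest decrementer ticks are turned into nanoseconds and then split by do_div() into the seconds/nanoseconds pair handed to ktime_set(). The 512 MHz timebase and the naive multiply/divide are placeholder simplifications; the real tb_to_ns() uses precomputed scaling.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* placeholder for tb_to_ns(): assumes a 512 MHz timebase, ignores overflow */
static uint64_t demo_tb_to_ns(uint64_t tb_ticks)
{
	return tb_ticks * NSEC_PER_SEC / 512000000ULL;
}

int main(void)
{
	uint64_t dec_time = demo_tb_to_ns(2 * 512000000ULL + 256000000ULL);
	uint64_t dec_nsec = dec_time % NSEC_PER_SEC;	/* do_div() remainder */

	dec_time /= NSEC_PER_SEC;			/* do_div() quotient  */
	printf("%llu s + %llu ns\n",
	       (unsigned long long)dec_time, (unsigned long long)dec_nsec);
	return 0;
}
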
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 06796dec01ea..dedf88a76f58 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -2,7 +2,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/string.h>
#include <asm/setup.h>
@@ -14,7 +14,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
if (slab_is_available())
p = kzalloc(size, mask);
else {
- p = memblock_virt_alloc(size, 0);
+ p = memblock_alloc(size, SMP_CACHE_BYTES);
}
return p;
}
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 36484a2ef915..01b7f5107c3a 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -13,6 +13,7 @@
*/
#include <linux/memblock.h>
+#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>
@@ -79,7 +80,7 @@ void __init MMU_init_hw(void)
for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
mtspr(SPRN_MD_CTR, ctr | (i << 8));
mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
- mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2);
+ mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
addr += LARGE_PAGE_SIZE_8M;
mem -= LARGE_PAGE_SIZE_8M;
@@ -97,22 +98,13 @@ static void __init mmu_mapin_immr(void)
map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}
-/* Address of instructions to patch */
-#ifndef CONFIG_PIN_TLB_IMMR
-extern unsigned int DTLBMiss_jmp;
-#endif
-extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
-#ifndef CONFIG_PIN_TLB_TEXT
-extern unsigned int ITLBMiss_cmp;
-#endif
-
-static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
- unsigned int instr = *addr;
+ unsigned int instr = *(unsigned int *)patch_site_addr(site);
instr &= 0xffff0000;
instr |= (unsigned long)__va(mapped) >> 16;
- patch_instruction(addr, instr);
+ patch_instruction_site(site, instr);
}
unsigned long __init mmu_mapin_ram(unsigned long top)
@@ -123,17 +115,17 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
mapped = 0;
mmu_mapin_immr();
#ifndef CONFIG_PIN_TLB_IMMR
- patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
+ patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
#endif
#ifndef CONFIG_PIN_TLB_TEXT
- mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
+ mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
#endif
} else {
mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
}
- mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
- mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
+ mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
+ mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
/* If the size of RAM is not an exact power of two, we may not
* have covered RAM in its entirety with 8 MiB
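
mmu_patch_cmp_limit() above rewrites only the low 16 bits of the compare instruction at the patch site, folding in the upper half-word of the new virtual limit. A standalone illustration of that masking; the instruction word is an arbitrary placeholder (not a real cmpli encoding) and __va() is modelled as a plain PAGE_OFFSET add:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t instr	     = 0x2b8bc800;	/* placeholder opcode/regs + old limit */
	uint32_t mapped	     = 0x01800000;	/* hypothetical top of mapped RAM (phys) */
	uint32_t page_offset = 0xc0000000;	/* hypothetical PAGE_OFFSET */

	instr &= 0xffff0000;			/* keep opcode and register fields */
	instr |= (page_offset + mapped) >> 16;	/* insert __va(mapped) >> 16 */

	printf("patched compare word: 0x%08x\n", instr);
	return 0;
}
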
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a7226ed9cae6..8cf035e68378 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,7 +15,6 @@
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index dd949d6649a2..0a64fffabee1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -27,12 +27,11 @@
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
-#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -349,7 +348,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
- free_all_bootmem();
+ memblock_free_all();
#ifdef CONFIG_HIGHMEM
{
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 4d80239ef83c..2faca46ad720 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -44,7 +44,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>
@@ -461,10 +461,11 @@ void __init mmu_context_init(void)
/*
* Allocate the maps used by context management
*/
- context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
- context_mm = memblock_virt_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0);
+ context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
+ SMP_CACHE_BYTES);
#ifdef CONFIG_SMP
- stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
+ stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare",
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 693ae1c1acba..3a048e98a132 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -11,7 +11,7 @@
#define pr_fmt(fmt) "numa: " fmt
#include <linux/threads.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
@@ -19,7 +19,6 @@
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
-#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
@@ -788,7 +787,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
/* report and initialize */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5877f5aa8f5d..bda3c6f1bd32 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -50,7 +50,7 @@ __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
if (slab_is_available()) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
} else {
- pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
if (pte)
clear_page(pte);
}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 38a793bfca37..f6f575bae3bc 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -224,7 +224,7 @@ void __init MMU_init_hw(void)
* Find some memory for the hash table.
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
- Hash = __va(memblock_alloc(Hash_size, Hash_size));
+ Hash = __va(memblock_phys_alloc(Hash_size, Hash_size));
memset(Hash, 0, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 6c0020d1c561..e38f74e9e7a4 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -31,9 +31,6 @@
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;
-extern unsigned int itlb_miss_perf, dtlb_miss_perf;
-extern unsigned int itlb_miss_exit_1, itlb_miss_exit_2;
-extern unsigned int dtlb_miss_exit_1, dtlb_miss_exit_2, dtlb_miss_exit_3;
static atomic_t insn_ctr_ref;
static atomic_t itlb_miss_ref;
@@ -103,22 +100,22 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
if (atomic_inc_return(&itlb_miss_ref) == 1) {
- unsigned long target = (unsigned long)&itlb_miss_perf;
+ unsigned long target = patch_site_addr(&patch__itlbmiss_perf);
- patch_branch(&itlb_miss_exit_1, target, 0);
+ patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
#ifndef CONFIG_PIN_TLB_TEXT
- patch_branch(&itlb_miss_exit_2, target, 0);
+ patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
#endif
}
val = itlb_miss_counter;
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
if (atomic_inc_return(&dtlb_miss_ref) == 1) {
- unsigned long target = (unsigned long)&dtlb_miss_perf;
+ unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);
- patch_branch(&dtlb_miss_exit_1, target, 0);
- patch_branch(&dtlb_miss_exit_2, target, 0);
- patch_branch(&dtlb_miss_exit_3, target, 0);
+ patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
+ patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
+ patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
}
val = dtlb_miss_counter;
break;
@@ -180,17 +177,17 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
if (atomic_dec_return(&itlb_miss_ref) == 0) {
- patch_instruction(&itlb_miss_exit_1, insn);
+ patch_instruction_site(&patch__itlbmiss_exit_1, insn);
#ifndef CONFIG_PIN_TLB_TEXT
- patch_instruction(&itlb_miss_exit_2, insn);
+ patch_instruction_site(&patch__itlbmiss_exit_2, insn);
#endif
}
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
if (atomic_dec_return(&dtlb_miss_ref) == 0) {
- patch_instruction(&dtlb_miss_exit_1, insn);
- patch_instruction(&dtlb_miss_exit_2, insn);
- patch_instruction(&dtlb_miss_exit_3, insn);
+ patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
+ patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
+ patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
}
break;
}
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 2a9d66254ffc..5326ece36120 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -29,6 +29,7 @@ config KILAUEA
select 405EX
select PPC40x_SIMPLE
select PPC4xx_PCI_EXPRESS
+ select PCI
select PCI_MSI
select PPC4xx_MSI
help
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index f024efd5a4c2..9a85d350b1b6 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -21,6 +21,7 @@ config BLUESTONE
depends on 44x
select PPC44x_SIMPLE
select APM821xx
+ select PCI
select PCI_MSI
select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
@@ -200,6 +201,7 @@ config AKEBONO
select SWIOTLB
select 476FPE
select PPC4xx_PCI_EXPRESS
+ select PCI
select PCI_MSI
select PPC4xx_HSTA_MSI
select I2C
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index f06c83f321e6..f2971522fb4a 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -213,7 +213,7 @@ static int __init iob_init(struct device_node *dn)
pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
/* Allocate a spare page to map all invalid IOTLB pages. */
- tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
+ tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
if (!tmp)
panic("IOBMAP: Cannot allocate spare page!");
/* Empty l1 is marked invalid */
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 60b03a1703d1..ae54d7fe68f3 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -18,7 +18,7 @@
#include <linux/errno.h>
#include <linux/adb.h>
#include <linux/pmu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/sections.h>
@@ -513,7 +513,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
printk(KERN_ERR "nvram: no address\n");
return -EINVAL;
}
- nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0);
+ nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index a29fdf8a2e56..84d038ed3882 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -70,6 +70,7 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
return 0;
}
+/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
u64 end_pfn = start_pfn + nr_pages - 1;
@@ -110,6 +111,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
/* Trace memory needs to be aligned to the size */
end_pfn = round_down(end_pfn - nr_pages, nr_pages);
+ lock_device_hotplug();
for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
/*
@@ -118,15 +120,15 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
* we never try to remove memory that spans two iomem
* resources.
*/
- lock_device_hotplug();
end_pfn = base_pfn + nr_pages;
for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT) {
- remove_memory(nid, pfn << PAGE_SHIFT, bytes);
+ __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
}
unlock_device_hotplug();
return base_pfn << PAGE_SHIFT;
}
}
+ unlock_device_hotplug();
return 0;
}
@@ -242,9 +244,11 @@ static int memtrace_online(void)
* we need to online the memory ourselves.
*/
if (!memhp_auto_online) {
+ lock_device_hotplug();
walk_memory_range(PFN_DOWN(ent->start),
PFN_UP(ent->start + ent->size - 1),
NULL, online_mem_block);
+ unlock_device_hotplug();
}
/*
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index a4641515956f..beed86f4224b 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -171,7 +171,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
/*
* Allocate a buffer to hold the MC recoverable ranges.
*/
- mc_recoverable_range =__va(memblock_alloc(size, __alignof__(u64)));
+ mc_recoverable_range =__va(memblock_phys_alloc(size, __alignof__(u64)));
memset(mc_recoverable_range, 0, size);
for (i = 0; i < mc_recoverable_range_len; i++) {
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index cde710297a4e..dd807446801e 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -17,11 +17,10 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
-#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
@@ -3770,7 +3769,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb_id = be64_to_cpup(prop64);
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
- phb = memblock_virt_alloc(sizeof(*phb), 0);
+ phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
/* Allocate PCI controller */
phb->hose = hose = pcibios_alloc_controller(np);
@@ -3816,7 +3815,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
else
phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
- phb->diag_data = memblock_virt_alloc(phb->diag_data_size, 0);
+ phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
/* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
@@ -3875,7 +3874,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
}
pemap_off = size;
size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
- aux = memblock_virt_alloc(size, 0);
+ aux = memblock_alloc(size, SMP_CACHE_BYTES);
phb->ioda.pe_alloc = aux;
phb->ioda.m64_segmap = aux + m64map_off;
phb->ioda.m32_segmap = aux + m32map_off;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 77a37520068d..658bfab3350b 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -24,7 +24,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -126,7 +126,7 @@ static void __init prealloc(struct ps3_prealloc *p)
if (!p->size)
return;
- p->address = memblock_virt_alloc(p->size, p->align);
+ p->address = memblock_alloc(p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2b796da822c2..2a983b5a52e1 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -300,7 +300,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
nid = memory_add_physaddr_to_nid(base);
for (i = 0; i < sections_per_block; i++) {
- remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
base += MIN_MEMORY_BLOCK_SIZE;
}
@@ -389,7 +389,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
block_sz = pseries_memory_block_size();
nid = memory_add_physaddr_to_nid(lmb->base_addr);
- remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(nid, lmb->base_addr, block_sz);
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, block_sz);
@@ -668,7 +668,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
nid = memory_add_physaddr_to_nid(lmb->base_addr);
/* Add the memory */
- rc = add_memory(nid, lmb->base_addr, block_sz);
+ rc = __add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;
@@ -676,7 +676,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = dlpar_online_lmb(lmb);
if (rc) {
- remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(nid, lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 8bd590af488a..794487313cc8 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -26,6 +26,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/hugetlb.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
@@ -36,6 +37,7 @@
#include <asm/vio.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
+#include <asm/drmem.h>
#include "pseries.h"
@@ -433,6 +435,16 @@ static void parse_em_data(struct seq_file *m)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
+static void maxmem_data(struct seq_file *m)
+{
+ unsigned long maxmem = 0;
+
+ maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
+ maxmem += hugetlb_total_pages() * PAGE_SIZE;
+
+ seq_printf(m, "MaxMem=%ld\n", maxmem);
+}
+
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
int partition_potential_processors;
@@ -491,6 +503,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "slb_size=%d\n", mmu_slb_size);
#endif
parse_em_data(m);
+ maxmem_data(m);
return 0;
}
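The new MaxMem line simply adds the total size described by the drmem LMBs to the memory currently sitting in huge pages. A worked example with hypothetical numbers (32 LMBs of 256 MB each plus 2 GB of huge pages):

/* hypothetical values, only to show the arithmetic behind MaxMem */
unsigned long maxmem = 32UL * (256UL << 20)   /* drmem_info->n_lmbs * drmem_info->lmb_size */
                     + (2UL << 30);           /* hugetlb_total_pages() * PAGE_SIZE */
/* 8589934592 + 2147483648 = 10737418240, reported as "MaxMem=10737418240" */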
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 5ca3e22d0512..a5b40d1460f1 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -261,7 +261,7 @@ static void allocate_dart(void)
* that to work around what looks like a problem with the HT bridge
* prefetching into invalid pages and corrupting data
*/
- tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+ tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index e64a411d1a00..d45450f6666a 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
@@ -128,7 +128,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
if (bmp->bitmap_from_slab)
bmp->bitmap = kzalloc(size, GFP_KERNEL);
else {
- bmp->bitmap = memblock_virt_alloc(size, 0);
+ bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
/* the bitmap won't be freed from memblock allocator */
kmemleak_not_leak(bmp->bitmap);
}
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 69e7fb47bcaa..878f9c1d3615 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -11,6 +11,12 @@ UBSAN_SANITIZE := n
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+ifdef CONFIG_CC_IS_CLANG
+# clang stores addresses on the stack causing the frame size to blow
+# out. See https://github.com/ClangBuiltLinux/linux/issues/252
+KBUILD_CFLAGS += -Wframe-larger-than=4096
+endif
+
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y += xmon.o nonstdio.o spr_access.o
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index fe451348ae57..55da93f4e818 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -28,14 +28,12 @@ config RISCV
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
- select NO_BOOTMEM
select RISCV_ISA_A if SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
@@ -109,7 +107,6 @@ config ARCH_RV32I
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
- select GENERIC_LIB_UMODDI3
config ARCH_RV64I
bool "RV64I"
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 36473d7dbaac..07fa9ea75fea 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -1,6 +1,3 @@
-CONFIG_SMP=y
-CONFIG_PCI=y
-CONFIG_PCIE_XILINX=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
@@ -11,10 +8,15 @@ CONFIG_CFS_BANDWIDTH=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -59,6 +61,7 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
CONFIG_RAS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -72,8 +75,5 @@ CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
-# CONFIG_RCU_TRACE is not set
CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_SIFIVE_PLIC=y
+# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index a1ef503d616e..697fc23b0d5a 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -16,9 +16,6 @@
#include <asm/auxvec.h>
#include <asm/byteorder.h>
-/* TODO: Move definition into include/uapi/linux/elf-em.h */
-#define EM_RISCV 0xF3
-
/*
* These are used to set parameters in the core dumps.
*/
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 50de774d827a..0531f49af5c3 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -33,12 +33,6 @@
struct task_struct;
struct pt_regs;
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
/* CPU-specific state of a task */
struct thread_struct {
/* Callee-saved registers */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 5493f3228704..0339087aa652 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -28,7 +28,7 @@ bool has_fpu __read_mostly;
void riscv_fill_hwcap(void)
{
- struct device_node *node;
+ struct device_node *node = NULL;
const char *isa;
size_t i;
static unsigned long isa2hwcap[256] = {0};
@@ -44,9 +44,11 @@ void riscv_fill_hwcap(void)
/*
* We don't support running Linux on heterogeneous ISA systems. For
- * now, we just check the ISA of the first processor.
+ * now, we just check the ISA of the first "okay" processor.
*/
- node = of_find_node_by_type(NULL, "cpu");
+ while ((node = of_find_node_by_type(node, "cpu")))
+ if (riscv_of_processor_hartid(node) >= 0)
+ break;
if (!node) {
pr_warning("Unable to find \"cpu\" devicetree entry");
return;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 58a522f9bcc3..1d9bfaff60bc 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -13,9 +13,8 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/initrd.h>
#include <linux/memblock.h>
+#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
@@ -55,7 +54,7 @@ void __init mem_init(void)
#endif /* CONFIG_FLATMEM */
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8b25e1f45b27..5173366af8f3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -163,7 +163,6 @@ config S390
select HAVE_LIVEPATCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
select HAVE_MOD_ARCH_SPECIFIC
@@ -175,7 +174,6 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select SPARSE_IRQ
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 34768e6ef4fb..302795c47c06 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -73,12 +73,6 @@ static inline int test_cpu_flag_of(int flag, int cpu)
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
-
static inline void get_cpu_id(struct cpuid *ptr)
{
asm volatile("stidp %0" : "=Q" (*ptr));
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 376f6b6dfb3c..97eae3871868 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,10 +13,9 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/elf.h>
#include <asm/asm-offsets.h>
-#include <linux/memblock.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
@@ -61,7 +60,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
{
struct save_area *sa;
- sa = (void *) memblock_alloc(sizeof(*sa), 8);
+ sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
else
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a2e952b66248..72dd23ef771b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -34,7 +34,6 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
@@ -378,7 +377,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
- lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
+ lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -422,7 +421,7 @@ static void __init setup_lowcore(void)
* Allocate the global restart stack which is the same for
* all CPUs in case *one* of them does a PSW restart.
*/
- restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
+ restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
@@ -488,7 +487,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) __bss_stop - 1;
for_each_memblock(memory, reg) {
- res = memblock_virt_alloc(sizeof(*res), 8);
+ res = memblock_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -502,7 +501,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
- sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
+ sub_res = memblock_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -967,7 +966,8 @@ static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
- vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
+ PAGE_SIZE);
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
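Besides the straight renames, the s390 setup code above uses memblock_alloc_low(), the variant that keeps the allocation below ARCH_LOW_ADDRESS_LIMIT, which matters for structures such as the lowcore that must live in low memory. A sketch of that variant, with a hypothetical structure standing in for the real one:

#include <linux/memblock.h>

struct early_ctl {                      /* hypothetical stand-in for struct lowcore */
        unsigned long state[64];
};

static struct early_ctl * __init alloc_low_ctl(void)
{
        /* zeroed allocation from low memory, aligned to its own size */
        return memblock_alloc_low(sizeof(struct early_ctl),
                                  sizeof(struct early_ctl));
}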
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1b3188f57b58..f82b3d3c36e2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -35,7 +35,6 @@
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
-#include <linux/memblock.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
@@ -761,7 +760,7 @@ void __init smp_detect_cpus(void)
u16 address;
/* Get CPU information */
- info = memblock_virt_alloc(sizeof(*info), 8);
+ info = memblock_alloc(sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e8184a15578a..8992b04c0ade 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -8,7 +8,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
@@ -519,7 +519,7 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
- mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
+ mask->next = memblock_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}
@@ -537,7 +537,7 @@ void __init topology_init_early(void)
}
if (!MACHINE_HAS_TOPOLOGY)
goto out;
- tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
+ tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ec31b48a42a5..ebe748a9f472 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -18,7 +18,7 @@
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 84111a43ea29..eba2def3414d 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -16,7 +16,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <asm/diag.h>
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 92d7a153e72a..76d0708438e9 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -21,7 +21,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
@@ -29,7 +29,6 @@
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
-#include <linux/memblock.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@@ -139,7 +138,7 @@ void __init mem_init(void)
cmma_init();
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
cmma_init_nodat();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index db55561c5981..0472e27febdf 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -4,14 +4,13 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
-#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -36,7 +35,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
- return (void *) memblock_alloc(size, size);
+ return (void *) memblock_phys_alloc(size, size);
}
void *vmem_crst_alloc(unsigned long val)
@@ -57,7 +56,7 @@ pte_t __ref *vmem_pte_alloc(void)
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
- pte = (pte_t *) memblock_alloc(size, size);
+ pte = (pte_t *) memblock_phys_alloc(size, size);
if (!pte)
return NULL;
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 83b222c57609..bfba273c32c0 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
@@ -313,7 +312,7 @@ static void __ref create_core_to_node_map(void)
{
int i;
- emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
+ emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 5bd374491f94..ae0d9e889534 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>
@@ -64,7 +63,7 @@ static __init pg_data_t *alloc_node_data(void)
{
pg_data_t *res;
- res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
+ res = (pg_data_t *) memblock_phys_alloc(sizeof(pg_data_t), 8);
memset(res, 0, sizeof(pg_data_t));
return res;
}
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 21d1e8a1546d..71a608cd4f61 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -8,7 +8,7 @@
*/
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
@@ -34,7 +34,7 @@ struct toptree __ref *toptree_alloc(int level, int id)
if (slab_is_available())
res = kzalloc(sizeof(*res), GFP_KERNEL);
else
- res = memblock_virt_alloc(sizeof(*res), 8);
+ res = memblock_alloc(sizeof(*res), 8);
if (!res)
return res;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 475d786a65b0..f82a4da7adf3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -9,9 +9,7 @@ config SUPERH
select CLKDEV_LOOKUP
select DMA_DIRECT_OPS
select HAVE_IDE if HAS_IOPORT_MAP
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select NO_BOOTMEM
select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 95100d8a0b7b..0e0ecc0132e3 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -16,12 +16,6 @@
#include <asm/types.h>
#include <asm/hw_breakpoint.h>
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
-
/* Core Processor Version Register */
#define CCN_PVR 0xff000030
#define CCN_CVR 0xff000040
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 777a16318aff..f3d7075648d0 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -19,21 +19,6 @@
#include <asm/types.h>
#include <cpu/registers.h>
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ \
-void *pc; \
-unsigned long long __dummy = 0; \
-__asm__("gettr tr0, %1\n\t" \
- "pta 4, tr0\n\t" \
- "gettr tr0, %0\n\t" \
- "ptabs %1, tr0\n\t" \
- :"=r" (pc), "=r" (__dummy) \
- : "1" (__dummy)); \
-pc; })
-
#endif
/*
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7713c084d040..c8c13c777162 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -11,12 +11,11 @@
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
-#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
@@ -128,7 +127,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
if (pud_none(*pud)) {
pmd_t *pmd;
- pmd = alloc_bootmem_pages(PAGE_SIZE);
+ pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
BUG_ON(pmd != pmd_offset(pud, 0));
}
@@ -141,7 +140,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
if (pmd_none(*pmd)) {
pte_t *pte;
- pte = alloc_bootmem_pages(PAGE_SIZE);
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -350,7 +349,7 @@ void __init mem_init(void)
high_memory = max_t(void *, high_memory,
__va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
- free_all_bootmem();
+ memblock_free_all();
/* Set this up early, so we can take care of the zero page */
cpu_cache_init();
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 927a1294c465..07e744d75fa0 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <asm/fixmap.h>
#include <asm/page.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 7e2aa59fcc29..490b2c95c212 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -45,8 +45,6 @@ config SPARC
select LOCKDEP_SMALL if LOCKDEP
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 192493c257fa..3c4bc2189092 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -7,12 +7,6 @@
#ifndef __ASM_SPARC_PROCESSOR_H
#define __ASM_SPARC_PROCESSOR_H
-/*
- * Sparc32 implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("sethi %%hi(1f), %0; or %0, %%lo(1f), %0;\n1:" : "=r" (pc)); pc; })
-
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/head.h>
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index aac23d4a4ddd..5cf145f18f36 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -8,12 +8,6 @@
#ifndef __ASM_SPARC64_PROCESSOR_H
#define __ASM_SPARC64_PROCESSOR_H
-/*
- * Sparc64 implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
-
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 39a2503fa3e1..9a26b442f820 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -5,13 +5,12 @@
*/
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/refcount.h>
@@ -170,7 +169,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
mdesc_size);
alloc_size = PAGE_ALIGN(handle_size);
- paddr = memblock_alloc(alloc_size, PAGE_SIZE);
+ paddr = memblock_phys_alloc(alloc_size, PAGE_SIZE);
hp = NULL;
if (paddr) {
@@ -190,7 +189,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
alloc_size = PAGE_ALIGN(hp->handle_size);
start = __pa(hp);
- free_bootmem_late(start, alloc_size);
+ memblock_free_late(start, alloc_size);
}
static struct mdesc_mem_ops memblock_mdesc_ops = {
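The mdesc path keeps working with physical addresses and only releases the buffer after boot, so it pairs memblock_phys_alloc() with memblock_free_late() (the former free_bootmem_late()), which returns pages straight to the page allocator once that is up. A short sketch of the pairing, with hypothetical helper names:

#include <linux/memblock.h>
#include <linux/mm.h>

static phys_addr_t early_desc_pa;

static void __init grab_early_desc(size_t size)
{
        early_desc_pa = memblock_phys_alloc(PAGE_ALIGN(size), PAGE_SIZE);
}

static void __init release_early_desc(size_t size)
{
        /* safe once the buddy allocator is initialized */
        memblock_free_late(early_desc_pa, PAGE_ALIGN(size));
}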
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 67b3e6b3ce5d..47c871394ccb 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1849,16 +1849,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
{
u64 saved_fault_address = current_thread_info()->fault_address;
u8 saved_fault_code = get_thread_fault_code();
- mm_segment_t old_fs;
perf_callchain_store(entry, regs->tpc);
if (!current->mm)
return;
- old_fs = get_fs();
- set_fs(USER_DS);
-
flushw_user();
pagefault_disable();
@@ -1870,7 +1866,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
pagefault_enable();
- set_fs(old_fs);
set_thread_fault_code(saved_fault_code);
current_thread_info()->fault_address = saved_fault_address;
}
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index b51cbb9e87dc..d41e2a749c5d 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -19,7 +19,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/oplib.h>
@@ -32,7 +32,7 @@ void * __init prom_early_alloc(unsigned long size)
{
void *ret;
- ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
if (ret != NULL)
memset(ret, 0, size);
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index baeaeed64993..c37955d127fe 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -34,7 +34,7 @@
void * __init prom_early_alloc(unsigned long size)
{
- unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
+ unsigned long paddr = memblock_phys_alloc(size, SMP_CACHE_BYTES);
void *ret;
if (!paddr) {
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 7944b3ca216a..cd2825cb8420 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -32,7 +32,7 @@
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -621,12 +621,10 @@ void __init alloc_irqstack_bootmem(void)
for_each_possible_cpu(i) {
node = cpu_to_node(i);
- softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
- THREAD_SIZE,
- THREAD_SIZE, 0);
- hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
- THREAD_SIZE,
- THREAD_SIZE, 0);
+ softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
+ THREAD_SIZE, node);
+ hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
+ THREAD_SIZE, node);
}
}
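Here __alloc_bootmem_node(NODE_DATA(node), size, align, 0) maps onto memblock_alloc_node(size, align, node), which prefers the given node and falls back to other nodes if the requested one has no early memory. A one-function sketch under that assumption:

#include <linux/memblock.h>
#include <linux/thread_info.h>

static void * __init alloc_irq_stack_on(int node)
{
        /* node-local when possible, zeroed, THREAD_SIZE-aligned */
        return memblock_alloc_node(THREAD_SIZE, THREAD_SIZE, node);
}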
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d3ea1f3c06a0..4792e08ad36b 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,7 +22,7 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
@@ -1588,26 +1588,26 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
- ptr = __alloc_bootmem(size, align, goal);
+ ptr = memblock_alloc_from(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
- ptr = __alloc_bootmem_node(NODE_DATA(node),
- size, align, goal);
+ ptr = memblock_alloc_try_nid(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
"%016lx\n", cpu, size, node, __pa(ptr));
}
return ptr;
#else
- return __alloc_bootmem(size, align, goal);
+ return memblock_alloc_from(size, align, goal);
#endif
}
static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
@@ -1627,7 +1627,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
if (pgd_none(*pgd)) {
pud_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pgd_populate(&init_mm, pgd, new);
}
@@ -1635,7 +1635,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
if (pud_none(*pud)) {
pmd_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, new);
}
@@ -1643,7 +1643,7 @@ static void __init pcpu_populate_pte(unsigned long addr)
if (!pmd_present(*pmd)) {
pte_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
}
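The per-cpu setup above covers the remaining mappings in this series: __alloc_bootmem(size, align, goal) becomes memblock_alloc_from(size, align, goal), with goal acting as the minimum physical address; __alloc_bootmem_node(NODE_DATA(node), size, align, goal) becomes memblock_alloc_try_nid(size, align, goal, MEMBLOCK_ALLOC_ACCESSIBLE, node); and free_bootmem(pa, size) becomes memblock_free(pa, size). A condensed sketch of the allocate/free pair under those assumptions:

#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/numa.h>

static void * __init pcpu_alloc_sketch(size_t size, size_t align,
                                       phys_addr_t goal, int node)
{
        if (node == NUMA_NO_NODE)
                return memblock_alloc_from(size, align, goal);

        /* node-affine, while still honouring the minimum address "goal" */
        return memblock_alloc_try_nid(size, align, goal,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

static void __init pcpu_free_sketch(void *ptr, size_t size)
{
        memblock_free(__pa(ptr), size);
}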
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index bb68c805b891..ff9389a1c9f3 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -47,9 +47,9 @@ sys_call_table32:
.word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
.word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
-/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit
+/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 92634d4e440c..d900952bfc5f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -22,7 +22,6 @@
#include <linux/initrd.h>
#include <linux/init.h>
#include <linux/highmem.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
@@ -265,7 +264,7 @@ void __init mem_init(void)
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
sparc_valid_addr_bitmap = (unsigned long *)
- __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+ memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
if (sparc_valid_addr_bitmap == NULL) {
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
@@ -277,7 +276,7 @@ void __init mem_init(void)
max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 39822f611c01..3c8aac21f426 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -11,7 +11,7 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
@@ -25,7 +25,6 @@
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
-#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>
@@ -1092,7 +1091,8 @@ static void __init allocate_node_data(int nid)
#ifdef CONFIG_NEED_MULTIPLE_NODES
unsigned long paddr;
- paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
+ paddr = memblock_phys_alloc_try_nid(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, nid);
if (!paddr) {
prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
prom_halt();
@@ -1266,8 +1266,8 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
if (!count)
return -ENOENT;
- paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
- SMP_CACHE_BYTES);
+ paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
+ SMP_CACHE_BYTES);
if (!paddr)
return -ENOMEM;
@@ -1307,8 +1307,8 @@ static int __init grab_mblocks(struct mdesc_handle *md)
if (!count)
return -ENOENT;
- paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
- SMP_CACHE_BYTES);
+ paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
+ SMP_CACHE_BYTES);
if (!paddr)
return -ENOMEM;
@@ -1810,7 +1810,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
if (pgd_none(*pgd)) {
pud_t *new;
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pgd_populate(&init_mm, pgd, new);
}
@@ -1822,7 +1823,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
vstart = kernel_map_hugepud(vstart, vend, pud);
continue;
}
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pud_populate(&init_mm, pud, new);
}
@@ -1835,7 +1837,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
vstart = kernel_map_hugepmd(vstart, vend, pmd);
continue;
}
- new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pmd_populate_kernel(&init_mm, pmd, new);
}
@@ -2541,12 +2544,12 @@ void __init mem_init(void)
{
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
/*
* Must be done after boot memory is put on freelist, because here we
* might set fields in deferred struct pages that have not yet been
- * initialized, and free_all_bootmem() initializes all the reserved
+ * initialized, and memblock_free_all() initializes all the reserved
* deferred pages for us.
*/
register_page_bootmem_info();
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index be9cb0065179..a6142c5abf61 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -11,7 +11,7 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
@@ -303,13 +303,13 @@ static void __init srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
- srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
- SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+ srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
+ SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap =
- __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
- SMP_CACHE_BYTES, 0UL);
+ memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -467,7 +467,7 @@ static void __init sparc_context_init(int numctx)
unsigned long size;
size = numctx * sizeof(struct ctx_list);
- ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 10c15b8853ae..6b9938919f0b 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,8 +12,6 @@ config UML
select HAVE_UID16
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 8d80b27502e6..7e524efed584 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -261,7 +261,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
if (err == 0) {
spin_unlock(&line->lock);
return IRQ_NONE;
- } else if (err < 0) {
+ } else if ((err < 0) && (err != -EAGAIN)) {
line->head = line->buffer;
line->tail = line->buffer;
}
@@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
if (err)
return err;
if (output)
- err = um_request_irq(driver->write_irq, fd, IRQ_NONE,
+ err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
line_write_interrupt, IRQF_SHARED,
driver->write_irq_name, data);
return err;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 3ef1b48e064a..624cb47cc9cd 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -6,7 +6,7 @@
* Licensed under the GPL.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
@@ -650,7 +650,7 @@ static int __init eth_setup(char *str)
return 1;
}
- new = alloc_bootmem(sizeof(*new));
+ new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
INIT_LIST_HEAD(&new->list);
new->index = n;
diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c
index 9a8e1b64c22e..5f56d11b886f 100644
--- a/arch/um/drivers/port_user.c
+++ b/arch/um/drivers/port_user.c
@@ -168,7 +168,7 @@ int port_connection(int fd, int *socket, int *pid_out)
{
int new, err;
char *argv[] = { "/usr/sbin/in.telnetd", "-L",
- "/usr/lib/uml/port-helper", NULL };
+ OS_LIB_PATH "/uml/port-helper", NULL };
struct port_pre_exec_data data;
new = accept(fd, NULL, 0);
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 50ee3bb5a63a..046fa9ea0ccc 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -9,7 +9,7 @@
*/
#include <linux/version.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
@@ -1118,16 +1118,11 @@ static int vector_net_close(struct net_device *dev)
os_close_file(vp->fds->tx_fd);
vp->fds->tx_fd = -1;
}
- if (vp->bpf != NULL)
- kfree(vp->bpf);
- if (vp->fds->remote_addr != NULL)
- kfree(vp->fds->remote_addr);
- if (vp->transport_data != NULL)
- kfree(vp->transport_data);
- if (vp->header_rxbuffer != NULL)
- kfree(vp->header_rxbuffer);
- if (vp->header_txbuffer != NULL)
- kfree(vp->header_txbuffer);
+ kfree(vp->bpf);
+ kfree(vp->fds->remote_addr);
+ kfree(vp->transport_data);
+ kfree(vp->header_rxbuffer);
+ kfree(vp->header_txbuffer);
if (vp->rx_queue != NULL)
destroy_queue(vp->rx_queue);
if (vp->tx_queue != NULL)
@@ -1580,7 +1575,7 @@ static int __init vector_setup(char *str)
str, error);
return 1;
}
- new = alloc_bootmem(sizeof(*new));
+ new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
INIT_LIST_HEAD(&new->list);
new->unit = n;
new->arguments = str;
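The cleanups in vector_kern.c (and in vector_user.c just below) rely on kfree() accepting NULL and doing nothing, so the surrounding NULL checks are dropped. A trivial sketch of the idiom:

#include <linux/slab.h>

static void release_buffers(char *rx_buf, char *tx_buf)
{
        /* kfree(NULL) is a no-op, so no NULL checks are needed */
        kfree(rx_buf);
        kfree(tx_buf);
}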
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index 4d6a78e31089..3d8cdbdb4e66 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -267,8 +267,7 @@ cleanup:
os_close_file(rxfd);
if (txfd >= 0)
os_close_file(txfd);
- if (result != NULL)
- kfree(result);
+ kfree(result);
return NULL;
}
@@ -434,8 +433,7 @@ cleanup:
if (fd >= 0)
os_close_file(fd);
if (result != NULL) {
- if (result->remote_addr != NULL)
- kfree(result->remote_addr);
+ kfree(result->remote_addr);
kfree(result);
}
return NULL;
diff --git a/arch/um/include/shared/aio.h b/arch/um/include/shared/aio.h
deleted file mode 100644
index 423bae9153f8..000000000000
--- a/arch/um/include/shared/aio.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2004 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef AIO_H__
-#define AIO_H__
-
-enum aio_type { AIO_READ, AIO_WRITE, AIO_MMAP };
-
-struct aio_thread_reply {
- void *data;
- int err;
-};
-
-struct aio_context {
- int reply_fd;
- struct aio_context *next;
-};
-
-#define INIT_AIO_CONTEXT { .reply_fd = -1, \
- .next = NULL }
-
-extern int submit_aio(enum aio_type type, int fd, char *buf, int len,
- unsigned long long offset, int reply_fd,
- struct aio_context *aio);
-
-#endif
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 6f6e7896e53f..ce169ea87e61 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -4,7 +4,7 @@
*/
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/initrd.h>
#include <asm/types.h>
#include <init.h>
@@ -36,7 +36,7 @@ int __init read_initrd(void)
return 0;
}
- area = alloc_bootmem(size);
+ area = memblock_alloc(size, SMP_CACHE_BYTES);
if (load_initrd(initrd, area, size) == -1)
return 0;
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 6b7f3827d6e4..8360fa3f676d 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -244,8 +244,7 @@ static void garbage_collect_irq_entries(void)
to_free = NULL;
}
walk = walk->next;
- if (to_free != NULL)
- kfree(to_free);
+ kfree(to_free);
}
}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 3c0e470ea646..1067469ba2ea 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -5,7 +5,7 @@
#include <linux/stddef.h>
#include <linux/module.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
@@ -46,11 +46,11 @@ void __init mem_init(void)
*/
brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
- free_bootmem(__pa(brk_end), uml_reserved - brk_end);
+ memblock_free(__pa(brk_end), uml_reserved - brk_end);
uml_reserved = brk_end;
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
max_low_pfn = totalram_pages;
max_pfn = totalram_pages;
mem_init_print_info(NULL);
@@ -64,7 +64,8 @@ void __init mem_init(void)
static void __init one_page_table_init(pmd_t *pmd)
{
if (pmd_none(*pmd)) {
- pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
if (pte != pte_offset_kernel(pmd, 0))
@@ -75,7 +76,7 @@ static void __init one_page_table_init(pmd_t *pmd)
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
- pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
if (pmd_table != pmd_offset(pud, 0))
BUG();
@@ -124,7 +125,7 @@ static void __init fixaddr_user_init( void)
return;
fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
- v = (unsigned long) alloc_bootmem_low_pages(size);
+ v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
memcpy((void *) v , (void *) FIXADDR_USER_START, size);
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -143,7 +144,8 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES], vaddr;
int i;
- empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0;
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 296a91a04598..5bf56af4d5b9 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -4,7 +4,6 @@
*/
#include <linux/module.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index cced82946042..0e8b6158f224 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -19,7 +19,7 @@
#include <skas.h>
/*
- * Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by
+ * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
* segv().
*/
int handle_page_fault(unsigned long address, unsigned long ip,
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index ada473bf6f46..455b500afe97 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -6,18 +6,14 @@
# Don't instrument UML-specific code
KCOV_INSTRUMENT := n
-obj-y = aio.o execvp.o file.o helper.o irq.o main.o mem.o process.o \
+obj-y = execvp.o file.o helper.o irq.o main.o mem.o process.o \
registers.o sigio.o signal.o start_up.o time.o tty.o \
umid.o user_syms.o util.o drivers/ skas/
obj-$(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA) += elf_aux.o
-USER_OBJS := $(user-objs-y) aio.o elf_aux.o execvp.o file.o helper.o irq.o \
+USER_OBJS := $(user-objs-y) elf_aux.o execvp.o file.o helper.o irq.o \
main.o mem.o process.o registers.o sigio.o signal.o start_up.o time.o \
tty.o umid.o util.o
-HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \
- echo -DHAVE_AIO_ABI )
-CFLAGS_aio.o += $(HAVE_AIO_ABI)
-
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
deleted file mode 100644
index 014eb35fd13b..000000000000
--- a/arch/um/os-Linux/aio.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <unistd.h>
-#include <sched.h>
-#include <signal.h>
-#include <errno.h>
-#include <sys/time.h>
-#include <asm/unistd.h>
-#include <aio.h>
-#include <init.h>
-#include <kern_util.h>
-#include <os.h>
-
-struct aio_thread_req {
- enum aio_type type;
- int io_fd;
- unsigned long long offset;
- char *buf;
- int len;
- struct aio_context *aio;
-};
-
-#if defined(HAVE_AIO_ABI)
-#include <linux/aio_abi.h>
-
-/*
- * If we have the headers, we are going to build with AIO enabled.
- * If we don't have aio in libc, we define the necessary stubs here.
- */
-
-#if !defined(HAVE_AIO_LIBC)
-
-static long io_setup(int n, aio_context_t *ctxp)
-{
- return syscall(__NR_io_setup, n, ctxp);
-}
-
-static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
-{
- return syscall(__NR_io_submit, ctx, nr, iocbpp);
-}
-
-static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
- struct io_event *events, struct timespec *timeout)
-{
- return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
-}
-
-#endif
-
-/*
- * The AIO_MMAP cases force the mmapped page into memory here
- * rather than in whatever place first touches the data. I used
- * to do this by touching the page, but that's delicate because
- * gcc is prone to optimizing that away. So, what's done here
- * is we read from the descriptor from which the page was
- * mapped. The caller is required to pass an offset which is
- * inside the page that was mapped. Thus, when the read
- * returns, we know that the page is in the page cache, and
- * that it now backs the mmapped area.
- */
-
-static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
- int len, unsigned long long offset, struct aio_context *aio)
-{
- struct iocb *iocbp = & ((struct iocb) {
- .aio_data = (unsigned long) aio,
- .aio_fildes = fd,
- .aio_buf = (unsigned long) buf,
- .aio_nbytes = len,
- .aio_offset = offset
- });
- char c;
-
- switch (type) {
- case AIO_READ:
- iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
- break;
- case AIO_WRITE:
- iocbp->aio_lio_opcode = IOCB_CMD_PWRITE;
- break;
- case AIO_MMAP:
- iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
- iocbp->aio_buf = (unsigned long) &c;
- iocbp->aio_nbytes = sizeof(c);
- break;
- default:
- printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type);
- return -EINVAL;
- }
-
- return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno;
-}
-
-/* Initialized in an initcall and unchanged thereafter */
-static aio_context_t ctx = 0;
-
-static int aio_thread(void *arg)
-{
- struct aio_thread_reply reply;
- struct io_event event;
- int err, n, reply_fd;
-
- os_fix_helper_signals();
- while (1) {
- n = io_getevents(ctx, 1, 1, &event, NULL);
- if (n < 0) {
- if (errno == EINTR)
- continue;
- printk(UM_KERN_ERR "aio_thread - io_getevents failed, "
- "errno = %d\n", errno);
- }
- else {
- reply = ((struct aio_thread_reply)
- { .data = (void *) (long) event.data,
- .err = event.res });
- reply_fd = ((struct aio_context *) reply.data)->reply_fd;
- err = write(reply_fd, &reply, sizeof(reply));
- if (err != sizeof(reply))
- printk(UM_KERN_ERR "aio_thread - write failed, "
- "fd = %d, err = %d\n", reply_fd, errno);
- }
- }
- return 0;
-}
-
-#endif
-
-static int do_not_aio(struct aio_thread_req *req)
-{
- char c;
- unsigned long long actual;
- int n;
-
- actual = lseek64(req->io_fd, req->offset, SEEK_SET);
- if (actual != req->offset)
- return -errno;
-
- switch (req->type) {
- case AIO_READ:
- n = read(req->io_fd, req->buf, req->len);
- break;
- case AIO_WRITE:
- n = write(req->io_fd, req->buf, req->len);
- break;
- case AIO_MMAP:
- n = read(req->io_fd, &c, sizeof(c));
- break;
- default:
- printk(UM_KERN_ERR "do_not_aio - bad request type : %d\n",
- req->type);
- return -EINVAL;
- }
-
- if (n < 0)
- return -errno;
- return 0;
-}
-
-/* These are initialized in initcalls and not changed */
-static int aio_req_fd_r = -1;
-static int aio_req_fd_w = -1;
-static int aio_pid = -1;
-static unsigned long aio_stack;
-
-static int not_aio_thread(void *arg)
-{
- struct aio_thread_req req;
- struct aio_thread_reply reply;
- int err;
-
- os_fix_helper_signals();
- while (1) {
- err = read(aio_req_fd_r, &req, sizeof(req));
- if (err != sizeof(req)) {
- if (err < 0)
- printk(UM_KERN_ERR "not_aio_thread - "
- "read failed, fd = %d, err = %d\n",
- aio_req_fd_r,
- errno);
- else {
- printk(UM_KERN_ERR "not_aio_thread - short "
- "read, fd = %d, length = %d\n",
- aio_req_fd_r, err);
- }
- continue;
- }
- err = do_not_aio(&req);
- reply = ((struct aio_thread_reply) { .data = req.aio,
- .err = err });
- err = write(req.aio->reply_fd, &reply, sizeof(reply));
- if (err != sizeof(reply))
- printk(UM_KERN_ERR "not_aio_thread - write failed, "
- "fd = %d, err = %d\n", req.aio->reply_fd, errno);
- }
-
- return 0;
-}
-
-static int init_aio_24(void)
-{
- int fds[2], err;
-
- err = os_pipe(fds, 1, 1);
- if (err)
- goto out;
-
- aio_req_fd_w = fds[0];
- aio_req_fd_r = fds[1];
-
- err = os_set_fd_block(aio_req_fd_w, 0);
- if (err)
- goto out_close_pipe;
-
- err = run_helper_thread(not_aio_thread, NULL,
- CLONE_FILES | CLONE_VM, &aio_stack);
- if (err < 0)
- goto out_close_pipe;
-
- aio_pid = err;
- goto out;
-
-out_close_pipe:
- close(fds[0]);
- close(fds[1]);
- aio_req_fd_w = -1;
- aio_req_fd_r = -1;
-out:
-#ifndef HAVE_AIO_ABI
- printk(UM_KERN_INFO "/usr/include/linux/aio_abi.h not present during "
- "build\n");
-#endif
- printk(UM_KERN_INFO "2.6 host AIO support not used - falling back to "
- "I/O thread\n");
- return 0;
-}
-
-#ifdef HAVE_AIO_ABI
-#define DEFAULT_24_AIO 0
-static int init_aio_26(void)
-{
- int err;
-
- if (io_setup(256, &ctx)) {
- err = -errno;
- printk(UM_KERN_ERR "aio_thread failed to initialize context, "
- "err = %d\n", errno);
- return err;
- }
-
- err = run_helper_thread(aio_thread, NULL,
- CLONE_FILES | CLONE_VM, &aio_stack);
- if (err < 0)
- return err;
-
- aio_pid = err;
-
- printk(UM_KERN_INFO "Using 2.6 host AIO\n");
- return 0;
-}
-
-static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, struct aio_context *aio)
-{
- struct aio_thread_reply reply;
- int err;
-
- err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
- if (err) {
- reply = ((struct aio_thread_reply) { .data = aio,
- .err = err });
- err = write(aio->reply_fd, &reply, sizeof(reply));
- if (err != sizeof(reply)) {
- err = -errno;
- printk(UM_KERN_ERR "submit_aio_26 - write failed, "
- "fd = %d, err = %d\n", aio->reply_fd, -err);
- }
- else err = 0;
- }
-
- return err;
-}
-
-#else
-#define DEFAULT_24_AIO 1
-static int init_aio_26(void)
-{
- return -ENOSYS;
-}
-
-static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, struct aio_context *aio)
-{
- return -ENOSYS;
-}
-#endif
-
-/* Initialized in an initcall and unchanged thereafter */
-static int aio_24 = DEFAULT_24_AIO;
-
-static int __init set_aio_24(char *name, int *add)
-{
- aio_24 = 1;
- return 0;
-}
-
-__uml_setup("aio=2.4", set_aio_24,
-"aio=2.4\n"
-" This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
-" available. 2.4 AIO is a single thread that handles one request at a\n"
-" time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO \n"
-" interface to handle an arbitrary number of pending requests. 2.6 AIO \n"
-" is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
-" /usr/include/linux/aio_abi.h not available. Many distributions don't\n"
-" include aio_abi.h, so you will need to copy it from a kernel tree to\n"
-" your /usr/include/linux in order to build an AIO-capable UML\n\n"
-);
-
-static int init_aio(void)
-{
- int err;
-
- if (!aio_24) {
- err = init_aio_26();
- if (err && (errno == ENOSYS)) {
- printk(UM_KERN_INFO "2.6 AIO not supported on the "
- "host - reverting to 2.4 AIO\n");
- aio_24 = 1;
- }
- else return err;
- }
-
- if (aio_24)
- return init_aio_24();
-
- return 0;
-}
-
-/*
- * The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
- * needs to be called when the kernel is running because it calls run_helper,
- * which needs get_free_page. exit_aio is a __uml_exitcall because the generic
- * kernel does not run __exitcalls on shutdown, and can't because many of them
- * break when called outside of module unloading.
- */
-__initcall(init_aio);
-
-static void exit_aio(void)
-{
- if (aio_pid != -1) {
- os_kill_process(aio_pid, 1);
- free_stack(aio_stack, 0);
- }
-}
-
-__uml_exitcall(exit_aio);
-
-static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, struct aio_context *aio)
-{
- struct aio_thread_req req = { .type = type,
- .io_fd = io_fd,
- .offset = offset,
- .buf = buf,
- .len = len,
- .aio = aio,
- };
- int err;
-
- err = write(aio_req_fd_w, &req, sizeof(req));
- if (err == sizeof(req))
- err = 0;
- else err = -errno;
-
- return err;
-}
-
-int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, int reply_fd,
- struct aio_context *aio)
-{
- aio->reply_fd = reply_fd;
- if (aio_24)
- return submit_aio_24(type, io_fd, buf, len, offset, aio);
- else
- return submit_aio_26(type, io_fd, buf, len, offset, aio);
-}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index c94c3bd70ccd..df4a985716eb 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
fatal_sigsegv();
}
longjmp(*switch_buf, 1);
+
+ /* unreachable */
+ printk(UM_KERN_ERR "impossible long jump!");
+ fatal_sigsegv();
+ return 0;
}
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 0c5111b206bd..a4c05159dca5 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -5,8 +5,6 @@ config UNICORE32
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select DMA_DIRECT_OPS
- select HAVE_MEMBLOCK
- select NO_BOOTMEM
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index 4eaa42167667..b772ed1c0f25 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -13,12 +13,6 @@
#ifndef __UNICORE_PROCESSOR_H__
#define __UNICORE_PROCESSOR_H__
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
-
#ifdef __KERNEL__
#include <asm/ptrace.h>
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
index 9969ec374abb..29b71c68eb7c 100644
--- a/arch/unicore32/kernel/hibernate.c
+++ b/arch/unicore32/kernel/hibernate.c
@@ -13,7 +13,7 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index c2bffa5614a4..4b0cb68c355a 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -17,7 +17,7 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
@@ -27,7 +27,6 @@
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
-#include <linux/memblock.h>
#include <linux/elf.h>
#include <linux/io.h>
@@ -207,7 +206,7 @@ request_standard_resources(struct meminfo *mi)
if (mi->bank[i].size == 0)
continue;
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = mi->bank[i].start;
res->end = mi->bank[i].start + mi->bank[i].size - 1;
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 8f8699e62bd5..cf4eb9481fd6 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -11,13 +11,12 @@
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
-#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
@@ -238,7 +237,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
* free the section of the memmap array.
*/
if (pg < pgend)
- free_bootmem(pg, pgend - pg);
+ memblock_free(pg, pgend - pg);
}
/*
@@ -286,7 +285,7 @@ void __init mem_init(void)
free_unused_memmap(&meminfo);
/* this will put all unused low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 0c94b7b4514d..040a8c279761 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -17,7 +17,6 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
-#include <linux/bootmem.h>
#include <linux/io.h>
#include <asm/cputype.h>
@@ -144,7 +143,7 @@ static void __init build_mem_type_table(void)
static void __init *early_alloc(unsigned long sz)
{
- void *ptr = __va(memblock_alloc(sz, sz));
+ void *ptr = __va(memblock_phys_alloc(sz, sz));
memset(ptr, 0, sz);
return ptr;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cbd5f28ea8e2..ba7e3464ee92 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -129,6 +129,7 @@ config X86
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
@@ -169,7 +170,6 @@ config X86
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
- select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC
@@ -186,6 +186,7 @@ config X86
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_RSEQ
@@ -833,9 +834,6 @@ config JAILHOUSE_GUEST
endif #HYPERVISOR_GUEST
-config NO_BOOTMEM
- def_bool y
-
source "arch/x86/Kconfig.cpu"
config HPET_TIMER
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 708b46a54578..25e5a6bda8c3 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -329,8 +329,22 @@ For 32-bit we have the following conventions - kernel is built with
#endif
+.macro STACKLEAK_ERASE_NOCLOBBER
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ PUSH_AND_CLEAR_REGS
+ call stackleak_erase
+ POP_REGS
+#endif
+.endm
+
#endif /* CONFIG_X86_64 */
+.macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ call stackleak_erase
+#endif
+.endm
+
/*
* This does 'call enter_from_user_mode' unless we can avoid it based on
* kernel config or using the static jump infrastructure.
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 687e47f8a796..d309f30cf7af 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -46,6 +46,8 @@
#include <asm/frame.h>
#include <asm/nospec-branch.h>
+#include "calling.h"
+
.section .entry.text, "ax"
/*
@@ -712,6 +714,7 @@ ENTRY(ret_from_fork)
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
call syscall_return_slowpath
+ STACKLEAK_ERASE
jmp restore_all
/* kernel thread */
@@ -886,6 +889,8 @@ ENTRY(entry_SYSENTER_32)
ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
"jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ STACKLEAK_ERASE
+
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
@@ -997,6 +1002,8 @@ ENTRY(entry_INT80_32)
call do_int80_syscall_32
.Lsyscall_32_done:
+ STACKLEAK_ERASE
+
restore_all:
TRACE_IRQS_IRET
SWITCH_TO_ENTRY_STACK
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4d7a2d9d44cf..ce25d84023c0 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -266,6 +266,8 @@ syscall_return_via_sysret:
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
+ STACKLEAK_ERASE_NOCLOBBER
+
SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
popq %rdi
@@ -625,6 +627,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
* We are on the trampoline stack. All regs except RDI are live.
* We can do future final exit work right here.
*/
+ STACKLEAK_ERASE_NOCLOBBER
SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 7d0df78db727..8eaf8952c408 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
/* Opportunistic SYSRET */
sysret32_from_system_call:
+ /*
+ * We are not going to return to userspace from the trampoline
+ * stack. So let's erase the thread stack right now.
+ */
+ STACKLEAK_ERASE
TRACE_IRQS_ON /* User mode traces as IRQs on. */
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index 3de0489deade..5270ff39b9af 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -105,8 +105,10 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
 * the PMIC bus while another driver is also accessing the PMIC bus, various bad
* things happen.
*
- * To avoid these problems this function must be called before accessing the
- * P-Unit or the PMIC, be it through iosf_mbi* functions or through other means.
+ * Call this function before sending requests to the P-Unit which may make it
+ * access the PMIC, be it through iosf_mbi* functions or through other means.
+ * This function will block all kernel access to the PMIC I2C bus, so that the
+ * P-Unit can safely access the PMIC over the shared I2C bus.
*
 * Note on these systems the i2c-bus driver will request a semaphore from the
* P-Unit for exclusive access to the PMIC bus when i2c drivers are accessing
@@ -123,6 +125,31 @@ void iosf_mbi_punit_acquire(void);
void iosf_mbi_punit_release(void);
/**
+ * iosf_mbi_block_punit_i2c_access() - Block P-Unit accesses to the PMIC bus
+ *
+ * Call this function to block P-Unit access to the PMIC I2C bus, so that the
+ * kernel can safely access the PMIC over the shared I2C bus.
+ *
+ * This function acquires the P-Unit bus semaphore and notifies
+ * pmic_bus_access_notifier listeners that they may no longer access the
+ * P-Unit in a way which may cause it to access the shared I2C bus.
+ *
+ * Note this function may be called multiple times and the bus will not
+ * be released until iosf_mbi_unblock_punit_i2c_access() has been called the
+ * same number of times.
+ *
+ * Return: Nonzero on error
+ */
+int iosf_mbi_block_punit_i2c_access(void);
+
+/**
+ * iosf_mbi_unblock_punit_i2c_access() - Release PMIC I2C bus block
+ *
+ * Release the I2C access block acquired through iosf_mbi_block_punit_i2c_access().
+ */
+void iosf_mbi_unblock_punit_i2c_access(void);
+
+/**
* iosf_mbi_register_pmic_bus_access_notifier - Register PMIC bus notifier
*
* This function can be used by drivers which may need to acquire P-Unit
@@ -159,14 +186,6 @@ int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
struct notifier_block *nb);
/**
- * iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
- *
- * @val: action to pass into listener's notifier_call function
- * @v: data pointer to pass into listener's notifier_call function
- */
-int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);
-
-/**
* iosf_mbi_assert_punit_acquired - Assert that the P-Unit has been acquired.
*/
void iosf_mbi_assert_punit_acquired(void);
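For context, a minimal sketch of how a PMIC-accessing driver would be expected to pair the two new calls; the do_pmic_i2c_rmw() helper and its signature are made up for illustration, only the iosf_mbi_*() functions come from the header above:

#include <linux/types.h>
#include <asm/iosf_mbi.h>

/* Hypothetical PMIC register read-modify-write helper, not part of this patch. */
extern int do_pmic_i2c_rmw(u8 reg, u8 mask, u8 val);

static int pmic_reg_update(u8 reg, u8 mask, u8 val)
{
	int ret;

	/* Keep the P-Unit off the shared I2C bus for the whole RMW cycle. */
	ret = iosf_mbi_block_punit_i2c_access();
	if (ret)
		return ret;

	ret = do_pmic_i2c_rmw(reg, mask, val);

	/* Calls must balance; the bus is released on the last unblock. */
	iosf_mbi_unblock_punit_i2c_access();

	return ret;
}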
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 5125fca472bb..003f2daa3b0f 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
#include <linux/string.h>
+#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -132,7 +133,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
- newregs->ip = (unsigned long)current_text_addr();
+ newregs->ip = _THIS_IP_;
}
}
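A short illustration of the replacement used above: _THIS_IP_ comes from <linux/kernel.h> (hence the added include) and evaluates to the address of a local label, which is what the removed per-architecture current_text_addr() implementations open-coded. The helper below is a hypothetical sketch, not part of the patch:

#include <linux/kernel.h>

static unsigned long where_am_i(void)
{
	/*
	 * _THIS_IP_ expands to roughly
	 * ({ __label__ __here; __here: (unsigned long)&&__here; }),
	 * i.e. the address of this point in the code.
	 */
	return _THIS_IP_;
}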
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 617805981cce..071b2a6fff85 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -42,18 +42,6 @@ struct vm86;
#define NET_IP_ALIGN 0
#define HBP_NUM 4
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-static inline void *current_text_addr(void)
-{
- void *pc;
-
- asm volatile("mov $1f, %0; 1:":"=r" (pc));
-
- return pc;
-}
/*
* These alignment constraints are for performance in the vSMP case,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 143c99499531..8a7fc0cca2d1 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -286,6 +286,44 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
return 0;
}
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (starting from 0)
+ *
+ * regs_get_kernel_argument() returns the @n-th argument of the function call.
+ * Note that this chooses the most probable assignment; in some cases it can
+ * be incorrect.
+ * This is expected to be called from kprobes or ftrace with regs
+ * where the top of the stack is the return address.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+ static const unsigned int argument_offs[] = {
+#ifdef __i386__
+ offsetof(struct pt_regs, ax),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
+#define NR_REG_ARGUMENTS 3
+#else
+ offsetof(struct pt_regs, di),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+#define NR_REG_ARGUMENTS 6
+#endif
+ };
+
+ if (n >= NR_REG_ARGUMENTS) {
+ n -= NR_REG_ARGUMENTS - 1;
+ return regs_get_kernel_stack_nth(regs, n);
+ } else
+ return regs_get_register(regs, argument_offs[n]);
+}
+
#define arch_has_single_step() (1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step() (1)
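A rough sketch of the kind of caller the new helper targets: a kprobe pre-handler reading the probed function's first argument. The probe target, handler names and message are illustrative only; just regs_get_kernel_argument() is taken from the hunk above:

#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int arg_dump_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* Argument 0 of the probed function, per the convention table above. */
	unsigned long arg0 = regs_get_kernel_argument(regs, 0);

	pr_info("%s: arg0 = 0x%lx\n", p->symbol_name, arg0);
	return 0;
}

static struct kprobe arg_dump_kp = {
	.symbol_name	= "do_sys_open",	/* example target */
	.pre_handler	= arg_dump_pre_handler,
};

/* In module init: register_kprobe(&arg_dump_kp); */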
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e8fea7ffa306..92c76bf97ad8 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -32,7 +32,7 @@
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/efi-bgrt.h>
@@ -933,7 +933,8 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* the resource tree during the lateinit timeframe.
*/
#define HPET_RESOURCE_NAME_SIZE 9
- hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
+ hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
+ SMP_CACHE_BYTES);
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index f1915b744052..ca13851f0570 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -7,7 +7,6 @@
*/
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ab731ab09f06..32b2b7a41ef5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -20,7 +20,7 @@
#include <linux/acpi_pmtmr.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/export.h>
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ff0d14cd9e82..2953bbf05c08 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -47,7 +47,7 @@
#include <linux/kthread.h>
#include <linux/jiffies.h> /* time_after() */
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/irqdomain.h>
#include <asm/io.h>
@@ -2578,7 +2578,7 @@ static struct resource * __init ioapic_setup_resources(void)
n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
n *= nr_ioapics;
- mem = alloc_bootmem(n);
+ mem = memblock_alloc(n, SMP_CACHE_BYTES);
res = (void *)mem;
mem += sizeof(struct resource) * nr_ioapics;
@@ -2621,7 +2621,8 @@ void __init io_apic_init_mappings(void)
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
- ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+ ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
+ PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 660d0b22e962..cbbd57ae06ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,7 +1,7 @@
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d1f25c831447..50895c2f937d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -9,11 +9,10 @@
* allocation code routines via a platform independent interface (memblock, etc.).
*/
#include <linux/crash_dump.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/firmware-map.h>
-#include <linux/memblock.h>
#include <linux/sort.h>
#include <asm/e820/api.h>
@@ -1094,7 +1093,8 @@ void __init e820__reserve_resources(void)
struct resource *res;
u64 end;
- res = alloc_bootmem(sizeof(*res) * e820_table->nr_entries);
+ res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
+ SMP_CACHE_BYTES);
e820_res = res;
for (i = 0; i < e820_table->nr_entries; i++) {
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f1c5eb99d445..3482460d984d 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -11,7 +11,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 7ba73fe0d917..f4562fcec681 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -3,7 +3,7 @@
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 71c0b01d93b1..bd08b9e1c9e2 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -5,7 +5,7 @@
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 637982efecd8..9b158b4716d2 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -20,7 +20,7 @@
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/gfp.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/nmi.h>
#include <asm/fixmap.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 7005f89bf3b2..b74e7bfed6ab 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -30,7 +30,6 @@
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ea554f812ee1..e8796fcd7e5a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
@@ -106,20 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
- ptr = __alloc_bootmem_nopanic(size, align, goal);
+ ptr = memblock_alloc_from_nopanic(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
- ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
- size, align, goal);
+ ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ node);
+
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
cpu, size, node, __pa(ptr));
}
return ptr;
#else
- return __alloc_bootmem_nopanic(size, align, goal);
+ return memblock_alloc_from_nopanic(size, align, goal);
#endif
}
@@ -133,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- free_bootmem(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5369d7fac797..a9134d1910b9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -49,7 +49,7 @@
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index f386bad0984e..285aaa62d153 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -30,7 +30,7 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
@@ -173,7 +173,7 @@ void * __init alloc_tce_table(void)
size = table_size_to_number_of_entries(specified_table_size);
size *= TCE_ENTRY_SIZE;
- return __alloc_bootmem_low(size, size, 0);
+ return memblock_alloc_low(size, size);
}
void __init free_tce_table(void *tbl)
@@ -186,5 +186,5 @@ void __init free_tce_table(void *tbl)
size = table_size_to_number_of_entries(specified_table_size);
size *= TCE_ENTRY_SIZE;
- free_bootmem(__pa(tbl), size);
+ memblock_free(__pa(tbl), size);
}
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 048c761d97b0..058b2f36b3a6 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -12,7 +12,6 @@
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b24eb4eb9984..71d4b9d4d43f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -8,7 +8,7 @@
#include <linux/sched/task_stack.h> /* task_stack_*(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_tables */
-#include <linux/bootmem.h> /* max_low_pfn */
+#include <linux/memblock.h> /* max_low_pfn */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6d18b70ed5a9..0d4bdcb84da5 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,7 +1,7 @@
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
void *kmap(struct page *page)
{
@@ -111,7 +111,7 @@ void __init set_highmem_pages_init(void)
/*
* Explicitly reset zone->managed_pages because set_highmem_pages_init()
- * is invoked before free_all_bootmem()
+ * is invoked before memblock_free_all()
*/
reset_all_zones_managed_pages();
for_each_zone(zone) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index faca978ebf9d..ef99f3892e1f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -3,7 +3,6 @@
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h> /* for max_low_pfn */
#include <linux/swapfile.h>
#include <linux/swapops.h>
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 142c7d9f89cc..49ecf5ecf6d3 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -23,7 +23,6 @@
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
@@ -771,7 +770,7 @@ void __init mem_init(void)
#endif
/*
* With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
- * be done before free_all_bootmem(). Memblock use free low memory for
+ * be done before memblock_free_all(). Memblock uses free low memory for
* temporary data (see find_range_array()) and for this purpose can use
 * pages that were already passed to the buddy allocator, hence marked as
* not accessible in the page tables when compiled with
@@ -781,7 +780,7 @@ void __init mem_init(void)
set_highmem_pages_init();
/* this will put all low memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dd519f372169..5fab264948c2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
@@ -197,7 +196,7 @@ static __ref void *spp_getpage(void)
if (after_bootmem)
ptr = (void *) get_zeroed_page(GFP_ATOMIC);
else
- ptr = alloc_bootmem_pages(PAGE_SIZE);
+ ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
panic("set_pte_phys: cannot allocate page data %s\n",
@@ -1188,14 +1187,14 @@ void __init mem_init(void)
/* clear_bss() already clear the empty_zero_page */
/* this will put all memory onto the freelists */
- free_all_bootmem();
+ memblock_free_all();
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
/*
* Must be done after boot memory is put on freelist, because here we
* might set fields in deferred struct pages that have not yet been
- * initialized, and free_all_bootmem() initializes all the reserved
+ * initialized, and memblock_free_all() initializes all the reserved
* deferred pages for us.
*/
register_page_bootmem_info();
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 24e0920a9b25..5378d10f1d31 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -6,7 +6,7 @@
* (C) Copyright 1995 1996 Linus Torvalds
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e3e77527f8df..04a9cf6b034f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -5,10 +5,9 @@
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
-#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -28,11 +27,11 @@ static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
static __init void *early_alloc(size_t size, int nid, bool panic)
{
if (panic)
- return memblock_virt_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ return memblock_alloc_try_nid(size, size,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
else
- return memblock_virt_alloc_try_nid_nopanic(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ return memblock_alloc_try_nid_nopanic(size, size,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 61db77b0eda9..3f452ffed7e9 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index fa150855647c..1308f5408bf7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -4,7 +4,6 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
@@ -196,7 +195,7 @@ static void __init alloc_node_data(int nid)
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
- nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
MEMBLOCK_ALLOC_ACCESSIBLE);
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index e8a4a09e20f1..f2bd3d61e16b 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -22,7 +22,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/init.h>
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 066f3511d5f1..59d80160fa5a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -3,7 +3,7 @@
* Generic VM initialization for x86-64 NUMA setups.
* Copyright 2002,2003 Andi Kleen, SuSE Labs.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index b54d52a2d00a..a80fdd7fb40f 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -6,7 +6,6 @@
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <asm/dma.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index a25588ad75ef..08f8f76a4852 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -5,7 +5,7 @@
 * Clears a test pte bit on random pages in the direct mapping,
* then reverts and compares page tables forwards and afterwards.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 62bb30b4bd2a..f799076e3d57 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -3,7 +3,7 @@
* Thanks to Ben LaHaise for precious feedback.
*/
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 3d0c83ef6aab..08013524fba1 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -8,7 +8,7 @@
*/
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index 7f9acb68324c..bdc98150d4db 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <linux/mm.h>
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index ed4ac215305d..8cd66152cdb0 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -32,7 +32,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/pat.h>
#include <asm/e820/api.h>
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 9061babfbc83..7ae939e353cd 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -36,9 +36,8 @@
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
-#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e8da7f492970..cf0347f61b21 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -23,7 +23,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 669babcaf245..95e77a667ba5 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -8,7 +8,6 @@
#include <linux/efi.h>
#include <linux/slab.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -333,7 +332,7 @@ void __init efi_reserve_boot_services(void)
/*
* Because the following memblock_reserve() is paired
- * with free_bootmem_late() for this region in
+ * with memblock_free_late() for this region in
* efi_free_boot_services(), we must be extremely
* careful not to reserve, and subsequently free,
* critical regions of memory (like the kernel image) or
@@ -364,7 +363,7 @@ void __init efi_reserve_boot_services(void)
* doesn't make sense as far as the firmware is
* concerned, but it does provide us with a way to tag
* those regions that must not be paired with
- * free_bootmem_late().
+ * memblock_free_late().
*/
md->attribute |= EFI_MEMORY_RUNTIME;
}
@@ -414,7 +413,7 @@ void __init efi_free_boot_services(void)
size -= rm_size;
}
- free_bootmem_late(start, size);
+ memblock_free_late(start, size);
}
if (!num_entries)
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index 6f37a2137a79..2e569d10f2d0 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -18,24 +18,26 @@
* enumerate the device using PCI.
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/capability.h>
+#include <linux/pm_qos.h>
#include <asm/iosf_mbi.h>
-#define PCI_DEVICE_ID_BAYTRAIL 0x0F00
-#define PCI_DEVICE_ID_BRASWELL 0x2280
-#define PCI_DEVICE_ID_QUARK_X1000 0x0958
-#define PCI_DEVICE_ID_TANGIER 0x1170
+#define PCI_DEVICE_ID_INTEL_BAYTRAIL 0x0F00
+#define PCI_DEVICE_ID_INTEL_BRASWELL 0x2280
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0958
+#define PCI_DEVICE_ID_INTEL_TANGIER 0x1170
static struct pci_dev *mbi_pdev;
static DEFINE_SPINLOCK(iosf_mbi_lock);
-static DEFINE_MUTEX(iosf_mbi_punit_mutex);
-static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
+
+/**************** Generic iosf_mbi access helpers ****************/
static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
{
@@ -192,6 +194,30 @@ bool iosf_mbi_available(void)
}
EXPORT_SYMBOL(iosf_mbi_available);
+/*
+ **************** P-Unit/kernel shared I2C bus arbitration ****************
+ *
+ * On some Bay Trail and Cherry Trail devices the P-Unit and the kernel share
+ * a single I2C bus to the PMIC. Below are helpers to arbitrate accesses to
+ * that bus between the kernel and the P-Unit.
+ *
+ * See arch/x86/include/asm/iosf_mbi.h for kernel-doc text for each function.
+ */
+
+#define SEMAPHORE_TIMEOUT 500
+#define PUNIT_SEMAPHORE_BYT 0x7
+#define PUNIT_SEMAPHORE_CHT 0x10e
+#define PUNIT_SEMAPHORE_BIT BIT(0)
+#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
+
+static DEFINE_MUTEX(iosf_mbi_punit_mutex);
+static DEFINE_MUTEX(iosf_mbi_block_punit_i2c_access_count_mutex);
+static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
+static u32 iosf_mbi_block_punit_i2c_access_count;
+static u32 iosf_mbi_sem_address;
+static unsigned long iosf_mbi_sem_acquired;
+static struct pm_qos_request iosf_mbi_pm_qos;
+
void iosf_mbi_punit_acquire(void)
{
mutex_lock(&iosf_mbi_punit_mutex);
@@ -204,6 +230,159 @@ void iosf_mbi_punit_release(void)
}
EXPORT_SYMBOL(iosf_mbi_punit_release);
+static int iosf_mbi_get_sem(u32 *sem)
+{
+ int ret;
+
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ iosf_mbi_sem_address, sem);
+ if (ret) {
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore read failed\n");
+ return ret;
+ }
+
+ *sem &= PUNIT_SEMAPHORE_BIT;
+ return 0;
+}
+
+static void iosf_mbi_reset_semaphore(void)
+{
+ if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ iosf_mbi_sem_address, 0, PUNIT_SEMAPHORE_BIT))
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore reset failed\n");
+
+ pm_qos_update_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
+ MBI_PMIC_BUS_ACCESS_END, NULL);
+}
+
+/*
+ * This function blocks P-Unit accesses to the PMIC I2C bus, so that kernel
+ * I2C code, such as e.g. a fuel-gauge driver, can access it safely.
+ *
+ * This function may be called by I2C controller code while an I2C driver has
+ * already blocked P-Unit accesses because it wants them blocked over multiple
+ * i2c-transfers, e.g. for a read-modify-write of an I2C client register.
+ *
+ * Whether P-Unit accesses are already blocked is tracked through the
+ * iosf_mbi_block_punit_i2c_access_count variable, which is protected by
+ * iosf_mbi_block_punit_i2c_access_count_mutex; this mutex is held for the
+ * entire duration of the function.
+ *
+ * If access is not blocked yet, this function takes the following steps:
+ *
+ * 1) Some code sends requests to the P-Unit which make it access the PMIC
+ * I2C bus. Testing has shown that the P-Unit does not check its internal
+ * PMIC bus semaphore for these requests. Callers of these requests call
+ * iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these
+ * functions lock/unlock the iosf_mbi_punit_mutex.
+ * As the first step we lock the iosf_mbi_punit_mutex, to wait for any in
+ * flight requests to finish and to block any new requests.
+ *
+ * 2) Some code makes such P-Unit requests from atomic contexts where it
+ * cannot call iosf_mbi_punit_acquire() as that may sleep.
+ * As the second step we call a notifier chain which allows any code
+ * needing P-Unit resources from atomic context to acquire them before
+ * we take control over the PMIC I2C bus.
+ *
+ * 3) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC;
+ *    if this happens while the kernel itself is accessing the PMIC I2C bus,
+ *    the SoC hangs.
+ * As the third step we call pm_qos_update_request() to disallow the CPU
+ * to enter C6 or C7.
+ *
+ * 4) The P-Unit has a PMIC bus semaphore which we can request to stop
+ * autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it.
+ * As the fourth and final step we request this semaphore and wait for our
+ * request to be acknowledged.
+ */
+int iosf_mbi_block_punit_i2c_access(void)
+{
+ unsigned long start, end;
+ int ret = 0;
+ u32 sem;
+
+ if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address))
+ return -ENXIO;
+
+ mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex);
+
+ if (iosf_mbi_block_punit_i2c_access_count > 0)
+ goto success;
+
+ mutex_lock(&iosf_mbi_punit_mutex);
+ blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
+ MBI_PMIC_BUS_ACCESS_BEGIN, NULL);
+
+ /*
+ * Disallow the CPU to enter C6 or C7 state, entering these states
+ * requires the P-Unit to talk to the PMIC and if this happens while
+ * we're holding the semaphore, the SoC hangs.
+ */
+ pm_qos_update_request(&iosf_mbi_pm_qos, 0);
+
+ /* host driver writes to side band semaphore register */
+ ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ iosf_mbi_sem_address, PUNIT_SEMAPHORE_ACQUIRE);
+ if (ret) {
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore request failed\n");
+ goto error;
+ }
+
+ /* host driver waits for bit 0 to be set in semaphore register */
+ start = jiffies;
+ end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
+ do {
+ ret = iosf_mbi_get_sem(&sem);
+ if (!ret && sem) {
+ iosf_mbi_sem_acquired = jiffies;
+ dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n",
+ jiffies_to_msecs(jiffies - start));
+ /*
+ * Success, keep iosf_mbi_punit_mutex locked till
+ * iosf_mbi_unblock_punit_i2c_access() gets called.
+ */
+ goto success;
+ }
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ ret = -ETIMEDOUT;
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n");
+error:
+ iosf_mbi_reset_semaphore();
+ mutex_unlock(&iosf_mbi_punit_mutex);
+
+ if (!iosf_mbi_get_sem(&sem))
+ dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem);
+success:
+ if (!WARN_ON(ret))
+ iosf_mbi_block_punit_i2c_access_count++;
+
+ mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access);
+
+void iosf_mbi_unblock_punit_i2c_access(void)
+{
+ mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex);
+
+ iosf_mbi_block_punit_i2c_access_count--;
+ if (iosf_mbi_block_punit_i2c_access_count == 0) {
+ iosf_mbi_reset_semaphore();
+ mutex_unlock(&iosf_mbi_punit_mutex);
+ dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n",
+ jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired));
+ }
+
+ mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex);
+}
+EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access);
+
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;
@@ -241,19 +420,14 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
-int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
-{
- return blocking_notifier_call_chain(
- &iosf_mbi_pmic_bus_access_notifier, val, v);
-}
-EXPORT_SYMBOL(iosf_mbi_call_pmic_bus_access_notifier_chain);
-
void iosf_mbi_assert_punit_acquired(void)
{
WARN_ON(!mutex_is_locked(&iosf_mbi_punit_mutex));
}
EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
+/**************** iosf_mbi debug code ****************/
+
#ifdef CONFIG_IOSF_MBI_DEBUG
static u32 dbg_mdr;
static u32 dbg_mcr;
@@ -338,7 +512,7 @@ static inline void iosf_debugfs_remove(void) { }
#endif /* CONFIG_IOSF_MBI_DEBUG */
static int iosf_mbi_probe(struct pci_dev *pdev,
- const struct pci_device_id *unused)
+ const struct pci_device_id *dev_id)
{
int ret;
@@ -349,14 +523,16 @@ static int iosf_mbi_probe(struct pci_dev *pdev,
}
mbi_pdev = pci_dev_get(pdev);
+ iosf_mbi_sem_address = dev_id->driver_data;
+
return 0;
}
static const struct pci_device_id iosf_mbi_pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_TANGIER) },
+ { PCI_DEVICE_DATA(INTEL, BAYTRAIL, PUNIT_SEMAPHORE_BYT) },
+ { PCI_DEVICE_DATA(INTEL, BRASWELL, PUNIT_SEMAPHORE_CHT) },
+ { PCI_DEVICE_DATA(INTEL, QUARK_X1000, 0) },
+ { PCI_DEVICE_DATA(INTEL, TANGIER, 0) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
@@ -371,6 +547,9 @@ static int __init iosf_mbi_init(void)
{
iosf_debugfs_init();
+ pm_qos_add_request(&iosf_mbi_pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
return pci_register_driver(&iosf_mbi_pci_driver);
}
@@ -381,6 +560,8 @@ static void __exit iosf_mbi_exit(void)
pci_unregister_driver(&iosf_mbi_pci_driver);
pci_dev_put(mbi_pdev);
mbi_pdev = NULL;
+
+ pm_qos_remove_request(&iosf_mbi_pm_qos);
}
module_init(iosf_mbi_init);
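Tying this back to step 2 of the comment block above: code that may need P-Unit resources from atomic context registers a PMIC bus access notifier and reacts to the begin/end events. A hedged sketch with the per-driver resource handling left as comments; only the notifier registration call and the MBI_PMIC_BUS_ACCESS_* events come from this file and its header:

#include <linux/notifier.h>
#include <asm/iosf_mbi.h>

static int punit_bus_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/* Grab anything that would otherwise need a P-Unit request
		 * later, while such requests are still allowed. */
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		/* Drop those resources again. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block punit_bus_nb = {
	.notifier_call = punit_bus_notify,
};

/* In probe(): iosf_mbi_register_pmic_bus_access_notifier(&punit_bus_nb); */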
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index d6ee92986920..24d2175a9480 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -17,7 +17,7 @@
*/
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_pdt.h>
@@ -141,7 +141,7 @@ void * __init prom_early_alloc(unsigned long size)
* fast enough on the platforms we care about while minimizing
* wasted bootmem) and hand off chunks of it to callers.
*/
- res = alloc_bootmem(chunk_size);
+ res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
BUG_ON(!res);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 15695e30f982..be15bdcb20df 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -8,7 +8,7 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/um/asm/processor_32.h b/arch/x86/um/asm/processor_32.h
index c112de81c9e1..5fb1b8449adf 100644
--- a/arch/x86/um/asm/processor_32.h
+++ b/arch/x86/um/asm/processor_32.h
@@ -47,14 +47,6 @@ static inline void arch_copy_thread(struct arch_thread *from,
memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
}
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter"). Stolen
- * from asm-i386/processor.h
- */
-#define current_text_addr() \
- ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-
#define current_sp() ({ void *sp; __asm__("movl %%esp, %0" : "=r" (sp) : ); sp; })
#define current_bp() ({ unsigned long bp; __asm__("movl %%ebp, %0" : "=r" (bp) : ); bp; })
diff --git a/arch/x86/um/asm/processor_64.h b/arch/x86/um/asm/processor_64.h
index c3be85205a65..1ef9c21877bc 100644
--- a/arch/x86/um/asm/processor_64.h
+++ b/arch/x86/um/asm/processor_64.h
@@ -31,9 +31,6 @@ static inline void arch_copy_thread(struct arch_thread *from,
to->fs = from->fs;
}
-#define current_text_addr() \
- ({ void *pc; __asm__("movq $1f,%0\n1:":"=g" (pc)); pc; })
-
#define current_sp() ({ void *sp; __asm__("movq %%rsp, %0" : "=r" (sp) : ); sp; })
#define current_bp() ({ unsigned long bp; __asm__("movq %%rbp, %0" : "=r" (bp) : ); bp; })
diff --git a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h
index b94a108de1dc..db8478a83a09 100644
--- a/arch/x86/um/shared/sysdep/ptrace_32.h
+++ b/arch/x86/um/shared/sysdep/ptrace_32.h
@@ -8,22 +8,10 @@
#define MAX_FP_NR HOST_FPX_SIZE
-static inline void update_debugregs(int seq) {}
-
-/* syscall emulation path in ptrace */
-
-#ifndef PTRACE_SYSEMU
-#define PTRACE_SYSEMU 31
-#endif
-
void set_using_sysemu(int value);
int get_using_sysemu(void);
extern int sysemu_supported;
-#ifndef PTRACE_SYSEMU_SINGLESTEP
-#define PTRACE_SYSEMU_SINGLESTEP 32
-#endif
-
#define UPT_SYSCALL_ARG1(r) UPT_BX(r)
#define UPT_SYSCALL_ARG2(r) UPT_CX(r)
#define UPT_SYSCALL_ARG3(r) UPT_DX(r)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 67b2f31a1265..e996e8e744cb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#endif
#include <linux/cpu.h>
#include <linux/kexec.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index ec7a4209f310..2f6787fc7106 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -23,7 +23,7 @@
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
@@ -31,7 +31,6 @@
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <linux/memblock.h>
#include <linux/edd.h>
#include <linux/frame.h>
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 70ea598a37d2..0d7b3ae4960b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -864,7 +864,7 @@ static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
* The init_mm pagetable is really pinned as soon as its created, but
* that's before we have page structures to store the bits. So do all
* the book-keeping now once struct pages for allocated pages are
- * initialized. This happens only after free_all_bootmem() is called.
+ * initialized. This happens only after memblock_free_all() is called.
*/
static void __init xen_after_bootmem(void)
{
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index d6d74efd8912..b06731705529 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -67,7 +67,7 @@
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -182,7 +182,7 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
static void * __ref alloc_p2m_page(void)
{
if (unlikely(!slab_is_available()))
- return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+ return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
return (void *)__get_free_page(GFP_KERNEL);
}
@@ -190,7 +190,7 @@ static void * __ref alloc_p2m_page(void)
static void __ref free_p2m_page(void *p)
{
if (unlikely(!slab_is_available())) {
- free_bootmem((unsigned long)p, PAGE_SIZE);
+ memblock_free((unsigned long)p, PAGE_SIZE);
return;
}
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 66ab96a4e2b3..96d7f7d39cb9 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -134,6 +134,10 @@ void xen_unplug_emulated_devices(void)
{
int r;
+ /* PVH guests don't have emulated devices. */
+ if (xen_pvh_domain())
+ return;
+
/* user explicitly requested no unplug */
if (xen_emul_unplug & XEN_UNPLUG_NEVER)
return;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23f6793af88a..441c88262169 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -39,34 +39,25 @@ static void xen_qlock_kick(int cpu)
*/
static void xen_qlock_wait(u8 *byte, u8 val)
{
+ unsigned long flags;
int irq = __this_cpu_read(lock_kicker_irq);
/* If kicker interrupts not initialized yet, just spin */
- if (irq == -1)
+ if (irq == -1 || in_nmi())
return;
- /* clear pending */
- xen_clear_irq_pending(irq);
- barrier();
-
- /*
- * We check the byte value after clearing pending IRQ to make sure
- * that we won't miss a wakeup event because of the clearing.
- *
- * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
- * So it is effectively a memory barrier for x86.
- */
- if (READ_ONCE(*byte) != val)
- return;
+ /* Guard against reentry. */
+ local_irq_save(flags);
- /*
- * If an interrupt happens here, it will leave the wakeup irq
- * pending, which will cause xen_poll_irq() to return
- * immediately.
- */
+ /* If irq pending already clear it. */
+ if (xen_test_irq_pending(irq)) {
+ xen_clear_irq_pending(irq);
+ } else if (READ_ONCE(*byte) == val) {
+ /* Block until irq becomes pending (or a spurious wakeup) */
+ xen_poll_irq(irq);
+ }
- /* Block until irq becomes pending (or perhaps a spurious wakeup) */
- xen_poll_irq(irq);
+ local_irq_restore(flags);
}
static irqreturn_t dummy_handler(int irq, void *dev_id)
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index b0e471506cd8..1f8825bbaffb 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -170,7 +170,7 @@ canary:
.fill 48, 1, 0
early_stack:
- .fill 256, 1, 0
+ .fill BOOT_STACK_SIZE, 1, 0
early_stack_end:
ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index ea5d8d03e53b..d29b7365da8d 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-config ZONE_DMA
- def_bool y
-
config XTENSA
def_bool y
select ARCH_HAS_SG_CHAIN
@@ -28,13 +25,11 @@ config XTENSA
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_MEMBLOCK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
select PERF_USE_VMALLOC
select VIRT_TO_BUS
help
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index dc9e0ba7122c..294846117fc2 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -33,7 +33,7 @@ uImage: $(obj)/uImage
boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
-OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
+OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index e4ccb88b7996..be9bfd9aa865 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -152,14 +152,6 @@ struct thread_struct {
int align[0] __attribute__ ((aligned(16)));
};
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 21f13e9aabe1..5ca440a74316 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <asm/pci-bridge.h>
#include <asm/platform.h>
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index b727b18a68ac..b80a430453b1 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -131,6 +131,7 @@ SECTIONS
.fixup : { *(.fixup) }
EXCEPTION_TABLE(16)
+ NOTES
/* Data section */
_sdata = .;
@@ -296,38 +297,11 @@ SECTIONS
_end = .;
- .xt.lit : { *(.xt.lit) }
- .xt.prop : { *(.xt.prop) }
-
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- .debug_info 0 : { *(.debug_info) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
-
- .xt.insn 0 :
- {
- *(.xt.insn)
- *(.gnu.linkonce.x*)
- }
+ DWARF_DEBUG
- .xt.lit 0 :
- {
- *(.xt.lit)
- *(.gnu.linkonce.p*)
- }
+ .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
+ .xt.insn 0 : { KEEP(*(.xt.insn .xt.insn.* .gnu.linkonce.x*)) }
+ .xt.lit 0 : { KEEP(*(.xt.lit .xt.lit.* .gnu.linkonce.p*)) }
/* Sections to be discarded */
DISCARDS
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 9220dcde7520..b27359e2a464 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -21,7 +21,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 34aead7dcb48..30a48bba4a47 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
@@ -71,7 +71,7 @@ void __init zones_init(void)
{
/* All pages are DMA-able, so we put them all in the DMA zone. */
unsigned long zones_size[MAX_NR_ZONES] = {
- [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
+ [ZONE_NORMAL] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
[ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
@@ -152,7 +152,7 @@ void __init mem_init(void)
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 6b532b6bd785..6b95ca43aec0 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -8,11 +8,10 @@
* Copyright (C) 2017 Cadence Design Systems Inc.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/memblock.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
@@ -43,7 +42,7 @@ static void __init populate(void *start, void *end)
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
- pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+ pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
pr_debug("%s: %p - %p\n", __func__, start, end);
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 9d1ecfc53670..a4dcfd39bc5c 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -4,7 +4,7 @@
*
* Extracted from init.c
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -31,7 +31,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
__func__, vaddr, n_pages);
- pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));
+ pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
for (i = 0; i < n_pages; ++i)
pte_clear(NULL, 0, pte + i);
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index d027dddc41ca..d052712373b6 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -30,7 +30,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/platform_device.h>
@@ -646,7 +646,7 @@ static int __init iss_net_setup(char *str)
return 1;
}
- new = alloc_bootmem(sizeof(*new));
+ new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
if (new == NULL) {
pr_err("Alloc_bootmem failed\n");
return 1;
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index 58709e89a8ed..c14cc673976c 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -16,7 +16,7 @@
* option) any later version.
*
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index d9a7916ff0ab..9fe5952d117d 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = __bio_blkcg(bio)->css.serial_nr;
+ serial_nr = bio_blkcg(bio)->css.serial_nr;
/*
* Check whether blkcg has changed. The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
- bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
/*
* Update blkg_path for bfq_log_* functions. We cache this
* path, and update it here, for the following
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 6075100f03a5..3a27d31fcda6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4384,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
rcu_read_lock();
- bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
+ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
if (!bfqg) {
bfqq = &bfqd->oom_bfqq;
goto out;
diff --git a/block/bio.c b/block/bio.c
index bbfeb4ee2892..d5368a445561 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -609,9 +609,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
- bio_clone_blkg_association(bio, bio_src);
-
- blkcg_bio_issue_init(bio);
+ bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -1256,7 +1254,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* success
*/
- if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
+ if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
ret = bio_copy_from_iter(bio, iter);
if (ret)
@@ -1956,151 +1954,69 @@ EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP
-/**
- * bio_associate_blkg - associate a bio with the a blkg
- * @bio: target bio
- * @blkg: the blkg to associate
- *
- * This tries to associate @bio with the specified blkg. Association failure
- * is handled by walking up the blkg tree. Therefore, the blkg associated can
- * be anything between @blkg and the root_blkg. This situation only happens
- * when a cgroup is dying and then the remaining bios will spill to the closest
- * alive blkg.
- *
- * A reference will be taken on the @blkg and will be released when @bio is
- * freed.
- */
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
-{
- if (unlikely(bio->bi_blkg))
- return -EBUSY;
- bio->bi_blkg = blkg_tryget_closest(blkg);
- return 0;
-}
-
-/**
- * __bio_associate_blkg_from_css - internal blkg association function
- *
- * This in the core association function that all association paths rely on.
- * A blkg reference is taken which is released upon freeing of the bio.
- */
-static int __bio_associate_blkg_from_css(struct bio *bio,
- struct cgroup_subsys_state *css)
-{
- struct request_queue *q = bio->bi_disk->queue;
- struct blkcg_gq *blkg;
- int ret;
-
- rcu_read_lock();
-
- if (!css || !css->parent)
- blkg = q->root_blkg;
- else
- blkg = blkg_lookup_create(css_to_blkcg(css), q);
-
- ret = bio_associate_blkg(bio, blkg);
-
- rcu_read_unlock();
- return ret;
-}
-
-/**
- * bio_associate_blkg_from_css - associate a bio with a specified css
- * @bio: target bio
- * @css: target css
- *
- * Associate @bio with the blkg found by combining the css's blkg and the
- * request_queue of the @bio. This falls back to the queue's root_blkg if
- * the association fails with the css.
- */
-int bio_associate_blkg_from_css(struct bio *bio,
- struct cgroup_subsys_state *css)
-{
- if (unlikely(bio->bi_blkg))
- return -EBUSY;
- return __bio_associate_blkg_from_css(bio, css);
-}
-EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
-
#ifdef CONFIG_MEMCG
/**
- * bio_associate_blkg_from_page - associate a bio with the page's blkg
+ * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
* @bio: target bio
* @page: the page to lookup the blkcg from
*
- * Associate @bio with the blkg from @page's owning memcg and the respective
- * request_queue. If cgroup_e_css returns NULL, fall back to the queue's
- * root_blkg.
- *
- * Note: this must be called after bio has an associated device.
+ * Associate @bio with the blkcg from @page's owning memcg. This works like
+ * every other associate function with respect to reference counting.
*/
-int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
{
- struct cgroup_subsys_state *css;
- int ret;
+ struct cgroup_subsys_state *blkcg_css;
- if (unlikely(bio->bi_blkg))
+ if (unlikely(bio->bi_css))
return -EBUSY;
if (!page->mem_cgroup)
return 0;
-
- rcu_read_lock();
-
- css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
-
- ret = __bio_associate_blkg_from_css(bio, css);
-
- rcu_read_unlock();
- return ret;
+ blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
+ &io_cgrp_subsys);
+ bio->bi_css = blkcg_css;
+ return 0;
}
#endif /* CONFIG_MEMCG */
/**
- * bio_associate_create_blkg - associate a bio with a blkg from q
- * @q: request_queue where bio is going
+ * bio_associate_blkcg - associate a bio with the specified blkcg
* @bio: target bio
+ * @blkcg_css: css of the blkcg to associate
+ *
+ * Associate @bio with the blkcg specified by @blkcg_css. The block layer will
+ * treat @bio as if it were issued by a task that belongs to the blkcg.
*
- * Associate @bio with the blkg found from the bio's css and the request_queue.
- * If one is not found, bio_lookup_blkg creates the blkg. This falls back to
- * the queue's root_blkg if association fails.
+ * This function takes an extra reference of @blkcg_css which will be put
+ * when @bio is released. The caller must own @bio and is responsible for
+ * synchronizing calls to this function.
*/
-int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
- struct cgroup_subsys_state *css;
- int ret = 0;
-
- /* someone has already associated this bio with a blkg */
- if (bio->bi_blkg)
- return ret;
-
- rcu_read_lock();
-
- css = blkcg_css();
-
- ret = __bio_associate_blkg_from_css(bio, css);
-
- rcu_read_unlock();
- return ret;
+ if (unlikely(bio->bi_css))
+ return -EBUSY;
+ css_get(blkcg_css);
+ bio->bi_css = blkcg_css;
+ return 0;
}
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
- * bio_reassociate_blkg - reassociate a bio with a blkg from q
- * @q: request_queue where bio is going
+ * bio_associate_blkg - associate a bio with the specified blkg
* @bio: target bio
+ * @blkg: the blkg to associate
*
- * When submitting a bio, multiple recursive calls to make_request() may occur.
- * This causes the initial associate done in blkcg_bio_issue_check() to be
- * incorrect and reference the prior request_queue. This performs reassociation
- * when this situation happens.
+ * Associate @bio with the blkg specified by @blkg. This is the queue-specific
+ * blkcg information associated with @bio; a reference is taken on @blkg and
+ * released when the bio is freed.
*/
-int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
{
- if (bio->bi_blkg) {
- blkg_put(bio->bi_blkg);
- bio->bi_blkg = NULL;
- }
-
- return bio_associate_create_blkg(q, bio);
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ if (!blkg_try_get(blkg))
+ return -ENODEV;
+ bio->bi_blkg = blkg;
+ return 0;
}
/**
@@ -2113,6 +2029,10 @@ void bio_disassociate_task(struct bio *bio)
put_io_context(bio->bi_ioc);
bio->bi_ioc = NULL;
}
+ if (bio->bi_css) {
+ css_put(bio->bi_css);
+ bio->bi_css = NULL;
+ }
if (bio->bi_blkg) {
blkg_put(bio->bi_blkg);
bio->bi_blkg = NULL;
@@ -2120,16 +2040,16 @@ void bio_disassociate_task(struct bio *bio)
}
/**
- * bio_clone_blkg_association - clone blkg association from src to dst bio
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
* @dst: destination bio
* @src: source bio
*/
-void bio_clone_blkg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_blkg)
- bio_associate_blkg(dst, src->bi_blkg);
+ if (src->bi_css)
+ WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}
-EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
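The association helpers restored above document a simple ownership rule: bio_associate_blkcg() takes one extra css reference for the lifetime of the bio, and bio_disassociate_task() drops it exactly once. The following is a minimal standalone C sketch of that contract, not part of the patch; the toy names (struct css, struct bio_like, associate/disassociate) are illustrative only, not kernel API.

#include <stdio.h>

struct css { int refcnt; };
struct bio_like { struct css *bi_css; };

static void css_get(struct css *c) { c->refcnt++; }
static void css_put(struct css *c) { c->refcnt--; }

static int associate(struct bio_like *bio, struct css *c)
{
	if (bio->bi_css)
		return -1;		/* already associated ("-EBUSY") */
	css_get(c);			/* reference held for the bio's lifetime */
	bio->bi_css = c;
	return 0;
}

static void disassociate(struct bio_like *bio)
{
	if (bio->bi_css) {
		css_put(bio->bi_css);	/* drop the reference taken above */
		bio->bi_css = NULL;
	}
}

int main(void)
{
	struct css c = { .refcnt = 1 };
	struct bio_like b = { 0 };

	associate(&b, &c);		/* refcnt: 2 */
	disassociate(&b);		/* refcnt: back to 1 */
	printf("refcnt = %d\n", c.refcnt);
	return 0;
}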
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 992da5592c6e..c630e02836a8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -84,37 +84,6 @@ static void blkg_free(struct blkcg_gq *blkg)
kfree(blkg);
}
-static void __blkg_release(struct rcu_head *rcu)
-{
- struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
-
- percpu_ref_exit(&blkg->refcnt);
-
- /* release the blkcg and parent blkg refs this blkg has been holding */
- css_put(&blkg->blkcg->css);
- if (blkg->parent)
- blkg_put(blkg->parent);
-
- wb_congested_put(blkg->wb_congested);
-
- blkg_free(blkg);
-}
-
-/*
- * A group is RCU protected, but having an rcu lock does not mean that one
- * can access all the fields of blkg and assume these are valid. For
- * example, don't try to follow throtl_data and request queue links.
- *
- * Having a reference to blkg under an rcu allows accesses to only values
- * local to groups like group stats and group rate limits.
- */
-static void blkg_release(struct percpu_ref *ref)
-{
- struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
-
- call_rcu(&blkg->rcu_head, __blkg_release);
-}
-
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
@@ -141,6 +110,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
+ atomic_set(&blkg->refcnt, 1);
/* root blkg uses @q->root_rl, init rl only for !root blkgs */
if (blkcg != &blkcg_root) {
@@ -247,11 +217,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_get(blkg->parent);
}
- ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
- GFP_NOWAIT | __GFP_NOWARN);
- if (ret)
- goto err_cancel_ref;
-
/* invoke per-policy init */
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -284,8 +249,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_put(blkg);
return ERR_PTR(ret);
-err_cancel_ref:
- percpu_ref_exit(&blkg->refcnt);
err_put_congested:
wb_congested_put(wb_congested);
err_put_css:
@@ -296,7 +259,7 @@ err_free_blkg:
}
/**
- * __blkg_lookup_create - lookup blkg, try to create one if not there
+ * blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
@@ -305,11 +268,12 @@ err_free_blkg:
* that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and @q->queue_lock.
*
- * Returns the blkg or the closest blkg if blkg_create fails as it walks
- * down from root.
+ * Returns a pointer to the looked-up or created blkg on success, or an
+ * ERR_PTR() value on error. If @q is dead, returns ERR_PTR(-ENODEV). If @q
+ * is not dead and bypassing, returns ERR_PTR(-EBUSY).
*/
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
struct blkcg_gq *blkg;
@@ -321,7 +285,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return q->root_blkg;
+ return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
@@ -329,58 +293,23 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
- * non-root blkgs have access to their parents. Returns the closest
- * blkg to the intended blkg should blkg_create() fail.
+ * non-root blkgs have access to their parents.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent = blkcg_parent(blkcg);
- struct blkcg_gq *ret_blkg = q->root_blkg;
-
- while (parent) {
- blkg = __blkg_lookup(parent, q, false);
- if (blkg) {
- /* remember closest blkg */
- ret_blkg = blkg;
- break;
- }
+
+ while (parent && !__blkg_lookup(parent, q, false)) {
pos = parent;
parent = blkcg_parent(parent);
}
blkg = blkg_create(pos, q, NULL);
- if (IS_ERR(blkg))
- return ret_blkg;
- if (pos == blkcg)
+ if (pos == blkcg || IS_ERR(blkg))
return blkg;
}
}
-/**
- * blkg_lookup_create - find or create a blkg
- * @blkcg: target block cgroup
- * @q: target request_queue
- *
- * This looks up or creates the blkg representing the unique pair
- * of the blkcg and the request_queue.
- */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
-{
- struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
- unsigned long flags;
-
- if (unlikely(!blkg)) {
- spin_lock_irqsave(q->queue_lock, flags);
-
- blkg = __blkg_lookup_create(blkcg, q);
-
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
-
- return blkg;
-}
-
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
@@ -424,7 +353,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- percpu_ref_kill(&blkg->refcnt);
+ blkg_put(blkg);
}
/**
@@ -452,6 +381,29 @@ static void blkg_destroy_all(struct request_queue *q)
}
/*
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid. For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to blkg under an rcu allows accesses to only values
+ * local to groups like group stats and group rate limits.
+ */
+void __blkg_release_rcu(struct rcu_head *rcu_head)
+{
+ struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+
+ wb_congested_put(blkg->wb_congested);
+
+ blkg_free(blkg);
+}
+EXPORT_SYMBOL_GPL(__blkg_release_rcu);
+
+/*
* The next function used by blk_queue_for_each_rl(). It's a bit tricky
* because the root blkg uses @q->root_rl instead of its own rl.
*/
@@ -1796,7 +1748,8 @@ void blkcg_maybe_throttle_current(void)
blkg = blkg_lookup(blkcg, q);
if (!blkg)
goto out;
- if (!blkg_tryget(blkg))
+ blkg = blkg_try_get(blkg);
+ if (!blkg)
goto out;
rcu_read_unlock();
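blkg_lookup_create() above creates missing groups top-down: each pass walks from the target blkcg towards the root until it finds an ancestor that already has a blkg, creates one at that level, and repeats until the target level itself exists. Below is a standalone sketch of the same walk on a toy parent-linked tree; it is not part of the patch, and names such as struct cg and has_node are illustrative stand-ins, not the kernel structures.

#include <stdbool.h>
#include <stdio.h>

struct cg {
	const char *name;
	struct cg *parent;
	bool has_node;		/* stands in for __blkg_lookup() succeeding */
};

static void create_from_root_down(struct cg *target)
{
	while (true) {
		struct cg *pos = target;
		struct cg *parent = target->parent;

		/* find the shallowest level that still lacks a node */
		while (parent && !parent->has_node) {
			pos = parent;
			parent = parent->parent;
		}

		pos->has_node = true;	/* "blkg_create(pos, ...)" */
		printf("created node for %s\n", pos->name);

		if (pos == target)	/* reached the level we wanted */
			return;
	}
}

int main(void)
{
	struct cg root = { "root", NULL, true };
	struct cg a = { "a", &root, false };
	struct cg b = { "a/b", &a, false };

	create_from_root_down(&b);	/* creates "a", then "a/b" */
	return 0;
}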
diff --git a/block/blk-core.c b/block/blk-core.c
index bc6ea87d10e0..ce12515f9b9b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -785,6 +785,9 @@ void blk_cleanup_queue(struct request_queue *q)
* prevent that q->request_fn() gets invoked after draining finished.
*/
blk_freeze_queue(q);
+
+ rq_qos_exit(q);
+
spin_lock_irq(lock);
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
@@ -2432,7 +2435,6 @@ blk_qc_t generic_make_request(struct bio *bio)
if (q)
blk_queue_exit(q);
q = bio->bi_disk->queue;
- bio_reassociate_blkg(q, bio);
flags = 0;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 28f80d227528..38c35c32aff2 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -482,12 +482,34 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
spinlock_t *lock)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
- struct blkcg_gq *blkg = bio->bi_blkg;
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+ struct request_queue *q = rqos->q;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blk_iolatency_enabled(blkiolat))
return;
+ rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+ bio_associate_blkcg(bio, &blkcg->css);
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ if (!lock)
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ if (!lock)
+ spin_unlock_irq(q->queue_lock);
+ }
+ if (!blkg)
+ goto out;
+
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+ bio_associate_blkg(bio, blkg);
+out:
+ rcu_read_unlock();
while (blkg && blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -708,7 +730,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
* We could be exiting, don't access the pd unless we have a
* ref on the blkg.
*/
- if (!blkg_tryget(blkg))
+ if (!blkg_try_get(blkg))
continue;
iolat = blkg_to_lat(blkg);
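blkcg_iolatency_throttle() above follows a lookup-then-create shape: a lockless lookup first, and only on a miss is the queue lock taken, the group looked up or created, with a creation error treated as "no group" so the per-group work is simply skipped. The following is a minimal single-threaded sketch of that shape, assuming a toy registry and a pthread mutex in place of the queue lock (no RCU); it is illustrative only, not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_GROUPS 16

struct group { int id; };

static struct group *groups[MAX_GROUPS];
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static struct group *lookup(int id)
{
	return (id >= 0 && id < MAX_GROUPS) ? groups[id] : NULL;
}

/* called with reg_lock held; re-checks before creating */
static struct group *lookup_create(int id)
{
	struct group *g = lookup(id);

	if (g)
		return g;
	if (id < 0 || id >= MAX_GROUPS)
		return NULL;			/* stands in for ERR_PTR() */
	g = malloc(sizeof(*g));
	if (!g)
		return NULL;
	g->id = id;
	groups[id] = g;
	return g;
}

static struct group *get_group(int id)
{
	struct group *g = lookup(id);		/* fast path, no lock */

	if (!g) {
		pthread_mutex_lock(&reg_lock);	/* slow path under the lock */
		g = lookup_create(id);
		pthread_mutex_unlock(&reg_lock);
	}
	return g;				/* may be NULL: caller skips work */
}

int main(void)
{
	struct group *g = get_group(3);

	printf("group 3 %screated\n", g ? "" : "not ");
	return 0;
}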
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 42a46744c11b..6b5ad275ed56 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -714,6 +714,31 @@ static void blk_account_io_merge(struct request *req)
part_stat_unlock();
}
}
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and sends them to the controller together. The
+ * ranges need not be contiguous.
+ * Otherwise, the bios/requests are handled like any others
+ * and must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+ if (req_op(req) == REQ_OP_DISCARD &&
+ queue_max_discard_segments(req->q) > 1)
+ return true;
+ return false;
+}
+
+enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+ if (blk_discard_mergable(req))
+ return ELEVATOR_DISCARD_MERGE;
+ else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+ return ELEVATOR_BACK_MERGE;
+
+ return ELEVATOR_NO_MERGE;
+}
/*
* For non-mq, this has to be called with the request spinlock acquired.
@@ -731,12 +756,6 @@ static struct request *attempt_merge(struct request_queue *q,
if (req_op(req) != req_op(next))
return NULL;
- /*
- * not contiguous
- */
- if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
- return NULL;
-
if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk
|| req_no_special_merge(next))
@@ -760,11 +779,19 @@ static struct request *attempt_merge(struct request_queue *q,
* counts here. Handle DISCARDs separately, as they
* have separate settings.
*/
- if (req_op(req) == REQ_OP_DISCARD) {
+
+ switch (blk_try_req_merge(req, next)) {
+ case ELEVATOR_DISCARD_MERGE:
if (!req_attempt_discard_merge(q, req, next))
return NULL;
- } else if (!ll_merge_requests_fn(q, req, next))
+ break;
+ case ELEVATOR_BACK_MERGE:
+ if (!ll_merge_requests_fn(q, req, next))
+ return NULL;
+ break;
+ default:
return NULL;
+ }
/*
* If failfast settings disagree or any of the two is already
@@ -888,8 +915,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
- if (req_op(rq) == REQ_OP_DISCARD &&
- queue_max_discard_segments(rq->q) > 1)
+ if (blk_discard_mergable(rq))
return ELEVATOR_DISCARD_MERGE;
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
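The DISCARD-merge comment above distinguishes two cases: with max_discard_segments > 1 a discard request is a list of ranges, so a merge does not require contiguity, while every other back merge still requires the first request to end exactly where the next begins. A standalone sketch of that decision follows, not part of the patch; struct req and the enum here are toy stand-ins, not the kernel's struct request.

#include <stdbool.h>
#include <stdio.h>

enum merge { NO_MERGE, BACK_MERGE, DISCARD_MERGE };

struct req {
	unsigned long pos;			/* start sector */
	unsigned long sectors;			/* length in sectors */
	bool is_discard;
	unsigned int max_discard_segments;	/* queue limit */
};

static bool discard_mergable(const struct req *r)
{
	return r->is_discard && r->max_discard_segments > 1;
}

static enum merge try_req_merge(const struct req *req, const struct req *next)
{
	if (discard_mergable(req))
		return DISCARD_MERGE;		/* ranges need not touch */
	if (req->pos + req->sectors == next->pos)
		return BACK_MERGE;		/* contiguous on disk */
	return NO_MERGE;
}

int main(void)
{
	struct req a = { 0, 8, false, 1 }, b = { 8, 8, false, 1 };
	struct req d1 = { 0, 8, true, 64 }, d2 = { 100, 8, true, 64 };

	printf("write  a+b : %d\n", try_req_merge(&a, &b));	/* BACK_MERGE */
	printf("discard gap: %d\n", try_req_merge(&d1, &d2));	/* DISCARD_MERGE */
	return 0;
}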
diff --git a/block/blk-settings.c b/block/blk-settings.c
index ffd459969689..696c04c1ab6c 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -6,7 +6,7 @@
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0641533597f1..844a454a7b3a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -1007,8 +1007,6 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
- rq_qos_exit(q);
-
mutex_lock(&q->sysfs_lock);
if (q->request_fn || (q->mq_ops && q->elevator))
elv_unregister_queue(q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 4bda70e8db48..db1a3a2ae006 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2115,11 +2115,21 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
}
#endif
+static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
+{
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ /* fallback to root_blkg if we fail to get a blkg ref */
+ if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
+ bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+#endif
+}
+
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio)
{
struct throtl_qnode *qn = NULL;
- struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
struct throtl_service_queue *sq;
bool rw = bio_data_dir(bio);
bool throttled = false;
@@ -2138,6 +2148,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
if (unlikely(blk_queue_bypass(q)))
goto out_unlock;
+ blk_throtl_assoc_bio(tg, bio);
blk_throtl_update_idletime(tg);
sq = &tg->service_queue;
diff --git a/block/bounce.c b/block/bounce.c
index ec0d99995f5f..36869afc258c 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>
@@ -276,9 +276,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
}
}
- bio_clone_blkg_association(bio, bio_src);
-
- blkcg_bio_issue_init(bio);
+ bio_clone_blkcg_association(bio, bio_src);
return bio;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a3d87dd3c1a..ed41aa978c4a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3759,7 +3759,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = __bio_blkcg(bio)->css.serial_nr;
+ serial_nr = bio_blkcg(bio)->css.serial_nr;
rcu_read_unlock();
/*
@@ -3824,7 +3824,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct cfq_group *cfqg;
rcu_read_lock();
- cfqg = cfq_lookup_cfqg(cfqd, __bio_blkcg(bio));
+ cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
if (!cfqg) {
cfqq = &cfqd->oom_cfqq;
goto out;
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index f3702e533ff4..be70ca6c85d3 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -21,6 +21,18 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
appropriate hash algorithms (such as SHA-1) must be available.
ENOPKG will be reported if the requisite algorithm is unavailable.
+config ASYMMETRIC_TPM_KEY_SUBTYPE
+ tristate "Asymmetric TPM backed private key subtype"
+ depends on TCG_TPM
+ depends on TRUSTED_KEYS
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ help
+ This option provides support for TPM backed private key type handling.
+ Operations such as sign, verify, encrypt, decrypt are performed by
+ the TPM after the private key is loaded.
+
config X509_CERTIFICATE_PARSER
tristate "X.509 certificate parser"
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
@@ -31,6 +43,25 @@ config X509_CERTIFICATE_PARSER
data and provides the ability to instantiate a crypto key from a
public key packet found inside the certificate.
+config PKCS8_PRIVATE_KEY_PARSER
+ tristate "PKCS#8 private key parser"
+ depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select ASN1
+ select OID_REGISTRY
+ help
+ This option provides support for parsing PKCS#8 format blobs for
+ private key data and provides the ability to instantiate a crypto key
+ from that data.
+
+config TPM_KEY_PARSER
+ tristate "TPM private key parser"
+ depends on ASYMMETRIC_TPM_KEY_SUBTYPE
+ select ASN1
+ help
+ This option provides support for parsing TPM format blobs for
+ private key data and provides the ability to instantiate a crypto key
+ from that data.
+
config PKCS7_MESSAGE_PARSER
tristate "PKCS#7 message parser"
depends on X509_CERTIFICATE_PARSER
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index d4b2e1b2dc65..28b91adba2ae 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -11,6 +11,7 @@ asymmetric_keys-y := \
signature.o
obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o
+obj-$(CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE) += asym_tpm.o
#
# X.509 Certificate handling
@@ -30,6 +31,19 @@ $(obj)/x509.asn1.o: $(obj)/x509.asn1.c $(obj)/x509.asn1.h
$(obj)/x509_akid.asn1.o: $(obj)/x509_akid.asn1.c $(obj)/x509_akid.asn1.h
#
+# PKCS#8 private key handling
+#
+obj-$(CONFIG_PKCS8_PRIVATE_KEY_PARSER) += pkcs8_key_parser.o
+pkcs8_key_parser-y := \
+ pkcs8.asn1.o \
+ pkcs8_parser.o
+
+$(obj)/pkcs8_parser.o: $(obj)/pkcs8.asn1.h
+$(obj)/pkcs8.asn1.o: $(obj)/pkcs8.asn1.c $(obj)/pkcs8.asn1.h
+
+clean-files += pkcs8.asn1.c pkcs8.asn1.h
+
+#
# PKCS#7 message handling
#
obj-$(CONFIG_PKCS7_MESSAGE_PARSER) += pkcs7_message.o
@@ -61,3 +75,14 @@ verify_signed_pefile-y := \
$(obj)/mscode_parser.o: $(obj)/mscode.asn1.h $(obj)/mscode.asn1.h
$(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h
+
+#
+# TPM private key parsing
+#
+obj-$(CONFIG_TPM_KEY_PARSER) += tpm_key_parser.o
+tpm_key_parser-y := \
+ tpm.asn1.o \
+ tpm_parser.o
+
+$(obj)/tpm_parser.o: $(obj)/tpm.asn1.h
+$(obj)/tpm.asn1.o: $(obj)/tpm.asn1.c $(obj)/tpm.asn1.h
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
new file mode 100644
index 000000000000..5d4c270463f6
--- /dev/null
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -0,0 +1,988 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "ASYM-TPM: "fmt
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/scatterlist.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+#include <crypto/akcipher.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <asm/unaligned.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/trusted.h>
+#include <crypto/asym_tpm_subtype.h>
+#include <crypto/public_key.h>
+
+#define TPM_ORD_FLUSHSPECIFIC 186
+#define TPM_ORD_LOADKEY2 65
+#define TPM_ORD_UNBIND 30
+#define TPM_ORD_SIGN 60
+#define TPM_LOADKEY2_SIZE 59
+#define TPM_FLUSHSPECIFIC_SIZE 18
+#define TPM_UNBIND_SIZE 63
+#define TPM_SIGN_SIZE 63
+
+#define TPM_RT_KEY 0x00000001
+
+/*
+ * Load a TPM key from the blob provided by userspace
+ */
+static int tpm_loadkey2(struct tpm_buf *tb,
+ uint32_t keyhandle, unsigned char *keyauth,
+ const unsigned char *keyblob, int keybloblen,
+ uint32_t *newhandle)
+{
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+ unsigned char enonce[TPM_NONCE_SIZE];
+ unsigned char authdata[SHA1_DIGEST_SIZE];
+ uint32_t authhandle = 0;
+ unsigned char cont = 0;
+ uint32_t ordinal;
+ int ret;
+
+ ordinal = htonl(TPM_ORD_LOADKEY2);
+
+ /* session for loading the key */
+ ret = oiap(tb, &authhandle, enonce);
+ if (ret < 0) {
+ pr_info("oiap failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* generate odd nonce */
+ ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0) {
+ pr_info("tpm_get_random failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* calculate authorization HMAC value */
+ ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce,
+ nonceodd, cont, sizeof(uint32_t), &ordinal,
+ keybloblen, keyblob, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* build the request buffer */
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+ store32(tb, TPM_LOADKEY2_SIZE + keybloblen);
+ store32(tb, TPM_ORD_LOADKEY2);
+ store32(tb, keyhandle);
+ storebytes(tb, keyblob, keybloblen);
+ store32(tb, authhandle);
+ storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+ store8(tb, cont);
+ storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0) {
+ pr_info("authhmac failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, keyauth,
+ SHA1_DIGEST_SIZE, 0, 0);
+ if (ret < 0) {
+ pr_info("TSS_checkhmac1 failed (%d)\n", ret);
+ return ret;
+ }
+
+ *newhandle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ return 0;
+}
+
+/*
+ * Execute the FlushSpecific TPM command
+ */
+static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle)
+{
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_COMMAND);
+ store32(tb, TPM_FLUSHSPECIFIC_SIZE);
+ store32(tb, TPM_ORD_FLUSHSPECIFIC);
+ store32(tb, handle);
+ store32(tb, TPM_RT_KEY);
+
+ return trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+}
+
+/*
+ * Decrypt a blob provided by userspace using a specific key handle.
+ * The handle is a well known handle or previously loaded by e.g. LoadKey2
+ */
+static int tpm_unbind(struct tpm_buf *tb,
+ uint32_t keyhandle, unsigned char *keyauth,
+ const unsigned char *blob, uint32_t bloblen,
+ void *out, uint32_t outlen)
+{
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+ unsigned char enonce[TPM_NONCE_SIZE];
+ unsigned char authdata[SHA1_DIGEST_SIZE];
+ uint32_t authhandle = 0;
+ unsigned char cont = 0;
+ uint32_t ordinal;
+ uint32_t datalen;
+ int ret;
+
+ ordinal = htonl(TPM_ORD_UNBIND);
+ datalen = htonl(bloblen);
+
+ /* session for loading the key */
+ ret = oiap(tb, &authhandle, enonce);
+ if (ret < 0) {
+ pr_info("oiap failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* generate odd nonce */
+ ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0) {
+ pr_info("tpm_get_random failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* calculate authorization HMAC value */
+ ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce,
+ nonceodd, cont, sizeof(uint32_t), &ordinal,
+ sizeof(uint32_t), &datalen,
+ bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* build the request buffer */
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+ store32(tb, TPM_UNBIND_SIZE + bloblen);
+ store32(tb, TPM_ORD_UNBIND);
+ store32(tb, keyhandle);
+ store32(tb, bloblen);
+ storebytes(tb, blob, bloblen);
+ store32(tb, authhandle);
+ storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+ store8(tb, cont);
+ storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0) {
+ pr_info("authhmac failed (%d)\n", ret);
+ return ret;
+ }
+
+ datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+
+ ret = TSS_checkhmac1(tb->data, ordinal, nonceodd,
+ keyauth, SHA1_DIGEST_SIZE,
+ sizeof(uint32_t), TPM_DATA_OFFSET,
+ datalen, TPM_DATA_OFFSET + sizeof(uint32_t),
+ 0, 0);
+ if (ret < 0) {
+ pr_info("TSS_checkhmac1 failed (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t),
+ min(outlen, datalen));
+
+ return datalen;
+}
+
+/*
+ * Sign a blob provided by userspace (that has had the hash function applied)
+ * using a specific key handle. The handle is assumed to have been previously
+ * loaded by e.g. LoadKey2.
+ *
+ * Note that the key signature scheme of the used key should be set to
+ * TPM_SS_RSASSAPKCS1v15_DER. This allows the hashed input to be of any size
+ * up to key_length_in_bytes - 11 and not be limited to size 20 like the
+ * TPM_SS_RSASSAPKCS1v15_SHA1 signature scheme.
+ */
+static int tpm_sign(struct tpm_buf *tb,
+ uint32_t keyhandle, unsigned char *keyauth,
+ const unsigned char *blob, uint32_t bloblen,
+ void *out, uint32_t outlen)
+{
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+ unsigned char enonce[TPM_NONCE_SIZE];
+ unsigned char authdata[SHA1_DIGEST_SIZE];
+ uint32_t authhandle = 0;
+ unsigned char cont = 0;
+ uint32_t ordinal;
+ uint32_t datalen;
+ int ret;
+
+ ordinal = htonl(TPM_ORD_SIGN);
+ datalen = htonl(bloblen);
+
+ /* session for loading the key */
+ ret = oiap(tb, &authhandle, enonce);
+ if (ret < 0) {
+ pr_info("oiap failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* generate odd nonce */
+ ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0) {
+ pr_info("tpm_get_random failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* calculate authorization HMAC value */
+ ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce,
+ nonceodd, cont, sizeof(uint32_t), &ordinal,
+ sizeof(uint32_t), &datalen,
+ bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* build the request buffer */
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+ store32(tb, TPM_SIGN_SIZE + bloblen);
+ store32(tb, TPM_ORD_SIGN);
+ store32(tb, keyhandle);
+ store32(tb, bloblen);
+ storebytes(tb, blob, bloblen);
+ store32(tb, authhandle);
+ storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+ store8(tb, cont);
+ storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0) {
+ pr_info("authhmac failed (%d)\n", ret);
+ return ret;
+ }
+
+ datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+
+ ret = TSS_checkhmac1(tb->data, ordinal, nonceodd,
+ keyauth, SHA1_DIGEST_SIZE,
+ sizeof(uint32_t), TPM_DATA_OFFSET,
+ datalen, TPM_DATA_OFFSET + sizeof(uint32_t),
+ 0, 0);
+ if (ret < 0) {
+ pr_info("TSS_checkhmac1 failed (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t),
+ min(datalen, outlen));
+
+ return datalen;
+}
+/*
+ * Maximum buffer size for the BER/DER encoded public key. The public key
+ * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is the modulus of
+ * a key of at most 2048 bits and e is usually 65537.
+ * The encoding overhead is:
+ * - max 4 bytes for SEQUENCE
+ * - max 4 bytes for INTEGER n type/length
+ * - 257 bytes of n
+ * - max 2 bytes for INTEGER e type/length
+ * - 3 bytes of e
+ */
+#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3)
+
+/*
+ * Provide a part of a description of the key for /proc/keys.
+ */
+static void asym_tpm_describe(const struct key *asymmetric_key,
+ struct seq_file *m)
+{
+ struct tpm_key *tk = asymmetric_key->payload.data[asym_crypto];
+
+ if (!tk)
+ return;
+
+ seq_printf(m, "TPM1.2/Blob");
+}
+
+static void asym_tpm_destroy(void *payload0, void *payload3)
+{
+ struct tpm_key *tk = payload0;
+
+ if (!tk)
+ return;
+
+ kfree(tk->blob);
+ tk->blob_len = 0;
+
+ kfree(tk);
+}
+
+/* How many bytes will it take to encode the length */
+static inline uint32_t definite_length(uint32_t len)
+{
+ if (len <= 127)
+ return 1;
+ if (len <= 255)
+ return 2;
+ return 3;
+}
+
+static inline uint8_t *encode_tag_length(uint8_t *buf, uint8_t tag,
+ uint32_t len)
+{
+ *buf++ = tag;
+
+ if (len <= 127) {
+ buf[0] = len;
+ return buf + 1;
+ }
+
+ if (len <= 255) {
+ buf[0] = 0x81;
+ buf[1] = len;
+ return buf + 2;
+ }
+
+ buf[0] = 0x82;
+ put_unaligned_be16(len, buf + 1);
+ return buf + 3;
+}
+
+static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
+{
+ uint8_t *cur = buf;
+ uint32_t n_len = definite_length(len) + 1 + len + 1;
+ uint32_t e_len = definite_length(3) + 1 + 3;
+ uint8_t e[3] = { 0x01, 0x00, 0x01 };
+
+ /* SEQUENCE */
+ cur = encode_tag_length(cur, 0x30, n_len + e_len);
+ /* INTEGER n */
+ cur = encode_tag_length(cur, 0x02, len + 1);
+ cur[0] = 0x00;
+ memcpy(cur + 1, pub_key, len);
+ cur += len + 1;
+ cur = encode_tag_length(cur, 0x02, sizeof(e));
+ memcpy(cur, e, sizeof(e));
+ cur += sizeof(e);
+
+ return cur - buf;
+}
+
+/*
+ * Determine the crypto algorithm name.
+ */
+static int determine_akcipher(const char *encoding, const char *hash_algo,
+ char alg_name[CRYPTO_MAX_ALG_NAME])
+{
+ if (strcmp(encoding, "pkcs1") == 0) {
+ if (!hash_algo) {
+ strcpy(alg_name, "pkcs1pad(rsa)");
+ return 0;
+ }
+
+ if (snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(rsa,%s)",
+ hash_algo) >= CRYPTO_MAX_ALG_NAME)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ if (strcmp(encoding, "raw") == 0) {
+ strcpy(alg_name, "rsa");
+ return 0;
+ }
+
+ return -ENOPKG;
+}
+
+/*
+ * Query information about a key.
+ */
+static int tpm_key_query(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info)
+{
+ struct tpm_key *tk = params->key->payload.data[asym_crypto];
+ int ret;
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_akcipher *tfm;
+ uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
+ uint32_t der_pub_key_len;
+ int len;
+
+ /* TPM only works on private keys, public keys still done in software */
+ ret = determine_akcipher(params->encoding, params->hash_algo, alg_name);
+ if (ret < 0)
+ return ret;
+
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
+ der_pub_key);
+
+ ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
+ if (ret < 0)
+ goto error_free_tfm;
+
+ len = crypto_akcipher_maxsize(tfm);
+
+ info->key_size = tk->key_len;
+ info->max_data_size = tk->key_len / 8;
+ info->max_sig_size = len;
+ info->max_enc_size = len;
+ info->max_dec_size = tk->key_len / 8;
+
+ info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT |
+ KEYCTL_SUPPORTS_DECRYPT |
+ KEYCTL_SUPPORTS_VERIFY |
+ KEYCTL_SUPPORTS_SIGN;
+
+ ret = 0;
+error_free_tfm:
+ crypto_free_akcipher(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * Encryption operation is performed with the public key. Hence it is done
+ * in software
+ */
+static int tpm_key_encrypt(struct tpm_key *tk,
+ struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_akcipher *tfm;
+ struct akcipher_request *req;
+ struct crypto_wait cwait;
+ struct scatterlist in_sg, out_sg;
+ uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
+ uint32_t der_pub_key_len;
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ ret = determine_akcipher(params->encoding, params->hash_algo, alg_name);
+ if (ret < 0)
+ return ret;
+
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
+ der_pub_key);
+
+ ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
+ if (ret < 0)
+ goto error_free_tfm;
+
+ req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ goto error_free_tfm;
+
+ sg_init_one(&in_sg, in, params->in_len);
+ sg_init_one(&out_sg, out, params->out_len);
+ akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
+ params->out_len);
+ crypto_init_wait(&cwait);
+ akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &cwait);
+
+ ret = crypto_akcipher_encrypt(req);
+ ret = crypto_wait_req(ret, &cwait);
+
+ if (ret == 0)
+ ret = req->dst_len;
+
+ akcipher_request_free(req);
+error_free_tfm:
+ crypto_free_akcipher(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * Decryption operation is performed with the private key in the TPM.
+ */
+static int tpm_key_decrypt(struct tpm_key *tk,
+ struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ struct tpm_buf *tb;
+ uint32_t keyhandle;
+ uint8_t srkauth[SHA1_DIGEST_SIZE];
+ uint8_t keyauth[SHA1_DIGEST_SIZE];
+ int r;
+
+ pr_devel("==>%s()\n", __func__);
+
+ if (params->hash_algo)
+ return -ENOPKG;
+
+ if (strcmp(params->encoding, "pkcs1"))
+ return -ENOPKG;
+
+ tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+
+ /* TODO: Handle a non-all zero SRK authorization */
+ memset(srkauth, 0, sizeof(srkauth));
+
+ r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+ tk->blob, tk->blob_len, &keyhandle);
+ if (r < 0) {
+ pr_devel("loadkey2 failed (%d)\n", r);
+ goto error;
+ }
+
+ /* TODO: Handle a non-all zero key authorization */
+ memset(keyauth, 0, sizeof(keyauth));
+
+ r = tpm_unbind(tb, keyhandle, keyauth,
+ in, params->in_len, out, params->out_len);
+ if (r < 0)
+ pr_devel("tpm_unbind failed (%d)\n", r);
+
+ if (tpm_flushspecific(tb, keyhandle) < 0)
+ pr_devel("flushspecific failed (%d)\n", r);
+
+error:
+ kzfree(tb);
+ pr_devel("<==%s() = %d\n", __func__, r);
+ return r;
+}
+
+/*
+ * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
+ */
+static const u8 digest_info_md5[] = {
+ 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
+ 0x05, 0x00, 0x04, 0x10
+};
+
+static const u8 digest_info_sha1[] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+ 0x2b, 0x0e, 0x03, 0x02, 0x1a,
+ 0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 digest_info_rmd160[] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+ 0x2b, 0x24, 0x03, 0x02, 0x01,
+ 0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 digest_info_sha224[] = {
+ 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
+ 0x05, 0x00, 0x04, 0x1c
+};
+
+static const u8 digest_info_sha256[] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
+ 0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 digest_info_sha384[] = {
+ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
+ 0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 digest_info_sha512[] = {
+ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
+ 0x05, 0x00, 0x04, 0x40
+};
+
+static const struct asn1_template {
+ const char *name;
+ const u8 *data;
+ size_t size;
+} asn1_templates[] = {
+#define _(X) { #X, digest_info_##X, sizeof(digest_info_##X) }
+ _(md5),
+ _(sha1),
+ _(rmd160),
+ _(sha256),
+ _(sha384),
+ _(sha512),
+ _(sha224),
+ { NULL }
+#undef _
+};
+
+static const struct asn1_template *lookup_asn1(const char *name)
+{
+ const struct asn1_template *p;
+
+ for (p = asn1_templates; p->name; p++)
+ if (strcmp(name, p->name) == 0)
+ return p;
+ return NULL;
+}
+
+/*
+ * Sign operation is performed with the private key in the TPM.
+ */
+static int tpm_key_sign(struct tpm_key *tk,
+ struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ struct tpm_buf *tb;
+ uint32_t keyhandle;
+ uint8_t srkauth[SHA1_DIGEST_SIZE];
+ uint8_t keyauth[SHA1_DIGEST_SIZE];
+ void *asn1_wrapped = NULL;
+ uint32_t in_len = params->in_len;
+ int r;
+
+ pr_devel("==>%s()\n", __func__);
+
+ if (strcmp(params->encoding, "pkcs1"))
+ return -ENOPKG;
+
+ if (params->hash_algo) {
+ const struct asn1_template *asn1 =
+ lookup_asn1(params->hash_algo);
+
+ if (!asn1)
+ return -ENOPKG;
+
+ /* request enough space for the ASN.1 template + input hash */
+ asn1_wrapped = kzalloc(in_len + asn1->size, GFP_KERNEL);
+ if (!asn1_wrapped)
+ return -ENOMEM;
+
+ /* Copy ASN.1 template, then the input */
+ memcpy(asn1_wrapped, asn1->data, asn1->size);
+ memcpy(asn1_wrapped + asn1->size, in, in_len);
+
+ in = asn1_wrapped;
+ in_len += asn1->size;
+ }
+
+ if (in_len > tk->key_len / 8 - 11) {
+ r = -EOVERFLOW;
+ goto error_free_asn1_wrapped;
+ }
+
+ r = -ENOMEM;
+ tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+ if (!tb)
+ goto error_free_asn1_wrapped;
+
+ /* TODO: Handle a non-all zero SRK authorization */
+ memset(srkauth, 0, sizeof(srkauth));
+
+ r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+ tk->blob, tk->blob_len, &keyhandle);
+ if (r < 0) {
+ pr_devel("loadkey2 failed (%d)\n", r);
+ goto error_free_tb;
+ }
+
+ /* TODO: Handle a non-all zero key authorization */
+ memset(keyauth, 0, sizeof(keyauth));
+
+ r = tpm_sign(tb, keyhandle, keyauth, in, in_len, out, params->out_len);
+ if (r < 0)
+ pr_devel("tpm_sign failed (%d)\n", r);
+
+ if (tpm_flushspecific(tb, keyhandle) < 0)
+ pr_devel("flushspecific failed (%d)\n", r);
+
+error_free_tb:
+ kzfree(tb);
+error_free_asn1_wrapped:
+ kfree(asn1_wrapped);
+ pr_devel("<==%s() = %d\n", __func__, r);
+ return r;
+}
+
+/*
+ * Do encryption, decryption and signing ops.
+ */
+static int tpm_key_eds_op(struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ struct tpm_key *tk = params->key->payload.data[asym_crypto];
+ int ret = -EOPNOTSUPP;
+
+ /* Perform the encryption calculation. */
+ switch (params->op) {
+ case kernel_pkey_encrypt:
+ ret = tpm_key_encrypt(tk, params, in, out);
+ break;
+ case kernel_pkey_decrypt:
+ ret = tpm_key_decrypt(tk, params, in, out);
+ break;
+ case kernel_pkey_sign:
+ ret = tpm_key_sign(tk, params, in, out);
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+/*
+ * Verify a signature using a public key.
+ */
+static int tpm_key_verify_signature(const struct key *key,
+ const struct public_key_signature *sig)
+{
+ const struct tpm_key *tk = key->payload.data[asym_crypto];
+ struct crypto_wait cwait;
+ struct crypto_akcipher *tfm;
+ struct akcipher_request *req;
+ struct scatterlist sig_sg, digest_sg;
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
+ uint32_t der_pub_key_len;
+ void *output;
+ unsigned int outlen;
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ BUG_ON(!tk);
+ BUG_ON(!sig);
+ BUG_ON(!sig->s);
+
+ if (!sig->digest)
+ return -ENOPKG;
+
+ ret = determine_akcipher(sig->encoding, sig->hash_algo, alg_name);
+ if (ret < 0)
+ return ret;
+
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
+ der_pub_key);
+
+ ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
+ if (ret < 0)
+ goto error_free_tfm;
+
+ ret = -ENOMEM;
+ req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ goto error_free_tfm;
+
+ ret = -ENOMEM;
+ outlen = crypto_akcipher_maxsize(tfm);
+ output = kmalloc(outlen, GFP_KERNEL);
+ if (!output)
+ goto error_free_req;
+
+ sg_init_one(&sig_sg, sig->s, sig->s_size);
+ sg_init_one(&digest_sg, output, outlen);
+ akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
+ outlen);
+ crypto_init_wait(&cwait);
+ akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &cwait);
+
+ /* Perform the verification calculation. This doesn't actually do the
+ * verification, but rather calculates the hash expected by the
+ * signature and returns that to us.
+ */
+ ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
+ if (ret)
+ goto out_free_output;
+
+ /* Do the actual verification step. */
+ if (req->dst_len != sig->digest_size ||
+ memcmp(sig->digest, output, sig->digest_size) != 0)
+ ret = -EKEYREJECTED;
+
+out_free_output:
+ kfree(output);
+error_free_req:
+ akcipher_request_free(req);
+error_free_tfm:
+ crypto_free_akcipher(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ if (WARN_ON_ONCE(ret > 0))
+ ret = -EINVAL;
+ return ret;
+}
+
+/*
+ * Parse enough information out of TPM_KEY structure:
+ * TPM_STRUCT_VER -> 4 bytes
+ * TPM_KEY_USAGE -> 2 bytes
+ * TPM_KEY_FLAGS -> 4 bytes
+ * TPM_AUTH_DATA_USAGE -> 1 byte
+ * TPM_KEY_PARMS -> variable
+ * UINT32 PCRInfoSize -> 4 bytes
+ * BYTE* -> PCRInfoSize bytes
+ * TPM_STORE_PUBKEY
+ * UINT32 encDataSize;
+ * BYTE* -> encDataSize;
+ *
+ * TPM_KEY_PARMS:
+ * TPM_ALGORITHM_ID -> 4 bytes
+ * TPM_ENC_SCHEME -> 2 bytes
+ * TPM_SIG_SCHEME -> 2 bytes
+ * UINT32 parmSize -> 4 bytes
+ * BYTE* -> variable
+ */
+static int extract_key_parameters(struct tpm_key *tk)
+{
+ const void *cur = tk->blob;
+ uint32_t len = tk->blob_len;
+ const void *pub_key;
+ uint32_t sz;
+ uint32_t key_len;
+
+ if (len < 11)
+ return -EBADMSG;
+
+ /* Ensure this is a legacy key */
+ if (get_unaligned_be16(cur + 4) != 0x0015)
+ return -EBADMSG;
+
+ /* Skip to TPM_KEY_PARMS */
+ cur += 11;
+ len -= 11;
+
+ if (len < 12)
+ return -EBADMSG;
+
+ /* Make sure this is an RSA key */
+ if (get_unaligned_be32(cur) != 0x00000001)
+ return -EBADMSG;
+
+ /* Make sure this is TPM_ES_RSAESPKCSv15 encoding scheme */
+ if (get_unaligned_be16(cur + 4) != 0x0002)
+ return -EBADMSG;
+
+ /* Make sure this is TPM_SS_RSASSAPKCS1v15_DER signature scheme */
+ if (get_unaligned_be16(cur + 6) != 0x0003)
+ return -EBADMSG;
+
+ sz = get_unaligned_be32(cur + 8);
+ if (len < sz + 12)
+ return -EBADMSG;
+
+ /* Move to TPM_RSA_KEY_PARMS */
+ len -= 12;
+ cur += 12;
+
+ /* Grab the RSA key length */
+ key_len = get_unaligned_be32(cur);
+
+ switch (key_len) {
+ case 512:
+ case 1024:
+ case 1536:
+ case 2048:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Move just past TPM_KEY_PARMS */
+ cur += sz;
+ len -= sz;
+
+ if (len < 4)
+ return -EBADMSG;
+
+ sz = get_unaligned_be32(cur);
+ if (len < 4 + sz)
+ return -EBADMSG;
+
+ /* Move to TPM_STORE_PUBKEY */
+ cur += 4 + sz;
+ len -= 4 + sz;
+
+ /* Grab the size of the public key; it should agree with the key size */
+ sz = get_unaligned_be32(cur);
+ if (sz > 256)
+ return -EINVAL;
+
+ pub_key = cur + 4;
+
+ tk->key_len = key_len;
+ tk->pub_key = pub_key;
+ tk->pub_key_len = sz;
+
+ return 0;
+}
+
+/* Given the blob, parse it and load it into the TPM */
+struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len)
+{
+ int r;
+ struct tpm_key *tk;
+
+ r = tpm_is_tpm2(NULL);
+ if (r < 0)
+ goto error;
+
+ /* We don't support TPM2 yet */
+ if (r > 0) {
+ r = -ENODEV;
+ goto error;
+ }
+
+ r = -ENOMEM;
+ tk = kzalloc(sizeof(struct tpm_key), GFP_KERNEL);
+ if (!tk)
+ goto error;
+
+ tk->blob = kmemdup(blob, blob_len, GFP_KERNEL);
+ if (!tk->blob)
+ goto error_memdup;
+
+ tk->blob_len = blob_len;
+
+ r = extract_key_parameters(tk);
+ if (r < 0)
+ goto error_extract;
+
+ return tk;
+
+error_extract:
+ kfree(tk->blob);
+ tk->blob_len = 0;
+error_memdup:
+ kfree(tk);
+error:
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(tpm_key_create);
+
+/*
+ * TPM-based asymmetric key subtype
+ */
+struct asymmetric_key_subtype asym_tpm_subtype = {
+ .owner = THIS_MODULE,
+ .name = "asym_tpm",
+ .name_len = sizeof("asym_tpm") - 1,
+ .describe = asym_tpm_describe,
+ .destroy = asym_tpm_destroy,
+ .query = tpm_key_query,
+ .eds_op = tpm_key_eds_op,
+ .verify_signature = tpm_key_verify_signature,
+};
+EXPORT_SYMBOL_GPL(asym_tpm_subtype);
+
+MODULE_DESCRIPTION("TPM based asymmetric key subtype");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
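The PUB_KEY_BUF_SIZE comment and derive_pub_key() above encode the public key as the DER structure SEQUENCE { INTEGER n, INTEGER e }. The short userspace sketch below, not part of the patch, works through the size arithmetic under the usual DER definite-length rules (one length byte up to 127, two up to 255, three up to 65535) with a leading 0x00 on the modulus so it is not read as negative; for a 2048-bit modulus it lands exactly on the 270-byte bound (4 + 4 + 257 + 2 + 3) used for PUB_KEY_BUF_SIZE. The helper names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* bytes needed to encode a DER definite length */
static uint32_t definite_length(uint32_t len)
{
	if (len <= 127)
		return 1;	/* short form: length in one byte */
	if (len <= 255)
		return 2;	/* 0x81 + one length byte */
	return 3;		/* 0x82 + two length bytes (up to 65535) */
}

/* encoded size of SEQUENCE { INTEGER n, INTEGER e } for an n of 'len' bytes */
static uint32_t rsa_pub_der_size(uint32_t len)
{
	/* INTEGER n: tag + length + leading 0x00 + modulus bytes */
	uint32_t n_len = 1 + definite_length(len + 1) + 1 + len;
	/* INTEGER e: tag + length + the 3 bytes of 65537 */
	uint32_t e_len = 1 + definite_length(3) + 3;

	/* outer SEQUENCE: tag + length + contents */
	return 1 + definite_length(n_len + e_len) + n_len + e_len;
}

int main(void)
{
	/* 2048-bit key: 256-byte modulus -> 270, matching PUB_KEY_BUF_SIZE */
	printf("DER size for 2048-bit n: %u\n",
	       (unsigned int)rsa_pub_der_size(256));
	return 0;
}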
diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h
index ca8e9ac34ce6..7be1ccf4fa9f 100644
--- a/crypto/asymmetric_keys/asymmetric_keys.h
+++ b/crypto/asymmetric_keys/asymmetric_keys.h
@@ -16,3 +16,6 @@ extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
extern int __asymmetric_key_hex_to_key_id(const char *id,
struct asymmetric_key_id *match_id,
size_t hexlen);
+
+extern int asymmetric_key_eds_op(struct kernel_pkey_params *params,
+ const void *in, void *out);
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 26539e9a8bda..69a0788a7de5 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
+#include <keys/user-type.h>
#include "asymmetric_keys.h"
MODULE_LICENSE("GPL");
@@ -538,6 +539,45 @@ out:
return ret;
}
+int asymmetric_key_eds_op(struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ const struct asymmetric_key_subtype *subtype;
+ struct key *key = params->key;
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ if (key->type != &key_type_asymmetric)
+ return -EINVAL;
+ subtype = asymmetric_key_subtype(key);
+ if (!subtype ||
+ !key->payload.data[0])
+ return -EINVAL;
+ if (!subtype->eds_op)
+ return -ENOTSUPP;
+
+ ret = subtype->eds_op(params, in, out);
+
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+static int asymmetric_key_verify_signature(struct kernel_pkey_params *params,
+ const void *in, const void *in2)
+{
+ struct public_key_signature sig = {
+ .s_size = params->in2_len,
+ .digest_size = params->in_len,
+ .encoding = params->encoding,
+ .hash_algo = params->hash_algo,
+ .digest = (void *)in,
+ .s = (void *)in2,
+ };
+
+ return verify_signature(params->key, &sig);
+}
+
struct key_type key_type_asymmetric = {
.name = "asymmetric",
.preparse = asymmetric_key_preparse,
@@ -548,6 +588,9 @@ struct key_type key_type_asymmetric = {
.destroy = asymmetric_key_destroy,
.describe = asymmetric_key_describe,
.lookup_restriction = asymmetric_lookup_restriction,
+ .asym_query = query_asymmetric_key,
+ .asym_eds_op = asymmetric_key_eds_op,
+ .asym_verify_signature = asymmetric_key_verify_signature,
};
EXPORT_SYMBOL_GPL(key_type_asymmetric);
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 0f134162cef4..f0d56e1a8b7e 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -271,6 +271,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
switch (ctx->last_oid) {
case OID_rsaEncryption:
ctx->sinfo->sig->pkey_algo = "rsa";
+ ctx->sinfo->sig->encoding = "pkcs1";
break;
default:
printk("Unsupported pkey algo: %u\n", ctx->last_oid);
diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1
new file mode 100644
index 000000000000..702c41a3c713
--- /dev/null
+++ b/crypto/asymmetric_keys/pkcs8.asn1
@@ -0,0 +1,24 @@
+--
+-- This is the unencrypted variant
+--
+PrivateKeyInfo ::= SEQUENCE {
+ version Version,
+ privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,
+ privateKey PrivateKey,
+ attributes [0] IMPLICIT Attributes OPTIONAL
+}
+
+Version ::= INTEGER ({ pkcs8_note_version })
+
+PrivateKeyAlgorithmIdentifier ::= AlgorithmIdentifier ({ pkcs8_note_algo })
+
+PrivateKey ::= OCTET STRING ({ pkcs8_note_key })
+
+Attributes ::= SET OF Attribute
+
+Attribute ::= ANY
+
+AlgorithmIdentifier ::= SEQUENCE {
+ algorithm OBJECT IDENTIFIER ({ pkcs8_note_OID }),
+ parameters ANY OPTIONAL
+}
diff --git a/crypto/asymmetric_keys/pkcs8_parser.c b/crypto/asymmetric_keys/pkcs8_parser.c
new file mode 100644
index 000000000000..5f6a7ecc9765
--- /dev/null
+++ b/crypto/asymmetric_keys/pkcs8_parser.c
@@ -0,0 +1,184 @@
+/* PKCS#8 Private Key parser [RFC 5208].
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "PKCS8: "fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/oid_registry.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/asymmetric-parser.h>
+#include <crypto/public_key.h>
+#include "pkcs8.asn1.h"
+
+struct pkcs8_parse_context {
+ struct public_key *pub;
+ unsigned long data; /* Start of data */
+ enum OID last_oid; /* Last OID encountered */
+ enum OID algo_oid; /* Algorithm OID */
+ u32 key_size;
+ const void *key;
+};
+
+/*
+ * Note an OID when we find one for later processing when we know how to
+ * interpret it.
+ */
+int pkcs8_note_OID(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct pkcs8_parse_context *ctx = context;
+
+ ctx->last_oid = look_up_OID(value, vlen);
+ if (ctx->last_oid == OID__NR) {
+ char buffer[50];
+
+ sprint_oid(value, vlen, buffer, sizeof(buffer));
+ pr_info("Unknown OID: [%lu] %s\n",
+ (unsigned long)value - ctx->data, buffer);
+ }
+ return 0;
+}
+
+/*
+ * Note the version number of the ASN.1 blob.
+ */
+int pkcs8_note_version(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ if (vlen != 1 || ((const u8 *)value)[0] != 0) {
+ pr_warn("Unsupported PKCS#8 version\n");
+ return -EBADMSG;
+ }
+ return 0;
+}
+
+/*
+ * Note the public algorithm.
+ */
+int pkcs8_note_algo(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct pkcs8_parse_context *ctx = context;
+
+ if (ctx->last_oid != OID_rsaEncryption)
+ return -ENOPKG;
+
+ ctx->pub->pkey_algo = "rsa";
+ return 0;
+}
+
+/*
+ * Note the key data of the ASN.1 blob.
+ */
+int pkcs8_note_key(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct pkcs8_parse_context *ctx = context;
+
+ ctx->key = value;
+ ctx->key_size = vlen;
+ return 0;
+}
+
+/*
+ * Parse a PKCS#8 private key blob.
+ */
+static struct public_key *pkcs8_parse(const void *data, size_t datalen)
+{
+ struct pkcs8_parse_context ctx;
+ struct public_key *pub;
+ long ret;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ret = -ENOMEM;
+ ctx.pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
+ if (!ctx.pub)
+ goto error;
+
+ ctx.data = (unsigned long)data;
+
+ /* Attempt to decode the private key */
+ ret = asn1_ber_decoder(&pkcs8_decoder, &ctx, data, datalen);
+ if (ret < 0)
+ goto error_decode;
+
+ ret = -ENOMEM;
+ pub = ctx.pub;
+ pub->key = kmemdup(ctx.key, ctx.key_size, GFP_KERNEL);
+ if (!pub->key)
+ goto error_decode;
+
+ pub->keylen = ctx.key_size;
+ pub->key_is_private = true;
+ return pub;
+
+error_decode:
+ kfree(ctx.pub);
+error:
+ return ERR_PTR(ret);
+}
+
+/*
+ * Attempt to parse a data blob for a key as a PKCS#8 private key.
+ */
+static int pkcs8_key_preparse(struct key_preparsed_payload *prep)
+{
+ struct public_key *pub;
+
+ pub = pkcs8_parse(prep->data, prep->datalen);
+ if (IS_ERR(pub))
+ return PTR_ERR(pub);
+
+ pr_devel("Cert Key Algo: %s\n", pub->pkey_algo);
+ pub->id_type = "PKCS8";
+
+ /* We're pinning the module by being linked against it */
+ __module_get(public_key_subtype.owner);
+ prep->payload.data[asym_subtype] = &public_key_subtype;
+ prep->payload.data[asym_key_ids] = NULL;
+ prep->payload.data[asym_crypto] = pub;
+ prep->payload.data[asym_auth] = NULL;
+ prep->quotalen = 100;
+ return 0;
+}
+
+static struct asymmetric_key_parser pkcs8_key_parser = {
+ .owner = THIS_MODULE,
+ .name = "pkcs8",
+ .parse = pkcs8_key_preparse,
+};
+
+/*
+ * Module stuff
+ */
+static int __init pkcs8_key_init(void)
+{
+ return register_asymmetric_key_parser(&pkcs8_key_parser);
+}
+
+static void __exit pkcs8_key_exit(void)
+{
+ unregister_asymmetric_key_parser(&pkcs8_key_parser);
+}
+
+module_init(pkcs8_key_init);
+module_exit(pkcs8_key_exit);
+
+MODULE_DESCRIPTION("PKCS#8 private key parser");
+MODULE_LICENSE("GPL");
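The generated pkcs8 decoder walks the DER tag/length/value structure of the PrivateKeyInfo SEQUENCE and calls the pkcs8_note_*() handlers above as it goes. As a rough illustration of what that outer walk looks like, here is a standalone userspace sketch that reads DER headers and pulls the version INTEGER out of a hand-built SEQUENCE { INTEGER 0 } blob; it is not the kernel decoder and only covers the happy path.

/* Sketch: minimal DER tag/length walker over the outermost PKCS#8 shape. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Decode one TLV header; return header size in bytes, or -1 on error. */
static int der_header(const uint8_t *p, size_t avail, uint8_t *tag, size_t *len)
{
	size_t n, i, hdr;

	if (avail < 2)
		return -1;
	*tag = p[0];
	if (p[1] < 0x80) {			/* short-form length */
		*len = p[1];
		hdr = 2;
	} else {				/* long-form length */
		n = p[1] & 0x7f;
		if (n == 0 || n > sizeof(size_t) || avail < 2 + n)
			return -1;
		*len = 0;
		for (i = 0; i < n; i++)
			*len = (*len << 8) | p[2 + i];
		hdr = 2 + n;
	}
	if (*len > avail - hdr)
		return -1;
	return (int)hdr;
}

int main(void)
{
	/* SEQUENCE { INTEGER 0 } -- the start of a PrivateKeyInfo, where the
	 * INTEGER is the PKCS#8 version that pkcs8_note_version() checks. */
	static const uint8_t blob[] = { 0x30, 0x03, 0x02, 0x01, 0x00 };
	uint8_t tag;
	size_t len;
	int h;

	h = der_header(blob, sizeof(blob), &tag, &len);
	if (h < 0 || tag != 0x30)
		return 1;
	printf("outer SEQUENCE, %zu byte(s) of content\n", len);

	if (der_header(blob + h, len, &tag, &len) < 0 || tag != 0x02 || len != 1)
		return 1;
	printf("version INTEGER = %d\n", blob[4]);
	return 0;
}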
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e929fe1e4106..f5d85b47fcc6 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -60,6 +60,165 @@ static void public_key_destroy(void *payload0, void *payload3)
}
/*
+ * Determine the crypto algorithm name.
+ */
+static
+int software_key_determine_akcipher(const char *encoding,
+ const char *hash_algo,
+ const struct public_key *pkey,
+ char alg_name[CRYPTO_MAX_ALG_NAME])
+{
+ int n;
+
+ if (strcmp(encoding, "pkcs1") == 0) {
+ /* The data wangled by the RSA algorithm is typically padded
+ * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447
+ * sec 8.2].
+ */
+ if (!hash_algo)
+ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1pad(%s)",
+ pkey->pkey_algo);
+ else
+ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1pad(%s,%s)",
+ pkey->pkey_algo, hash_algo);
+ return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
+ }
+
+ if (strcmp(encoding, "raw") == 0) {
+ strcpy(alg_name, pkey->pkey_algo);
+ return 0;
+ }
+
+ return -ENOPKG;
+}
+
+/*
+ * Query information about a key.
+ */
+static int software_key_query(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info)
+{
+ struct crypto_akcipher *tfm;
+ struct public_key *pkey = params->key->payload.data[asym_crypto];
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ int ret, len;
+
+ ret = software_key_determine_akcipher(params->encoding,
+ params->hash_algo,
+ pkey, alg_name);
+ if (ret < 0)
+ return ret;
+
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm,
+ pkey->key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm,
+ pkey->key, pkey->keylen);
+ if (ret < 0)
+ goto error_free_tfm;
+
+ len = crypto_akcipher_maxsize(tfm);
+ info->key_size = len * 8;
+ info->max_data_size = len;
+ info->max_sig_size = len;
+ info->max_enc_size = len;
+ info->max_dec_size = len;
+ info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT |
+ KEYCTL_SUPPORTS_VERIFY);
+ if (pkey->key_is_private)
+ info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT |
+ KEYCTL_SUPPORTS_SIGN);
+ ret = 0;
+
+error_free_tfm:
+ crypto_free_akcipher(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * Do encryption, decryption and signing ops.
+ */
+static int software_key_eds_op(struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ const struct public_key *pkey = params->key->payload.data[asym_crypto];
+ struct akcipher_request *req;
+ struct crypto_akcipher *tfm;
+ struct crypto_wait cwait;
+ struct scatterlist in_sg, out_sg;
+ char alg_name[CRYPTO_MAX_ALG_NAME];
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ ret = software_key_determine_akcipher(params->encoding,
+ params->hash_algo,
+ pkey, alg_name);
+ if (ret < 0)
+ return ret;
+
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ goto error_free_tfm;
+
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm,
+ pkey->key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm,
+ pkey->key, pkey->keylen);
+ if (ret)
+ goto error_free_req;
+
+ sg_init_one(&in_sg, in, params->in_len);
+ sg_init_one(&out_sg, out, params->out_len);
+ akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
+ params->out_len);
+ crypto_init_wait(&cwait);
+ akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &cwait);
+
+ /* Perform the encryption calculation. */
+ switch (params->op) {
+ case kernel_pkey_encrypt:
+ ret = crypto_akcipher_encrypt(req);
+ break;
+ case kernel_pkey_decrypt:
+ ret = crypto_akcipher_decrypt(req);
+ break;
+ case kernel_pkey_sign:
+ ret = crypto_akcipher_sign(req);
+ break;
+ default:
+ BUG();
+ }
+
+ ret = crypto_wait_req(ret, &cwait);
+ if (ret == 0)
+ ret = req->dst_len;
+
+error_free_req:
+ akcipher_request_free(req);
+error_free_tfm:
+ crypto_free_akcipher(tfm);
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
@@ -69,8 +228,7 @@ int public_key_verify_signature(const struct public_key *pkey,
struct crypto_akcipher *tfm;
struct akcipher_request *req;
struct scatterlist sig_sg, digest_sg;
- const char *alg_name;
- char alg_name_buf[CRYPTO_MAX_ALG_NAME];
+ char alg_name[CRYPTO_MAX_ALG_NAME];
void *output;
unsigned int outlen;
int ret;
@@ -81,21 +239,11 @@ int public_key_verify_signature(const struct public_key *pkey,
BUG_ON(!sig);
BUG_ON(!sig->s);
- if (!sig->digest)
- return -ENOPKG;
-
- alg_name = sig->pkey_algo;
- if (strcmp(sig->pkey_algo, "rsa") == 0) {
- /* The data wangled by the RSA algorithm is typically padded
- * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447
- * sec 8.2].
- */
- if (snprintf(alg_name_buf, CRYPTO_MAX_ALG_NAME,
- "pkcs1pad(rsa,%s)", sig->hash_algo
- ) >= CRYPTO_MAX_ALG_NAME)
- return -EINVAL;
- alg_name = alg_name_buf;
- }
+ ret = software_key_determine_akcipher(sig->encoding,
+ sig->hash_algo,
+ pkey, alg_name);
+ if (ret < 0)
+ return ret;
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm))
@@ -106,7 +254,12 @@ int public_key_verify_signature(const struct public_key *pkey,
if (!req)
goto error_free_tfm;
- ret = crypto_akcipher_set_pub_key(tfm, pkey->key, pkey->keylen);
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm,
+ pkey->key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm,
+ pkey->key, pkey->keylen);
if (ret)
goto error_free_req;
@@ -167,6 +320,8 @@ struct asymmetric_key_subtype public_key_subtype = {
.name_len = sizeof("public_key") - 1,
.describe = public_key_describe,
.destroy = public_key_destroy,
+ .query = software_key_query,
+ .eds_op = software_key_eds_op,
.verify_signature = public_key_verify_signature_2,
};
EXPORT_SYMBOL_GPL(public_key_subtype);
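software_key_determine_akcipher() is the piece that turns (encoding, hash, key algorithm) into a crypto API template name such as "pkcs1pad(rsa,sha256)", "pkcs1pad(rsa)" or plain "rsa". Below is a standalone sketch of that string construction, assuming CRYPTO_MAX_ALG_NAME is 128 as in the kernel headers and using plain negative return codes in place of -EINVAL/-ENOPKG.

/* Sketch of the akcipher-name construction, as standalone userspace C. */
#include <stdio.h>
#include <string.h>

#define CRYPTO_MAX_ALG_NAME 128

static int determine_akcipher(const char *encoding, const char *hash_algo,
			      const char *pkey_algo,
			      char alg_name[CRYPTO_MAX_ALG_NAME])
{
	int n;

	if (strcmp(encoding, "pkcs1") == 0) {
		if (!hash_algo)
			n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
				     "pkcs1pad(%s)", pkey_algo);
		else
			n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
				     "pkcs1pad(%s,%s)", pkey_algo, hash_algo);
		return n >= CRYPTO_MAX_ALG_NAME ? -1 : 0;	/* -EINVAL */
	}
	if (strcmp(encoding, "raw") == 0) {
		strcpy(alg_name, pkey_algo);
		return 0;
	}
	return -2;						/* -ENOPKG */
}

int main(void)
{
	char name[CRYPTO_MAX_ALG_NAME];

	if (determine_akcipher("pkcs1", "sha256", "rsa", name) == 0)
		printf("%s\n", name);	/* pkcs1pad(rsa,sha256) */
	if (determine_akcipher("pkcs1", NULL, "rsa", name) == 0)
		printf("%s\n", name);	/* pkcs1pad(rsa) */
	if (determine_akcipher("raw", NULL, "rsa", name) == 0)
		printf("%s\n", name);	/* rsa */
	return 0;
}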
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 28198314bc39..ad95a58c6642 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -16,7 +16,9 @@
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/keyctl.h>
#include <crypto/public_key.h>
+#include <keys/user-type.h>
#include "asymmetric_keys.h"
/*
@@ -37,6 +39,99 @@ void public_key_signature_free(struct public_key_signature *sig)
EXPORT_SYMBOL_GPL(public_key_signature_free);
/**
+ * query_asymmetric_key - Get information about an asymmetric key.
+ * @params: Various parameters.
+ * @info: Where to put the information.
+ */
+int query_asymmetric_key(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info)
+{
+ const struct asymmetric_key_subtype *subtype;
+ struct key *key = params->key;
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ if (key->type != &key_type_asymmetric)
+ return -EINVAL;
+ subtype = asymmetric_key_subtype(key);
+ if (!subtype ||
+ !key->payload.data[0])
+ return -EINVAL;
+ if (!subtype->query)
+ return -ENOTSUPP;
+
+ ret = subtype->query(params, info);
+
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(query_asymmetric_key);
+
+/**
+ * encrypt_blob - Encrypt data using an asymmetric key
+ * @params: Various parameters
+ * @data: Data blob to be encrypted, length params->data_len
+ * @enc: Encrypted data buffer, length params->enc_len
+ *
+ * Encrypt the specified data blob using the private key specified by
+ * params->key. The encrypted data is wrapped in an encoding if
+ * params->encoding is specified (eg. "pkcs1").
+ *
+ * Returns the length of the data placed in the encrypted data buffer or an
+ * error.
+ */
+int encrypt_blob(struct kernel_pkey_params *params,
+ const void *data, void *enc)
+{
+ params->op = kernel_pkey_encrypt;
+ return asymmetric_key_eds_op(params, data, enc);
+}
+EXPORT_SYMBOL_GPL(encrypt_blob);
+
+/**
+ * decrypt_blob - Decrypt data using an asymmetric key
+ * @params: Various parameters
+ * @enc: Encrypted data to be decrypted, length params->enc_len
+ * @data: Decrypted data buffer, length params->data_len
+ *
+ * Decrypt the specified data blob using the private key specified by
+ * params->key. The decrypted data is wrapped in an encoding if
+ * params->encoding is specified (eg. "pkcs1").
+ *
+ * Returns the length of the data placed in the decrypted data buffer or an
+ * error.
+ */
+int decrypt_blob(struct kernel_pkey_params *params,
+ const void *enc, void *data)
+{
+ params->op = kernel_pkey_decrypt;
+ return asymmetric_key_eds_op(params, enc, data);
+}
+EXPORT_SYMBOL_GPL(decrypt_blob);
+
+/**
+ * create_signature - Sign some data using an asymmetric key
+ * @params: Various parameters
+ * @data: Data blob to be signed, length params->data_len
+ * @enc: Signature buffer, length params->enc_len
+ *
+ * Sign the specified data blob using the private key specified by params->key.
+ * The signature is wrapped in an encoding if params->encoding is specified
+ * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
+ * passed through params->hash_algo (eg. "sha1").
+ *
+ * Returns the length of the data placed in the signature buffer or an error.
+ */
+int create_signature(struct kernel_pkey_params *params,
+ const void *data, void *enc)
+{
+ params->op = kernel_pkey_sign;
+ return asymmetric_key_eds_op(params, data, enc);
+}
+EXPORT_SYMBOL_GPL(create_signature);
+
+/**
* verify_signature - Initiate the use of an asymmetric key to verify a signature
* @key: The asymmetric key to verify against
* @sig: The signature to check
diff --git a/crypto/asymmetric_keys/tpm.asn1 b/crypto/asymmetric_keys/tpm.asn1
new file mode 100644
index 000000000000..d7f194232f30
--- /dev/null
+++ b/crypto/asymmetric_keys/tpm.asn1
@@ -0,0 +1,5 @@
+--
+-- Unencrypted TPM Blob. For details of the format, see:
+-- http://david.woodhou.se/draft-woodhouse-cert-best-practice.html#I-D.mavrogiannopoulos-tpmuri
+--
+PrivateKeyInfo ::= OCTET STRING ({ tpm_note_key })
diff --git a/crypto/asymmetric_keys/tpm_parser.c b/crypto/asymmetric_keys/tpm_parser.c
new file mode 100644
index 000000000000..96405d8dcd98
--- /dev/null
+++ b/crypto/asymmetric_keys/tpm_parser.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "TPM-PARSER: "fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/asymmetric-parser.h>
+#include <crypto/asym_tpm_subtype.h>
+#include "tpm.asn1.h"
+
+struct tpm_parse_context {
+ const void *blob;
+ u32 blob_len;
+};
+
+/*
+ * Note the key data of the ASN.1 blob.
+ */
+int tpm_note_key(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct tpm_parse_context *ctx = context;
+
+ ctx->blob = value;
+ ctx->blob_len = vlen;
+
+ return 0;
+}
+
+/*
+ * Parse a TPM-encrypted private key blob.
+ */
+static struct tpm_key *tpm_parse(const void *data, size_t datalen)
+{
+ struct tpm_parse_context ctx;
+ long ret;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Attempt to decode the private key */
+ ret = asn1_ber_decoder(&tpm_decoder, &ctx, data, datalen);
+ if (ret < 0)
+ goto error;
+
+ return tpm_key_create(ctx.blob, ctx.blob_len);
+
+error:
+ return ERR_PTR(ret);
+}
+/*
+ * Attempt to parse a data blob for a key as a TPM private key blob.
+ */
+static int tpm_key_preparse(struct key_preparsed_payload *prep)
+{
+ struct tpm_key *tk;
+
+ /*
+ * TPM 1.2 keys are max 2048 bits long, so assume the blob is no
+ * more than 4x that
+ */
+ if (prep->datalen > 256 * 4)
+ return -EMSGSIZE;
+
+ tk = tpm_parse(prep->data, prep->datalen);
+
+ if (IS_ERR(tk))
+ return PTR_ERR(tk);
+
+ /* We're pinning the module by being linked against it */
+ __module_get(asym_tpm_subtype.owner);
+ prep->payload.data[asym_subtype] = &asym_tpm_subtype;
+ prep->payload.data[asym_key_ids] = NULL;
+ prep->payload.data[asym_crypto] = tk;
+ prep->payload.data[asym_auth] = NULL;
+ prep->quotalen = 100;
+ return 0;
+}
+
+static struct asymmetric_key_parser tpm_key_parser = {
+ .owner = THIS_MODULE,
+ .name = "tpm_parser",
+ .parse = tpm_key_preparse,
+};
+
+static int __init tpm_key_init(void)
+{
+ return register_asymmetric_key_parser(&tpm_key_parser);
+}
+
+static void __exit tpm_key_exit(void)
+{
+ unregister_asymmetric_key_parser(&tpm_key_parser);
+}
+
+module_init(tpm_key_init);
+module_exit(tpm_key_exit);
+
+MODULE_DESCRIPTION("TPM private key-blob parser");
+MODULE_LICENSE("GPL v2");
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index b6cabac4b62b..991f4d735a4e 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -199,35 +199,32 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
case OID_md4WithRSAEncryption:
ctx->cert->sig->hash_algo = "md4";
- ctx->cert->sig->pkey_algo = "rsa";
- break;
+ goto rsa_pkcs1;
case OID_sha1WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha1";
- ctx->cert->sig->pkey_algo = "rsa";
- break;
+ goto rsa_pkcs1;
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
- ctx->cert->sig->pkey_algo = "rsa";
- break;
+ goto rsa_pkcs1;
case OID_sha384WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha384";
- ctx->cert->sig->pkey_algo = "rsa";
- break;
+ goto rsa_pkcs1;
case OID_sha512WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha512";
- ctx->cert->sig->pkey_algo = "rsa";
- break;
+ goto rsa_pkcs1;
case OID_sha224WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha224";
- ctx->cert->sig->pkey_algo = "rsa";
- break;
+ goto rsa_pkcs1;
}
+rsa_pkcs1:
+ ctx->cert->sig->pkey_algo = "rsa";
+ ctx->cert->sig->encoding = "pkcs1";
ctx->algo_oid = ctx->last_oid;
return 0;
}
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 812476e46821..cfc04e15fd97 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -392,7 +392,8 @@ static int pkcs1pad_sign(struct akcipher_request *req)
if (!ctx->key_size)
return -EINVAL;
- digest_size = digest_info->size;
+ if (digest_info)
+ digest_size = digest_info->size;
if (req->src_len + digest_size > ctx->key_size - 11)
return -EOVERFLOW;
@@ -412,8 +413,9 @@ static int pkcs1pad_sign(struct akcipher_request *req)
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
- memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
- digest_info->size);
+ if (digest_info)
+ memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
+ digest_info->size);
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
@@ -475,10 +477,13 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
goto done;
pos++;
- if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
- goto done;
+ if (digest_info) {
+ if (crypto_memneq(out_buf + pos, digest_info->data,
+ digest_info->size))
+ goto done;
- pos += digest_info->size;
+ pos += digest_info->size;
+ }
err = 0;
@@ -608,11 +613,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
hash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(hash_name))
- return PTR_ERR(hash_name);
+ hash_name = NULL;
- digest_info = rsa_lookup_asn1(hash_name);
- if (!digest_info)
- return -EINVAL;
+ if (hash_name) {
+ digest_info = rsa_lookup_asn1(hash_name);
+ if (!digest_info)
+ return -EINVAL;
+ } else
+ digest_info = NULL;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -632,14 +640,29 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
err = -ENAMETOOLONG;
- if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
- CRYPTO_MAX_ALG_NAME ||
- snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "pkcs1pad(%s,%s)",
- rsa_alg->base.cra_driver_name, hash_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ if (!hash_name) {
+ if (snprintf(inst->alg.base.cra_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+
+ if (snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+ } else {
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
+ hash_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+
+ if (snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
+ rsa_alg->base.cra_driver_name,
+ hash_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+ }
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
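With the hash template argument now optional, pkcs1pad emits an EMSA-PKCS1-v1_5 block whose DigestInfo prefix may be absent (the "pkcs1pad(rsa)" case used for raw-digest signing). The following standalone sketch builds that layout per RFC 8017 section 9.2 (0x00, 0x01, at least eight 0xff padding bytes, 0x00, optional DigestInfo, digest); buffer sizes are toy values, and the leading 0x00 octet, which the kernel leaves implicit, is written out explicitly here.

/* Sketch: EMSA-PKCS1-v1_5 encoding with an optional DigestInfo prefix. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int emsa_pkcs1_v1_5(uint8_t *em, size_t em_len,
			   const uint8_t *digest_info, size_t di_len,
			   const uint8_t *digest, size_t d_len)
{
	size_t t_len = di_len + d_len;
	size_t ps_len;

	if (em_len < t_len + 11)	/* need at least 8 bytes of 0xff padding */
		return -1;		/* -EOVERFLOW in the kernel */
	ps_len = em_len - t_len - 3;

	em[0] = 0x00;
	em[1] = 0x01;
	memset(em + 2, 0xff, ps_len);
	em[2 + ps_len] = 0x00;
	if (di_len)			/* absent for "pkcs1pad(rsa)" */
		memcpy(em + 3 + ps_len, digest_info, di_len);
	memcpy(em + 3 + ps_len + di_len, digest, d_len);
	return 0;
}

int main(void)
{
	uint8_t em[64], digest[20] = { 0 };
	size_t i;

	/* 512-bit toy modulus, no DigestInfo: the digest-only signing case */
	if (emsa_pkcs1_v1_5(em, sizeof(em), NULL, 0, digest, sizeof(digest)))
		return 1;
	for (i = 0; i < sizeof(em); i++)
		printf("%02x%c", em[i], ((i & 15) == 15) ? '\n' : ' ');
	return 0;
}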
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 365e6c1a729e..8f3a444c6ea9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
config XPOWER_PMIC_OPREGION
bool "ACPI operation region support for XPower AXP288 PMIC"
- depends on MFD_AXP20X_I2C
+ depends on MFD_AXP20X_I2C && IOSF_MBI
help
This config adds ACPI operation region support for XPower AXP288 PMIC.
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 6b0d3ef7309c..8fe0960ea572 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
- result = add_memory(node, info->start_addr, info->length);
+ result = __add_memory(node, info->start_addr, info->length);
/*
* If the memory block has been used by the kernel, add_memory()
@@ -282,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
nid = memory_add_physaddr_to_nid(info->start_addr);
acpi_unbind_memory_blocks(info);
- remove_memory(nid, info->start_addr, info->length);
+ __remove_memory(nid, info->start_addr, info->length);
list_del(&info->list);
kfree(info);
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a7c2673ffd36..824ae985ad93 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -126,6 +126,7 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
return 0;
}
+EXPORT_SYMBOL(acpi_device_get_power);
static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
{
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 85167603b9c9..274699463b4f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index aadc86db804c..2579675b7082 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -8,8 +8,9 @@
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/mfd/axp20x.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <asm/iosf_mbi.h>
#include "intel_pmic.h"
#define XPOWER_GPADC_LOW 0x5b
@@ -172,15 +173,21 @@ static int intel_xpower_pmic_get_power(struct regmap *regmap, int reg,
static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
int bit, bool on)
{
- int data;
+ int data, ret;
/* GPIO1 LDO regulator needs special handling */
if (reg == XPOWER_GPI1_CTRL)
return regmap_update_bits(regmap, reg, GPI1_LDO_MASK,
on ? GPI1_LDO_ON : GPI1_LDO_OFF);
- if (regmap_read(regmap, reg, &data))
- return -EIO;
+ ret = iosf_mbi_block_punit_i2c_access();
+ if (ret)
+ return ret;
+
+ if (regmap_read(regmap, reg, &data)) {
+ ret = -EIO;
+ goto out;
+ }
if (on)
data |= BIT(bit);
@@ -188,9 +195,11 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
data &= ~BIT(bit);
if (regmap_write(regmap, reg, data))
- return -EIO;
+ ret = -EIO;
+out:
+ iosf_mbi_unblock_punit_i2c_access();
- return 0;
+ return ret;
}
/**
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a3d012b08fc5..61203eebf3a1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -31,9 +31,8 @@
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/acpi.h>
-#include <linux/bootmem.h>
-#include <linux/earlycpio.h>
#include <linux/memblock.h>
+#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include "internal.h"
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 3b25a643058c..21b9b2f2470a 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -155,10 +155,9 @@ struct logical_input {
int release_data;
} std;
struct { /* valid when type == INPUT_TYPE_KBD */
- /* strings can be non null-terminated */
- char press_str[sizeof(void *) + sizeof(int)];
- char repeat_str[sizeof(void *) + sizeof(int)];
- char release_str[sizeof(void *) + sizeof(int)];
+ char press_str[sizeof(void *) + sizeof(int)] __nonstring;
+ char repeat_str[sizeof(void *) + sizeof(int)] __nonstring;
+ char release_str[sizeof(void *) + sizeof(int)] __nonstring;
} kbd;
} u;
};
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 817320c7c4c1..0e5985682642 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -228,7 +228,6 @@ static bool pages_correctly_probed(unsigned long start_pfn)
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
- * Must already be protected by mem_hotplug_begin().
*/
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
@@ -294,7 +293,6 @@ static int memory_subsys_online(struct device *dev)
if (mem->online_type < 0)
mem->online_type = MMOP_ONLINE_KEEP;
- /* Already under protection of mem_hotplug_begin() */
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
/* clear online_type */
@@ -341,19 +339,11 @@ store_mem_state(struct device *dev,
goto err;
}
- /*
- * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
- * the correct memory block to online before doing device_online(dev),
- * which will take dev->mutex. Take the lock early to prevent an
- * inversion, memory_subsys_online() callbacks will be implemented by
- * assuming it's already protected.
- */
- mem_hotplug_begin();
-
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
case MMOP_ONLINE_KEEP:
+ /* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
@@ -364,7 +354,6 @@ store_mem_state(struct device *dev,
ret = -EINVAL; /* should never happen */
}
- mem_hotplug_done();
err:
unlock_device_hotplug();
@@ -519,15 +508,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ goto out;
+
nid = memory_add_physaddr_to_nid(phys_addr);
- ret = add_memory(nid, phys_addr,
- MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+ ret = __add_memory(nid, phys_addr,
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block);
if (ret)
goto out;
ret = count;
out:
+ unlock_device_hotplug();
return ret;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 23cf4427f425..41b91af95afb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index df8103dd40ac..c18586fccb6f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int i)
disk->first_minor = i * max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
- disk->queue = brd->brd_queue;
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
- disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
+ brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
/* Tell the block layer that this is not a rotational device */
- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
return brd;
@@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new)
brd = brd_alloc(i);
if (brd) {
+ brd->brd_disk->queue = brd->brd_queue;
add_disk(brd->brd_disk);
list_add_tail(&brd->brd_list, &brd_devices);
}
@@ -503,8 +503,14 @@ static int __init brd_init(void)
/* point of no return */
- list_for_each_entry(brd, &brd_devices, brd_list)
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ /*
+ * associate with the queue just before adding the disk to
+ * avoid messing up the failure path
+ */
+ brd->brd_disk->queue = brd->brd_queue;
add_disk(brd->brd_disk);
+ }
blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
THIS_MODULE, brd_probe, NULL, NULL);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55fd104f1ed4..fa8204214ac0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1856,7 +1856,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
/* THINK if (signal_pending) return ... ? */
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+ iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
if (sock == connection->data.socket) {
rcu_read_lock();
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index fc67fd853375..61c392752fe4 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -516,7 +516,7 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
return sock_recvmsg(sock, &msg, msg.msg_flags);
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index abad6d15f956..cb0cc8685076 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -77,7 +77,6 @@
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
-#include <linux/blk-cgroup.h>
#include "loop.h"
@@ -269,7 +268,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
+ iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
@@ -347,7 +346,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
+ iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -388,7 +387,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
- iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
+ iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
@@ -555,8 +554,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
atomic_set(&cmd->ref, 2);
- iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
- segments, blk_rq_bytes(rq));
+ iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
@@ -1761,8 +1759,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
- cmd->css = &bio_blkcg(rq->bio)->css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+ cmd->css = rq->bio->bi_css;
css_get(cmd->css);
} else
#endif
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index dfc8de6ce525..a7daa8acbab3 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1942,8 +1942,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
dev_warn(&dd->pdev->dev,
"data movement but "
"sect_count is 0\n");
- err = -EINVAL;
- goto abort;
+ err = -EINVAL;
+ goto abort;
}
}
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 14a51254c3db..4d4d6129ff66 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -473,7 +473,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
u32 nbd_cmd_flags = 0;
int sent = nsock->sent, skip = 0;
- iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
switch (req_op(req)) {
case REQ_OP_DISCARD:
@@ -564,8 +564,7 @@ send_pages:
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
req, bvec.bv_len);
- iov_iter_bvec(&from, ITER_BVEC | WRITE,
- &bvec, 1, bvec.bv_len);
+ iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
if (skip) {
if (skip >= iov_iter_count(&from)) {
skip -= iov_iter_count(&from);
@@ -624,7 +623,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
int ret = 0;
reply.magic = 0;
- iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
+ iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result <= 0) {
if (!nbd_disconnected(config))
@@ -678,8 +677,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
- iov_iter_bvec(&to, ITER_BVEC | READ,
- &bvec, 1, bvec.bv_len);
+ iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
@@ -1073,7 +1071,7 @@ static void send_disconnects(struct nbd_device *nbd)
for (i = 0; i < config->num_connections; i++) {
struct nbd_sock *nsock = config->socks[i];
- iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 73ed5f3a862d..8e5140bbf241 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1500,9 +1500,6 @@ rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
goto err_req;
- if (ceph_osdc_alloc_messages(req, GFP_NOIO))
- goto err_req;
-
return req;
err_req:
@@ -1945,6 +1942,10 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req)
}
if (ret)
return ret;
+
+ ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
+ if (ret)
+ return ret;
}
return 0;
@@ -2374,8 +2375,7 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
if (!obj_req->osd_req)
return -ENOMEM;
- ret = osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
- "copyup");
+ ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
if (ret)
return ret;
@@ -2405,6 +2405,10 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
rbd_assert(0);
}
+ ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
+ if (ret)
+ return ret;
+
rbd_obj_request_submit(obj_req);
return 0;
}
@@ -3784,10 +3788,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
- ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
- if (ret)
- goto out_req;
-
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
@@ -3798,6 +3798,10 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
true);
+ ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
+ if (ret)
+ goto out_req;
+
ceph_osdc_start_request(osdc, req, false);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0)
@@ -6067,7 +6071,7 @@ static ssize_t rbd_remove_single_major(struct bus_type *bus,
* create control files in sysfs
* /sys/bus/rbd/...
*/
-static int rbd_sysfs_init(void)
+static int __init rbd_sysfs_init(void)
{
int ret;
@@ -6082,13 +6086,13 @@ static int rbd_sysfs_init(void)
return ret;
}
-static void rbd_sysfs_cleanup(void)
+static void __exit rbd_sysfs_cleanup(void)
{
bus_unregister(&rbd_bus_type);
device_unregister(&rbd_root_dev);
}
-static int rbd_slab_init(void)
+static int __init rbd_slab_init(void)
{
rbd_assert(!rbd_img_request_cache);
rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 1deafb4db60c..81cdb4eaca07 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -287,6 +287,7 @@ source "drivers/clk/actions/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
+source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ad11421bdacd..72be7a38cff1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -72,7 +72,8 @@ obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
obj-$(CONFIG_ARCH_MXC) += imx/
-obj-$(CONFIG_MACH_INGENIC) += ingenic/
+obj-y += ingenic/
+obj-$(CONFIG_ARCH_K3) += keystone/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-y += mediatek/
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig
index dc38c85a4833..04f0a6355726 100644
--- a/drivers/clk/actions/Kconfig
+++ b/drivers/clk/actions/Kconfig
@@ -2,6 +2,7 @@ config CLK_ACTIONS
bool "Clock driver for Actions Semi SoCs"
depends on ARCH_ACTIONS || COMPILE_TEST
select REGMAP_MMIO
+ select RESET_CONTROLLER
default ARCH_ACTIONS
if CLK_ACTIONS
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile
index 78c17d56f991..ccfdf9781cef 100644
--- a/drivers/clk/actions/Makefile
+++ b/drivers/clk/actions/Makefile
@@ -7,6 +7,7 @@ clk-owl-y += owl-divider.o
clk-owl-y += owl-factor.o
clk-owl-y += owl-composite.o
clk-owl-y += owl-pll.o
+clk-owl-y += owl-reset.o
# SoC support
obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index 61c1071b5180..32dd29e0a37e 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -39,7 +39,7 @@ static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
}
int owl_clk_regmap_init(struct platform_device *pdev,
- const struct owl_clk_desc *desc)
+ struct owl_clk_desc *desc)
{
void __iomem *base;
struct regmap *regmap;
@@ -57,6 +57,7 @@ int owl_clk_regmap_init(struct platform_device *pdev,
}
owl_clk_set_regmap(desc, regmap);
+ desc->regmap = regmap;
return 0;
}
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
index 4fd726ec54a6..5a866a8b913d 100644
--- a/drivers/clk/actions/owl-common.h
+++ b/drivers/clk/actions/owl-common.h
@@ -26,6 +26,9 @@ struct owl_clk_desc {
struct owl_clk_common **clks;
unsigned long num_clks;
struct clk_hw_onecell_data *hw_clks;
+ const struct owl_reset_map *resets;
+ unsigned long num_resets;
+ struct regmap *regmap;
};
static inline struct owl_clk_common *
@@ -35,7 +38,7 @@ static inline struct owl_clk_common *
}
int owl_clk_regmap_init(struct platform_device *pdev,
- const struct owl_clk_desc *desc);
+ struct owl_clk_desc *desc);
int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks);
#endif /* _OWL_COMMON_H_ */
diff --git a/drivers/clk/actions/owl-reset.c b/drivers/clk/actions/owl-reset.c
new file mode 100644
index 000000000000..203f8f34a8d4
--- /dev/null
+++ b/drivers/clk/actions/owl-reset.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// Actions Semi Owl SoCs Reset Management Unit driver
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include "owl-reset.h"
+
+static int owl_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct owl_reset *reset = to_owl_reset(rcdev);
+ const struct owl_reset_map *map = &reset->reset_map[id];
+
+ return regmap_update_bits(reset->regmap, map->reg, map->bit, 0);
+}
+
+static int owl_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct owl_reset *reset = to_owl_reset(rcdev);
+ const struct owl_reset_map *map = &reset->reset_map[id];
+
+ return regmap_update_bits(reset->regmap, map->reg, map->bit, map->bit);
+}
+
+static int owl_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ owl_reset_assert(rcdev, id);
+ udelay(1);
+ owl_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
+static int owl_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct owl_reset *reset = to_owl_reset(rcdev);
+ const struct owl_reset_map *map = &reset->reset_map[id];
+ u32 reg;
+ int ret;
+
+ ret = regmap_read(reset->regmap, map->reg, &reg);
+ if (ret)
+ return ret;
+
+ /*
+ * The reset control API expects 0 if reset is not asserted,
+ * which is the opposite of what our hardware uses.
+ */
+ return !(map->bit & reg);
+}
+
+const struct reset_control_ops owl_reset_ops = {
+ .assert = owl_reset_assert,
+ .deassert = owl_reset_deassert,
+ .reset = owl_reset_reset,
+ .status = owl_reset_status,
+};
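The status() inversion above is easy to get backwards: in the Owl RMU a cleared bit holds the block in reset, so the callback must report the opposite of the raw register value. Here is a standalone model of the three bit operations against a plain integer register; the UART0 bit position is taken from the S700 reset map below, and the struct name is only illustrative.

/* Sketch: the bit semantics behind owl-reset.c, on a plain register. */
#include <assert.h>
#include <stdint.h>

struct fake_rmu { uint32_t devrst; };

static void rst_assert(struct fake_rmu *rmu, uint32_t bit)   { rmu->devrst &= ~bit; }
static void rst_deassert(struct fake_rmu *rmu, uint32_t bit) { rmu->devrst |= bit; }
static int  rst_status(const struct fake_rmu *rmu, uint32_t bit)
{
	return !(rmu->devrst & bit);	/* 1 = asserted, 0 = released */
}

int main(void)
{
	struct fake_rmu rmu = { .devrst = 0 };
	uint32_t uart0 = 1u << 8;	/* RESET_UART0 in CMU_DEVRST1 on the S700 */

	assert(rst_status(&rmu, uart0) == 1);	/* starts held in reset */
	rst_deassert(&rmu, uart0);
	assert(rst_status(&rmu, uart0) == 0);
	rst_assert(&rmu, uart0);
	assert(rst_status(&rmu, uart0) == 1);
	return 0;
}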
diff --git a/drivers/clk/actions/owl-reset.h b/drivers/clk/actions/owl-reset.h
new file mode 100644
index 000000000000..10f5774979a6
--- /dev/null
+++ b/drivers/clk/actions/owl-reset.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// Actions Semi Owl SoCs Reset Management Unit driver
+//
+// Copyright (c) 2018 Linaro Ltd.
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+#ifndef _OWL_RESET_H_
+#define _OWL_RESET_H_
+
+#include <linux/reset-controller.h>
+
+struct owl_reset_map {
+ u32 reg;
+ u32 bit;
+};
+
+struct owl_reset {
+ struct reset_controller_dev rcdev;
+ const struct owl_reset_map *reset_map;
+ struct regmap *regmap;
+};
+
+static inline struct owl_reset *to_owl_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct owl_reset, rcdev);
+}
+
+extern const struct reset_control_ops owl_reset_ops;
+
+#endif /* _OWL_RESET_H_ */
diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c
index 5e9531392ee5..a2f34d13fb54 100644
--- a/drivers/clk/actions/owl-s700.c
+++ b/drivers/clk/actions/owl-s700.c
@@ -20,8 +20,10 @@
#include "owl-gate.h"
#include "owl-mux.h"
#include "owl-pll.h"
+#include "owl-reset.h"
#include <dt-bindings/clock/actions,s700-cmu.h>
+#include <dt-bindings/reset/actions,s700-reset.h>
#define CMU_COREPLL (0x0000)
#define CMU_DEVPLL (0x0004)
@@ -569,20 +571,69 @@ static struct clk_hw_onecell_data s700_hw_clks = {
.num = CLK_NR_CLKS,
};
-static const struct owl_clk_desc s700_clk_desc = {
+static const struct owl_reset_map s700_resets[] = {
+ [RESET_DE] = { CMU_DEVRST0, BIT(0) },
+ [RESET_LCD0] = { CMU_DEVRST0, BIT(1) },
+ [RESET_DSI] = { CMU_DEVRST0, BIT(2) },
+ [RESET_CSI] = { CMU_DEVRST0, BIT(13) },
+ [RESET_SI] = { CMU_DEVRST0, BIT(14) },
+ [RESET_I2C0] = { CMU_DEVRST1, BIT(0) },
+ [RESET_I2C1] = { CMU_DEVRST1, BIT(1) },
+ [RESET_I2C2] = { CMU_DEVRST1, BIT(2) },
+ [RESET_I2C3] = { CMU_DEVRST1, BIT(3) },
+ [RESET_SPI0] = { CMU_DEVRST1, BIT(4) },
+ [RESET_SPI1] = { CMU_DEVRST1, BIT(5) },
+ [RESET_SPI2] = { CMU_DEVRST1, BIT(6) },
+ [RESET_SPI3] = { CMU_DEVRST1, BIT(7) },
+ [RESET_UART0] = { CMU_DEVRST1, BIT(8) },
+ [RESET_UART1] = { CMU_DEVRST1, BIT(9) },
+ [RESET_UART2] = { CMU_DEVRST1, BIT(10) },
+ [RESET_UART3] = { CMU_DEVRST1, BIT(11) },
+ [RESET_UART4] = { CMU_DEVRST1, BIT(12) },
+ [RESET_UART5] = { CMU_DEVRST1, BIT(13) },
+ [RESET_UART6] = { CMU_DEVRST1, BIT(14) },
+ [RESET_KEY] = { CMU_DEVRST1, BIT(24) },
+ [RESET_GPIO] = { CMU_DEVRST1, BIT(25) },
+ [RESET_AUDIO] = { CMU_DEVRST1, BIT(29) },
+};
+
+static struct owl_clk_desc s700_clk_desc = {
.clks = s700_clks,
.num_clks = ARRAY_SIZE(s700_clks),
.hw_clks = &s700_hw_clks,
+
+ .resets = s700_resets,
+ .num_resets = ARRAY_SIZE(s700_resets),
};
static int s700_clk_probe(struct platform_device *pdev)
{
- const struct owl_clk_desc *desc;
+ struct owl_clk_desc *desc;
+ struct owl_reset *reset;
+ int ret;
desc = &s700_clk_desc;
owl_clk_regmap_init(pdev, desc);
+ /*
+ * FIXME: Reset controller registration should be moved to
+ * common code, once all SoCs of the Owl family support it.
+ */
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.ops = &owl_reset_ops;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->reset_map = desc->resets;
+ reset->regmap = desc->regmap;
+
+ ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register reset controller\n");
+
return owl_clk_probe(&pdev->dev, desc->hw_clks);
}
diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c
index 7f60ed6afe63..790890978424 100644
--- a/drivers/clk/actions/owl-s900.c
+++ b/drivers/clk/actions/owl-s900.c
@@ -19,8 +19,10 @@
#include "owl-gate.h"
#include "owl-mux.h"
#include "owl-pll.h"
+#include "owl-reset.h"
#include <dt-bindings/clock/actions,s900-cmu.h>
+#include <dt-bindings/reset/actions,s900-reset.h>
#define CMU_COREPLL (0x0000)
#define CMU_DEVPLL (0x0004)
@@ -684,20 +686,100 @@ static struct clk_hw_onecell_data s900_hw_clks = {
.num = CLK_NR_CLKS,
};
-static const struct owl_clk_desc s900_clk_desc = {
+static const struct owl_reset_map s900_resets[] = {
+ [RESET_DMAC] = { CMU_DEVRST0, BIT(0) },
+ [RESET_SRAMI] = { CMU_DEVRST0, BIT(1) },
+ [RESET_DDR_CTL_PHY] = { CMU_DEVRST0, BIT(2) },
+ [RESET_NANDC0] = { CMU_DEVRST0, BIT(3) },
+ [RESET_SD0] = { CMU_DEVRST0, BIT(4) },
+ [RESET_SD1] = { CMU_DEVRST0, BIT(5) },
+ [RESET_PCM1] = { CMU_DEVRST0, BIT(6) },
+ [RESET_DE] = { CMU_DEVRST0, BIT(7) },
+ [RESET_LVDS] = { CMU_DEVRST0, BIT(8) },
+ [RESET_SD2] = { CMU_DEVRST0, BIT(9) },
+ [RESET_DSI] = { CMU_DEVRST0, BIT(10) },
+ [RESET_CSI0] = { CMU_DEVRST0, BIT(11) },
+ [RESET_BISP_AXI] = { CMU_DEVRST0, BIT(12) },
+ [RESET_CSI1] = { CMU_DEVRST0, BIT(13) },
+ [RESET_GPIO] = { CMU_DEVRST0, BIT(15) },
+ [RESET_EDP] = { CMU_DEVRST0, BIT(16) },
+ [RESET_AUDIO] = { CMU_DEVRST0, BIT(17) },
+ [RESET_PCM0] = { CMU_DEVRST0, BIT(18) },
+ [RESET_HDE] = { CMU_DEVRST0, BIT(21) },
+ [RESET_GPU3D_PA] = { CMU_DEVRST0, BIT(22) },
+ [RESET_IMX] = { CMU_DEVRST0, BIT(23) },
+ [RESET_SE] = { CMU_DEVRST0, BIT(24) },
+ [RESET_NANDC1] = { CMU_DEVRST0, BIT(25) },
+ [RESET_SD3] = { CMU_DEVRST0, BIT(26) },
+ [RESET_GIC] = { CMU_DEVRST0, BIT(27) },
+ [RESET_GPU3D_PB] = { CMU_DEVRST0, BIT(28) },
+ [RESET_DDR_CTL_PHY_AXI] = { CMU_DEVRST0, BIT(29) },
+ [RESET_CMU_DDR] = { CMU_DEVRST0, BIT(30) },
+ [RESET_DMM] = { CMU_DEVRST0, BIT(31) },
+ [RESET_USB2HUB] = { CMU_DEVRST1, BIT(0) },
+ [RESET_USB2HSIC] = { CMU_DEVRST1, BIT(1) },
+ [RESET_HDMI] = { CMU_DEVRST1, BIT(2) },
+ [RESET_HDCP2TX] = { CMU_DEVRST1, BIT(3) },
+ [RESET_UART6] = { CMU_DEVRST1, BIT(4) },
+ [RESET_UART0] = { CMU_DEVRST1, BIT(5) },
+ [RESET_UART1] = { CMU_DEVRST1, BIT(6) },
+ [RESET_UART2] = { CMU_DEVRST1, BIT(7) },
+ [RESET_SPI0] = { CMU_DEVRST1, BIT(8) },
+ [RESET_SPI1] = { CMU_DEVRST1, BIT(9) },
+ [RESET_SPI2] = { CMU_DEVRST1, BIT(10) },
+ [RESET_SPI3] = { CMU_DEVRST1, BIT(11) },
+ [RESET_I2C0] = { CMU_DEVRST1, BIT(12) },
+ [RESET_I2C1] = { CMU_DEVRST1, BIT(13) },
+ [RESET_USB3] = { CMU_DEVRST1, BIT(14) },
+ [RESET_UART3] = { CMU_DEVRST1, BIT(15) },
+ [RESET_UART4] = { CMU_DEVRST1, BIT(16) },
+ [RESET_UART5] = { CMU_DEVRST1, BIT(17) },
+ [RESET_I2C2] = { CMU_DEVRST1, BIT(18) },
+ [RESET_I2C3] = { CMU_DEVRST1, BIT(19) },
+ [RESET_ETHERNET] = { CMU_DEVRST1, BIT(20) },
+ [RESET_CHIPID] = { CMU_DEVRST1, BIT(21) },
+ [RESET_I2C4] = { CMU_DEVRST1, BIT(22) },
+ [RESET_I2C5] = { CMU_DEVRST1, BIT(23) },
+ [RESET_CPU_SCNT] = { CMU_DEVRST1, BIT(30) }
+};
+
+static struct owl_clk_desc s900_clk_desc = {
.clks = s900_clks,
.num_clks = ARRAY_SIZE(s900_clks),
.hw_clks = &s900_hw_clks,
+
+ .resets = s900_resets,
+ .num_resets = ARRAY_SIZE(s900_resets),
};
static int s900_clk_probe(struct platform_device *pdev)
{
- const struct owl_clk_desc *desc;
+ struct owl_clk_desc *desc;
+ struct owl_reset *reset;
+ int ret;
desc = &s900_clk_desc;
owl_clk_regmap_init(pdev, desc);
+ /*
+ * FIXME: Reset controller registration should be moved to
+ * common code, once all SoCs of the Owl family support it.
+ */
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.ops = &owl_reset_ops;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->reset_map = desc->resets;
+ reset->regmap = desc->regmap;
+
+ ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register reset controller\n");
+
return owl_clk_probe(&pdev->dev, desc->hw_clks);
}
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index facc169ebb68..c75df1cad60e 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -3,7 +3,7 @@
# Makefile for at91 specific clk
#
-obj-y += pmc.o sckc.o
+obj-y += pmc.o sckc.o dt-compat.o
obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o
obj-y += clk-system.o clk-peripheral.o clk-programmable.o
@@ -14,3 +14,6 @@ obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o
+obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
+obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
new file mode 100644
index 000000000000..b1af5a395423
--- /dev/null
+++ b/drivers/clk/at91/at91sam9260.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+struct sck {
+ char *n;
+ char *p;
+ u8 id;
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+struct at91sam926x_data {
+ const struct clk_pll_layout *plla_layout;
+ const struct clk_pll_characteristics *plla_characteristics;
+ const struct clk_pll_layout *pllb_layout;
+ const struct clk_pll_characteristics *pllb_characteristics;
+ const struct clk_master_characteristics *mck_characteristics;
+ const struct sck *sck;
+ const struct pck *pck;
+ u8 num_sck;
+ u8 num_pck;
+ u8 num_progck;
+ bool has_slck;
+};
+
+static const struct clk_master_characteristics sam9260_mck_characteristics = {
+ .output = { .min = 0, .max = 105000000 },
+ .divisors = { 1, 2, 4, 0 },
+};
+
+static u8 sam9260_plla_out[] = { 0, 2 };
+
+static u16 sam9260_plla_icpll[] = { 1, 1 };
+
+static struct clk_range sam9260_plla_outputs[] = {
+ { .min = 80000000, .max = 160000000 },
+ { .min = 150000000, .max = 240000000 },
+};
+
+static const struct clk_pll_characteristics sam9260_plla_characteristics = {
+ .input = { .min = 1000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(sam9260_plla_outputs),
+ .output = sam9260_plla_outputs,
+ .icpll = sam9260_plla_icpll,
+ .out = sam9260_plla_out,
+};
+
+static u8 sam9260_pllb_out[] = { 1 };
+
+static u16 sam9260_pllb_icpll[] = { 1 };
+
+static struct clk_range sam9260_pllb_outputs[] = {
+ { .min = 70000000, .max = 130000000 },
+};
+
+static const struct clk_pll_characteristics sam9260_pllb_characteristics = {
+ .input = { .min = 1000000, .max = 5000000 },
+ .num_output = ARRAY_SIZE(sam9260_pllb_outputs),
+ .output = sam9260_pllb_outputs,
+ .icpll = sam9260_pllb_icpll,
+ .out = sam9260_pllb_out,
+};
+
+static const struct sck at91sam9260_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+static const struct pck at91sam9260_periphck[] = {
+ { .n = "pioA_clk", .id = 2 },
+ { .n = "pioB_clk", .id = 3 },
+ { .n = "pioC_clk", .id = 4 },
+ { .n = "adc_clk", .id = 5 },
+ { .n = "usart0_clk", .id = 6 },
+ { .n = "usart1_clk", .id = 7 },
+ { .n = "usart2_clk", .id = 8 },
+ { .n = "mci0_clk", .id = 9 },
+ { .n = "udc_clk", .id = 10 },
+ { .n = "twi0_clk", .id = 11 },
+ { .n = "spi0_clk", .id = 12 },
+ { .n = "spi1_clk", .id = 13 },
+ { .n = "ssc0_clk", .id = 14 },
+ { .n = "tc0_clk", .id = 17 },
+ { .n = "tc1_clk", .id = 18 },
+ { .n = "tc2_clk", .id = 19 },
+ { .n = "ohci_clk", .id = 20 },
+ { .n = "macb0_clk", .id = 21 },
+ { .n = "isi_clk", .id = 22 },
+ { .n = "usart3_clk", .id = 23 },
+ { .n = "uart0_clk", .id = 24 },
+ { .n = "uart1_clk", .id = 25 },
+ { .n = "tc3_clk", .id = 26 },
+ { .n = "tc4_clk", .id = 27 },
+ { .n = "tc5_clk", .id = 28 },
+};
+
+static struct at91sam926x_data at91sam9260_data = {
+ .plla_layout = &at91rm9200_pll_layout,
+ .plla_characteristics = &sam9260_plla_characteristics,
+ .pllb_layout = &at91rm9200_pll_layout,
+ .pllb_characteristics = &sam9260_pllb_characteristics,
+ .mck_characteristics = &sam9260_mck_characteristics,
+ .sck = at91sam9260_systemck,
+ .num_sck = ARRAY_SIZE(at91sam9260_systemck),
+ .pck = at91sam9260_periphck,
+ .num_pck = ARRAY_SIZE(at91sam9260_periphck),
+ .num_progck = 2,
+ .has_slck = true,
+};
+
+static const struct clk_master_characteristics sam9g20_mck_characteristics = {
+ .output = { .min = 0, .max = 133000000 },
+ .divisors = { 1, 2, 4, 6 },
+};
+
+static u8 sam9g20_plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+static u16 sam9g20_plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+static struct clk_range sam9g20_plla_outputs[] = {
+ { .min = 745000000, .max = 800000000 },
+ { .min = 695000000, .max = 750000000 },
+ { .min = 645000000, .max = 700000000 },
+ { .min = 595000000, .max = 650000000 },
+ { .min = 545000000, .max = 600000000 },
+ { .min = 495000000, .max = 550000000 },
+ { .min = 445000000, .max = 500000000 },
+ { .min = 400000000, .max = 450000000 },
+};
+
+static const struct clk_pll_characteristics sam9g20_plla_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(sam9g20_plla_outputs),
+ .output = sam9g20_plla_outputs,
+ .icpll = sam9g20_plla_icpll,
+ .out = sam9g20_plla_out,
+};
+
+static u8 sam9g20_pllb_out[] = { 0 };
+
+static u16 sam9g20_pllb_icpll[] = { 0 };
+
+static struct clk_range sam9g20_pllb_outputs[] = {
+ { .min = 30000000, .max = 100000000 },
+};
+
+static const struct clk_pll_characteristics sam9g20_pllb_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(sam9g20_pllb_outputs),
+ .output = sam9g20_pllb_outputs,
+ .icpll = sam9g20_pllb_icpll,
+ .out = sam9g20_pllb_out,
+};
+
+static struct at91sam926x_data at91sam9g20_data = {
+ .plla_layout = &at91sam9g45_pll_layout,
+ .plla_characteristics = &sam9g20_plla_characteristics,
+ .pllb_layout = &at91sam9g20_pllb_layout,
+ .pllb_characteristics = &sam9g20_pllb_characteristics,
+ .mck_characteristics = &sam9g20_mck_characteristics,
+ .sck = at91sam9260_systemck,
+ .num_sck = ARRAY_SIZE(at91sam9260_systemck),
+ .pck = at91sam9260_periphck,
+ .num_pck = ARRAY_SIZE(at91sam9260_periphck),
+ .num_progck = 2,
+ .has_slck = true,
+};
+
+static const struct clk_master_characteristics sam9261_mck_characteristics = {
+ .output = { .min = 0, .max = 94000000 },
+ .divisors = { 1, 2, 4, 0 },
+};
+
+static struct clk_range sam9261_plla_outputs[] = {
+ { .min = 80000000, .max = 200000000 },
+ { .min = 190000000, .max = 240000000 },
+};
+
+static const struct clk_pll_characteristics sam9261_plla_characteristics = {
+ .input = { .min = 1000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(sam9261_plla_outputs),
+ .output = sam9261_plla_outputs,
+ .icpll = sam9260_plla_icpll,
+ .out = sam9260_plla_out,
+};
+
+static u8 sam9261_pllb_out[] = { 1 };
+
+static u16 sam9261_pllb_icpll[] = { 1 };
+
+static struct clk_range sam9261_pllb_outputs[] = {
+ { .min = 70000000, .max = 130000000 },
+};
+
+static const struct clk_pll_characteristics sam9261_pllb_characteristics = {
+ .input = { .min = 1000000, .max = 5000000 },
+ .num_output = ARRAY_SIZE(sam9261_pllb_outputs),
+ .output = sam9261_pllb_outputs,
+ .icpll = sam9261_pllb_icpll,
+ .out = sam9261_pllb_out,
+};
+
+static const struct sck at91sam9261_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "pck3", .p = "prog3", .id = 11 },
+ { .n = "hclk0", .p = "masterck", .id = 16 },
+ { .n = "hclk1", .p = "masterck", .id = 17 },
+};
+
+static const struct pck at91sam9261_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "usart0_clk", .id = 6, },
+ { .n = "usart1_clk", .id = 7, },
+ { .n = "usart2_clk", .id = 8, },
+ { .n = "mci0_clk", .id = 9, },
+ { .n = "udc_clk", .id = 10, },
+ { .n = "twi0_clk", .id = 11, },
+ { .n = "spi0_clk", .id = 12, },
+ { .n = "spi1_clk", .id = 13, },
+ { .n = "ssc0_clk", .id = 14, },
+ { .n = "ssc1_clk", .id = 15, },
+ { .n = "ssc2_clk", .id = 16, },
+ { .n = "tc0_clk", .id = 17, },
+ { .n = "tc1_clk", .id = 18, },
+ { .n = "tc2_clk", .id = 19, },
+ { .n = "ohci_clk", .id = 20, },
+ { .n = "lcd_clk", .id = 21, },
+};
+
+static struct at91sam926x_data at91sam9261_data = {
+ .plla_layout = &at91rm9200_pll_layout,
+ .plla_characteristics = &sam9261_plla_characteristics,
+ .pllb_layout = &at91rm9200_pll_layout,
+ .pllb_characteristics = &sam9261_pllb_characteristics,
+ .mck_characteristics = &sam9261_mck_characteristics,
+ .sck = at91sam9261_systemck,
+ .num_sck = ARRAY_SIZE(at91sam9261_systemck),
+ .pck = at91sam9261_periphck,
+ .num_pck = ARRAY_SIZE(at91sam9261_periphck),
+ .num_progck = 4,
+};
+
+static const struct clk_master_characteristics sam9263_mck_characteristics = {
+ .output = { .min = 0, .max = 120000000 },
+ .divisors = { 1, 2, 4, 0 },
+};
+
+static struct clk_range sam9263_pll_outputs[] = {
+ { .min = 80000000, .max = 200000000 },
+ { .min = 190000000, .max = 240000000 },
+};
+
+static const struct clk_pll_characteristics sam9263_pll_characteristics = {
+ .input = { .min = 1000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(sam9263_pll_outputs),
+ .output = sam9263_pll_outputs,
+ .icpll = sam9260_plla_icpll,
+ .out = sam9260_plla_out,
+};
+
+static const struct sck at91sam9263_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "pck3", .p = "prog3", .id = 11 },
+};
+
+static const struct pck at91sam9263_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioCDE_clk", .id = 4, },
+ { .n = "usart0_clk", .id = 7, },
+ { .n = "usart1_clk", .id = 8, },
+ { .n = "usart2_clk", .id = 9, },
+ { .n = "mci0_clk", .id = 10, },
+ { .n = "mci1_clk", .id = 11, },
+ { .n = "can_clk", .id = 12, },
+ { .n = "twi0_clk", .id = 13, },
+ { .n = "spi0_clk", .id = 14, },
+ { .n = "spi1_clk", .id = 15, },
+ { .n = "ssc0_clk", .id = 16, },
+ { .n = "ssc1_clk", .id = 17, },
+ { .n = "ac97_clk", .id = 18, },
+ { .n = "tcb_clk", .id = 19, },
+ { .n = "pwm_clk", .id = 20, },
+ { .n = "macb0_clk", .id = 21, },
+ { .n = "g2de_clk", .id = 23, },
+ { .n = "udc_clk", .id = 24, },
+ { .n = "isi_clk", .id = 25, },
+ { .n = "lcd_clk", .id = 26, },
+ { .n = "dma_clk", .id = 27, },
+ { .n = "ohci_clk", .id = 29, },
+};
+
+static struct at91sam926x_data at91sam9263_data = {
+ .plla_layout = &at91rm9200_pll_layout,
+ .plla_characteristics = &sam9263_pll_characteristics,
+ .pllb_layout = &at91rm9200_pll_layout,
+ .pllb_characteristics = &sam9263_pll_characteristics,
+ .mck_characteristics = &sam9263_mck_characteristics,
+ .sck = at91sam9263_systemck,
+ .num_sck = ARRAY_SIZE(at91sam9263_systemck),
+ .pck = at91sam9263_periphck,
+ .num_pck = ARRAY_SIZE(at91sam9263_periphck),
+ .num_progck = 4,
+};
+
+static void __init at91sam926x_pmc_setup(struct device_node *np,
+ struct at91sam926x_data *data)
+{
+ const char *slowxtal_name, *mainxtal_name;
+ struct pmc_data *at91sam9260_pmc;
+ u32 usb_div[] = { 1, 2, 4, 0 };
+ const char *parent_names[6];
+ const char *slck_name;
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_xtal");
+ if (i < 0)
+ return;
+
+ slowxtal_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9260_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ ndck(data->sck, data->num_sck),
+ ndck(data->pck, data->num_pck), 0);
+ if (!at91sam9260_pmc)
+ return;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9260_pmc->chws[PMC_MAIN] = hw;
+
+ if (data->has_slck) {
+ hw = clk_hw_register_fixed_rate_with_accuracy(NULL,
+ "slow_rc_osc",
+ NULL, 0, 32768,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "slow_rc_osc";
+ parent_names[1] = "slow_xtal";
+ hw = at91_clk_register_sam9260_slow(regmap, "slck",
+ parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9260_pmc->chws[PMC_SLOW] = hw;
+ slck_name = "slck";
+ } else {
+ slck_name = slowxtal_name;
+ }
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ data->plla_layout,
+ data->plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
+ data->pllb_layout,
+ data->pllb_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "pllbck";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91rm9200_master_layout,
+ data->mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9260_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91rm9200_clk_register_usb(regmap, "usbck", "pllbck", usb_div);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "pllbck";
+ for (i = 0; i < data->num_progck; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 4, i,
+ &at91rm9200_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < data->num_sck; i++) {
+ hw = at91_clk_register_system(regmap, data->sck[i].n,
+ data->sck[i].p,
+ data->sck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9260_pmc->shws[data->sck[i].id] = hw;
+ }
+
+ for (i = 0; i < data->num_pck; i++) {
+ hw = at91_clk_register_peripheral(regmap,
+ data->pck[i].n,
+ "masterck",
+ data->pck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9260_pmc->phws[data->pck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9260_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9260_pmc);
+}
+
+static void __init at91sam9260_pmc_setup(struct device_node *np)
+{
+ at91sam926x_pmc_setup(np, &at91sam9260_data);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9260_pmc, "atmel,at91sam9260-pmc",
+ at91sam9260_pmc_setup);
+
+static void __init at91sam9261_pmc_setup(struct device_node *np)
+{
+ at91sam926x_pmc_setup(np, &at91sam9261_data);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9261_pmc, "atmel,at91sam9261-pmc",
+ at91sam9261_pmc_setup);
+
+static void __init at91sam9263_pmc_setup(struct device_node *np)
+{
+ at91sam926x_pmc_setup(np, &at91sam9263_data);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9263_pmc, "atmel,at91sam9263-pmc",
+ at91sam9263_pmc_setup);
+
+static void __init at91sam9g20_pmc_setup(struct device_node *np)
+{
+ at91sam926x_pmc_setup(np, &at91sam9g20_data);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9g20_pmc, "atmel,at91sam9g20-pmc",
+ at91sam9g20_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
new file mode 100644
index 000000000000..5aeef68b4bdd
--- /dev/null
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics sam9rl_mck_characteristics = {
+ .output = { .min = 0, .max = 94000000 },
+ .divisors = { 1, 2, 4, 0 },
+};
+
+static u8 sam9rl_plla_out[] = { 0, 2 };
+
+static struct clk_range sam9rl_plla_outputs[] = {
+ { .min = 80000000, .max = 200000000 },
+ { .min = 190000000, .max = 240000000 },
+};
+
+static const struct clk_pll_characteristics sam9rl_plla_characteristics = {
+ .input = { .min = 1000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(sam9rl_plla_outputs),
+ .output = sam9rl_plla_outputs,
+ .out = sam9rl_plla_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} at91sam9rl_systemck[] = {
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+} at91sam9rl_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "pioD_clk", .id = 5, },
+ { .n = "usart0_clk", .id = 6, },
+ { .n = "usart1_clk", .id = 7, },
+ { .n = "usart2_clk", .id = 8, },
+ { .n = "usart3_clk", .id = 9, },
+ { .n = "mci0_clk", .id = 10, },
+ { .n = "twi0_clk", .id = 11, },
+ { .n = "twi1_clk", .id = 12, },
+ { .n = "spi0_clk", .id = 13, },
+ { .n = "ssc0_clk", .id = 14, },
+ { .n = "ssc1_clk", .id = 15, },
+ { .n = "tc0_clk", .id = 16, },
+ { .n = "tc1_clk", .id = 17, },
+ { .n = "tc2_clk", .id = 18, },
+ { .n = "pwm_clk", .id = 19, },
+ { .n = "adc_clk", .id = 20, },
+ { .n = "dma0_clk", .id = 21, },
+ { .n = "udphs_clk", .id = 22, },
+ { .n = "lcd_clk", .id = 23, },
+};
+
+static void __init at91sam9rl_pmc_setup(struct device_node *np)
+{
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *at91sam9rl_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9rl_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91sam9rl_systemck),
+ nck(at91sam9rl_periphck), 0);
+ if (!at91sam9rl_pmc)
+ return;
+
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", mainxtal_name);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9rl_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout,
+ &sam9rl_plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9rl_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91rm9200_master_layout,
+ &sam9rl_mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9rl_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "utmick";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91rm9200_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91sam9rl_systemck[i].n,
+ at91sam9rl_systemck[i].p,
+ at91sam9rl_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9rl_pmc->shws[at91sam9rl_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9rl_periphck); i++) {
+ hw = at91_clk_register_peripheral(regmap,
+ at91sam9rl_periphck[i].n,
+ "masterck",
+ at91sam9rl_periphck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9rl_pmc->phws[at91sam9rl_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9rl_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9rl_pmc);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
new file mode 100644
index 000000000000..2fe225a697df
--- /dev/null
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 133333333 },
+ .divisors = { 1, 2, 4, 3 },
+ .have_div3_pres = 1,
+};
+
+static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+static struct clk_range plla_outputs[] = {
+ { .min = 745000000, .max = 800000000 },
+ { .min = 695000000, .max = 750000000 },
+ { .min = 645000000, .max = 700000000 },
+ { .min = 595000000, .max = 650000000 },
+ { .min = 545000000, .max = 600000000 },
+ { .min = 495000000, .max = 555000000 },
+ { .min = 445000000, .max = 500000000 },
+ { .min = 400000000, .max = 450000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} at91sam9x5_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct pck at91sam9x5_periphck[] = {
+ { .n = "pioAB_clk", .id = 2, },
+ { .n = "pioCD_clk", .id = 3, },
+ { .n = "smd_clk", .id = 4, },
+ { .n = "usart0_clk", .id = 5, },
+ { .n = "usart1_clk", .id = 6, },
+ { .n = "usart2_clk", .id = 7, },
+ { .n = "twi0_clk", .id = 9, },
+ { .n = "twi1_clk", .id = 10, },
+ { .n = "twi2_clk", .id = 11, },
+ { .n = "mci0_clk", .id = 12, },
+ { .n = "spi0_clk", .id = 13, },
+ { .n = "spi1_clk", .id = 14, },
+ { .n = "uart0_clk", .id = 15, },
+ { .n = "uart1_clk", .id = 16, },
+ { .n = "tcb0_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "dma1_clk", .id = 21, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "mci1_clk", .id = 26, },
+ { .n = "ssc0_clk", .id = 28, },
+};
+
+static const struct pck at91sam9g15_periphck[] = {
+ { .n = "lcdc_clk", .id = 25, },
+ { /* sentinel */}
+};
+
+static const struct pck at91sam9g25_periphck[] = {
+ { .n = "usart3_clk", .id = 8, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "isi_clk", .id = 25, },
+ { /* sentinel */}
+};
+
+static const struct pck at91sam9g35_periphck[] = {
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcdc_clk", .id = 25, },
+ { /* sentinel */}
+};
+
+static const struct pck at91sam9x25_periphck[] = {
+ { .n = "usart3_clk", .id = 8, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "macb1_clk", .id = 27, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { /* sentinel */}
+};
+
+static const struct pck at91sam9x35_periphck[] = {
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcdc_clk", .id = 25, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { /* sentinel */}
+};
+
+static void __init at91sam9x5_pmc_setup(struct device_node *np,
+ const struct pck *extra_pcks,
+ bool has_lcdck)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *at91sam9x5_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91sam9x5_systemck),
+ nck(at91sam9x35_periphck), 0);
+ if (!at91sam9x5_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91sam9x5_systemck[i].n,
+ at91sam9x5_systemck[i].p,
+ at91sam9x5_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->shws[at91sam9x5_systemck[i].id] = hw;
+ }
+
+ if (has_lcdck) {
+ hw = at91_clk_register_system(regmap, "lcdck", "masterck", 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->shws[3] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9x5_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ at91sam9x5_periphck[i].n,
+ "masterck",
+ at91sam9x5_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->phws[at91sam9x5_periphck[i].id] = hw;
+ }
+
+ for (i = 0; extra_pcks[i].id; i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ extra_pcks[i].n,
+ "masterck",
+ extra_pcks[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9x5_pmc->phws[extra_pcks[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9x5_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9x5_pmc);
+}
+
+static void __init at91sam9g15_pmc_setup(struct device_node *np)
+{
+ at91sam9x5_pmc_setup(np, at91sam9g15_periphck, true);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9g15_pmc, "atmel,at91sam9g15-pmc",
+ at91sam9g15_pmc_setup);
+
+static void __init at91sam9g25_pmc_setup(struct device_node *np)
+{
+ at91sam9x5_pmc_setup(np, at91sam9g25_periphck, false);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9g25_pmc, "atmel,at91sam9g25-pmc",
+ at91sam9g25_pmc_setup);
+
+static void __init at91sam9g35_pmc_setup(struct device_node *np)
+{
+ at91sam9x5_pmc_setup(np, at91sam9g35_periphck, true);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9g35_pmc, "atmel,at91sam9g35-pmc",
+ at91sam9g35_pmc_setup);
+
+static void __init at91sam9x25_pmc_setup(struct device_node *np)
+{
+ at91sam9x5_pmc_setup(np, at91sam9x25_periphck, false);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9x25_pmc, "atmel,at91sam9x25-pmc",
+ at91sam9x25_pmc_setup);
+
+static void __init at91sam9x35_pmc_setup(struct device_node *np)
+{
+ at91sam9x5_pmc_setup(np, at91sam9x35_periphck, true);
+}
+CLK_OF_DECLARE_DRIVER(at91sam9x35_pmc, "atmel,at91sam9x35-pmc",
+ at91sam9x35_pmc_setup);
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index da7bafcfbe70..36d77146a3bd 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -43,6 +43,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+#include "pmc.h"
+
#define AUDIO_PLL_DIV_FRAC BIT(22)
#define AUDIO_PLL_ND_MAX (AT91_PMC_AUDIO_PLL_ND_MASK >> \
AT91_PMC_AUDIO_PLL_ND_OFFSET)
@@ -444,93 +446,94 @@ static const struct clk_ops audio_pll_pmc_ops = {
.set_rate = clk_audio_pll_pmc_set_rate,
};
-static int of_sama5d2_clk_audio_pll_setup(struct device_node *np,
- struct clk_init_data *init,
- struct clk_hw *hw,
- struct regmap **clk_audio_regmap)
-{
- struct regmap *regmap;
- const char *parent_names[1];
- int ret;
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- init->name = np->name;
- of_clk_parent_fill(np, parent_names, 1);
- init->parent_names = parent_names;
- init->num_parents = 1;
-
- hw->init = init;
- *clk_audio_regmap = regmap;
-
- ret = clk_hw_register(NULL, hw);
- if (ret)
- return ret;
-
- return of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
-static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
+struct clk_hw * __init
+at91_clk_register_audio_pll_frac(struct regmap *regmap, const char *name,
+ const char *parent_name)
{
struct clk_audio_frac *frac_ck;
struct clk_init_data init = {};
+ int ret;
frac_ck = kzalloc(sizeof(*frac_ck), GFP_KERNEL);
if (!frac_ck)
- return;
+ return ERR_PTR(-ENOMEM);
+ init.name = name;
init.ops = &audio_pll_frac_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
init.flags = CLK_SET_RATE_GATE;
- if (of_sama5d2_clk_audio_pll_setup(np, &init, &frac_ck->hw,
- &frac_ck->regmap))
+ frac_ck->hw.init = &init;
+ frac_ck->regmap = regmap;
+
+ ret = clk_hw_register(NULL, &frac_ck->hw);
+ if (ret) {
kfree(frac_ck);
+ return ERR_PTR(ret);
+ }
+
+ return &frac_ck->hw;
}
-static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
+struct clk_hw * __init
+at91_clk_register_audio_pll_pad(struct regmap *regmap, const char *name,
+ const char *parent_name)
{
struct clk_audio_pad *apad_ck;
- struct clk_init_data init = {};
+ struct clk_init_data init;
+ int ret;
apad_ck = kzalloc(sizeof(*apad_ck), GFP_KERNEL);
if (!apad_ck)
- return;
+ return ERR_PTR(-ENOMEM);
+ init.name = name;
init.ops = &audio_pll_pad_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
CLK_SET_RATE_PARENT;
- if (of_sama5d2_clk_audio_pll_setup(np, &init, &apad_ck->hw,
- &apad_ck->regmap))
+ apad_ck->hw.init = &init;
+ apad_ck->regmap = regmap;
+
+ ret = clk_hw_register(NULL, &apad_ck->hw);
+ if (ret) {
kfree(apad_ck);
+ return ERR_PTR(ret);
+ }
+
+ return &apad_ck->hw;
}
-static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
+struct clk_hw * __init
+at91_clk_register_audio_pll_pmc(struct regmap *regmap, const char *name,
+ const char *parent_name)
{
- struct clk_audio_pad *apmc_ck;
- struct clk_init_data init = {};
+ struct clk_audio_pmc *apmc_ck;
+ struct clk_init_data init;
+ int ret;
apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL);
if (!apmc_ck)
- return;
+ return ERR_PTR(-ENOMEM);
+ init.name = name;
init.ops = &audio_pll_pmc_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
CLK_SET_RATE_PARENT;
- if (of_sama5d2_clk_audio_pll_setup(np, &init, &apmc_ck->hw,
- &apmc_ck->regmap))
+ apmc_ck->hw.init = &init;
+ apmc_ck->regmap = regmap;
+
+ ret = clk_hw_register(NULL, &apmc_ck->hw);
+ if (ret) {
kfree(apmc_ck);
-}
+ return ERR_PTR(ret);
+ }
-CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup,
- "atmel,sama5d2-clk-audio-pll-frac",
- of_sama5d2_clk_audio_pll_frac_setup);
-CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup,
- "atmel,sama5d2-clk-audio-pll-pad",
- of_sama5d2_clk_audio_pll_pad_setup);
-CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup,
- "atmel,sama5d2-clk-audio-pll-pmc",
- of_sama5d2_clk_audio_pll_pmc_setup);
+ return &apmc_ck->hw;
+}
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 33481368740e..66e7f7baf958 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -20,17 +20,8 @@
#include "pmc.h"
-#define PERIPHERAL_MAX 64
-#define PERIPHERAL_ID_MIN 2
-
-#define GENERATED_SOURCE_MAX 6
#define GENERATED_MAX_DIV 255
-#define GCK_ID_SSC0 43
-#define GCK_ID_SSC1 44
-#define GCK_ID_I2S0 54
-#define GCK_ID_I2S1 55
-#define GCK_ID_CLASSD 59
#define GCK_INDEX_DT_AUDIO_PLL 5
struct clk_generated {
@@ -279,10 +270,10 @@ static void clk_generated_startup(struct clk_generated *gck)
>> AT91_PMC_PCR_GCKDIV_OFFSET;
}
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
const char *name, const char **parent_names,
- u8 num_parents, u8 id,
+ u8 num_parents, u8 id, bool pll_audio,
const struct clk_range *range)
{
struct clk_generated *gck;
@@ -306,6 +297,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
gck->regmap = regmap;
gck->lock = lock;
gck->range = *range;
+ gck->audio_pll_allowed = pll_audio;
clk_generated_startup(gck);
hw = &gck->hw;
@@ -319,70 +311,3 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
return hw;
}
-
-static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
-{
- int num;
- u32 id;
- const char *name;
- struct clk_hw *hw;
- unsigned int num_parents;
- const char *parent_names[GENERATED_SOURCE_MAX];
- struct device_node *gcknp;
- struct clk_range range = CLK_RANGE(0, 0);
- struct regmap *regmap;
- struct clk_generated *gck;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- num = of_get_child_count(np);
- if (!num || num > PERIPHERAL_MAX)
- return;
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- for_each_child_of_node(np, gcknp) {
- if (of_property_read_u32(gcknp, "reg", &id))
- continue;
-
- if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX)
- continue;
-
- if (of_property_read_string(np, "clock-output-names", &name))
- name = gcknp->name;
-
- of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
- &range);
-
- hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
- parent_names, num_parents,
- id, &range);
-
- gck = to_clk_generated(hw);
-
- if (of_device_is_compatible(np,
- "atmel,sama5d2-clk-generated")) {
- if (gck->id == GCK_ID_SSC0 || gck->id == GCK_ID_SSC1 ||
- gck->id == GCK_ID_I2S0 || gck->id == GCK_ID_I2S1 ||
- gck->id == GCK_ID_CLASSD)
- gck->audio_pll_allowed = true;
- else
- gck->audio_pll_allowed = false;
- } else {
- gck->audio_pll_allowed = false;
- }
-
- if (IS_ERR(hw))
- continue;
-
- of_clk_add_hw_provider(gcknp, of_clk_hw_simple_get, hw);
- }
-}
-CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
- of_sama5d2_clk_generated_setup);
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index e0daa4a31f88..f0a2c6baab37 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -86,25 +86,19 @@ static const struct clk_ops h32mx_ops = {
.set_rate = clk_sama5d4_h32mx_set_rate,
};
-static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
+struct clk_hw * __init
+at91_clk_register_h32mx(struct regmap *regmap, const char *name,
+ const char *parent_name)
{
struct clk_sama5d4_h32mx *h32mxclk;
struct clk_init_data init;
- const char *parent_name;
- struct regmap *regmap;
int ret;
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL);
if (!h32mxclk)
- return;
-
- parent_name = of_clk_get_parent_name(np, 0);
+ return ERR_PTR(-ENOMEM);
- init.name = np->name;
+ init.name = name;
init.ops = &h32mx_ops;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
@@ -116,10 +110,8 @@ static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
ret = clk_hw_register(NULL, &h32mxclk->hw);
if (ret) {
kfree(h32mxclk);
- return;
+ return ERR_PTR(ret);
}
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, &h32mxclk->hw);
+ return &h32mxclk->hw;
}
-CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx",
- of_sama5d4_clk_h32mx_setup);
diff --git a/drivers/clk/at91/clk-i2s-mux.c b/drivers/clk/at91/clk-i2s-mux.c
index f0c3c3079f04..fe6ce172b8b0 100644
--- a/drivers/clk/at91/clk-i2s-mux.c
+++ b/drivers/clk/at91/clk-i2s-mux.c
@@ -14,7 +14,7 @@
#include <soc/at91/atmel-sfr.h>
-#define I2S_BUS_NR 2
+#include "pmc.h"
struct clk_i2s_mux {
struct clk_hw hw;
@@ -48,7 +48,7 @@ static const struct clk_ops clk_i2s_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_i2s_mux_register(struct regmap *regmap, const char *name,
const char * const *parent_names,
unsigned int num_parents, u8 bus_id)
@@ -78,39 +78,3 @@ at91_clk_i2s_mux_register(struct regmap *regmap, const char *name,
return &i2s_ck->hw;
}
-
-static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np)
-{
- struct regmap *regmap_sfr;
- u8 bus_id;
- const char *parent_names[2];
- struct device_node *i2s_mux_np;
- struct clk_hw *hw;
- int ret;
-
- regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
- if (IS_ERR(regmap_sfr))
- return;
-
- for_each_child_of_node(np, i2s_mux_np) {
- if (of_property_read_u8(i2s_mux_np, "reg", &bus_id))
- continue;
-
- if (bus_id > I2S_BUS_NR)
- continue;
-
- ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2);
- if (ret != 2)
- continue;
-
- hw = at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name,
- parent_names, 2, bus_id);
- if (IS_ERR(hw))
- continue;
-
- of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw);
- }
-}
-
-CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux",
- of_sama5d2_clk_i2s_mux_setup);
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c813c27f2e58..7ac0facdb28b 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -12,7 +12,6 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/delay.h>
-#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -128,7 +127,7 @@ static const struct clk_ops main_osc_ops = {
.is_prepared = clk_main_osc_is_prepared,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_main_osc(struct regmap *regmap,
const char *name,
const char *parent_name,
@@ -171,31 +170,6 @@ at91_clk_register_main_osc(struct regmap *regmap,
return hw;
}
-static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *name = np->name;
- const char *parent_name;
- struct regmap *regmap;
- bool bypass;
-
- of_property_read_string(np, "clock-output-names", &name);
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
- parent_name = of_clk_get_parent_name(np, 0);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
- of_at91rm9200_clk_main_osc_setup);
-
static bool clk_main_rc_osc_ready(struct regmap *regmap)
{
unsigned int status;
@@ -275,7 +249,7 @@ static const struct clk_ops main_rc_osc_ops = {
.recalc_accuracy = clk_main_rc_osc_recalc_accuracy,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_main_rc_osc(struct regmap *regmap,
const char *name,
u32 frequency, u32 accuracy)
@@ -313,32 +287,6 @@ at91_clk_register_main_rc_osc(struct regmap *regmap,
return hw;
}
-static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- u32 frequency = 0;
- u32 accuracy = 0;
- const char *name = np->name;
- struct regmap *regmap;
-
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "clock-frequency", &frequency);
- of_property_read_u32(np, "clock-accuracy", &accuracy);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc",
- of_at91sam9x5_clk_main_rc_osc_setup);
-
-
static int clk_main_probe_frequency(struct regmap *regmap)
{
unsigned long prep_time, timeout;
@@ -403,7 +351,7 @@ static const struct clk_ops rm9200_main_ops = {
.recalc_rate = clk_rm9200_main_recalc_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_rm9200_main(struct regmap *regmap,
const char *name,
const char *parent_name)
@@ -442,29 +390,6 @@ at91_clk_register_rm9200_main(struct regmap *regmap,
return hw;
}
-static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91_clk_register_rm9200_main(regmap, name, parent_name);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
- of_at91rm9200_clk_main_setup);
-
static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
{
unsigned int status;
@@ -541,7 +466,7 @@ static const struct clk_ops sam9x5_main_ops = {
.get_parent = clk_sam9x5_main_get_parent,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_sam9x5_main(struct regmap *regmap,
const char *name,
const char **parent_names,
@@ -583,32 +508,3 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
return hw;
}
-
-static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_names[2];
- unsigned int num_parents;
- const char *name = np->name;
- struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > 2)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- of_property_read_string(np, "clock-output-names", &name);
-
- hw = at91_clk_register_sam9x5_main(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main",
- of_at91sam9x5_clk_main_setup);
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index e9cba9fc26d7..eb53b4a8fab6 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -17,24 +17,11 @@
#include "pmc.h"
-#define MASTER_SOURCE_MAX 4
-
#define MASTER_PRES_MASK 0x7
#define MASTER_PRES_MAX MASTER_PRES_MASK
#define MASTER_DIV_SHIFT 8
#define MASTER_DIV_MASK 0x3
-struct clk_master_characteristics {
- struct clk_range output;
- u32 divisors[4];
- u8 have_div3_pres;
-};
-
-struct clk_master_layout {
- u32 mask;
- u8 pres_shift;
-};
-
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
struct clk_master {
@@ -120,7 +107,7 @@ static const struct clk_ops master_ops = {
.get_parent = clk_master_get_parent,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_master(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
@@ -161,92 +148,12 @@ at91_clk_register_master(struct regmap *regmap,
}
-static const struct clk_master_layout at91rm9200_master_layout = {
+const struct clk_master_layout at91rm9200_master_layout = {
.mask = 0x31F,
.pres_shift = 2,
};
-static const struct clk_master_layout at91sam9x5_master_layout = {
+const struct clk_master_layout at91sam9x5_master_layout = {
.mask = 0x373,
.pres_shift = 4,
};
-
-
-static struct clk_master_characteristics * __init
-of_at91_clk_master_get_characteristics(struct device_node *np)
-{
- struct clk_master_characteristics *characteristics;
-
- characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
- if (!characteristics)
- return NULL;
-
- if (of_at91_get_clk_range(np, "atmel,clk-output-range", &characteristics->output))
- goto out_free_characteristics;
-
- of_property_read_u32_array(np, "atmel,clk-divisors",
- characteristics->divisors, 4);
-
- characteristics->have_div3_pres =
- of_property_read_bool(np, "atmel,master-clk-have-div3-pres");
-
- return characteristics;
-
-out_free_characteristics:
- kfree(characteristics);
- return NULL;
-}
-
-static void __init
-of_at91_clk_master_setup(struct device_node *np,
- const struct clk_master_layout *layout)
-{
- struct clk_hw *hw;
- unsigned int num_parents;
- const char *parent_names[MASTER_SOURCE_MAX];
- const char *name = np->name;
- struct clk_master_characteristics *characteristics;
- struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- characteristics = of_at91_clk_master_get_characteristics(np);
- if (!characteristics)
- return;
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91_clk_register_master(regmap, name, num_parents,
- parent_names, layout,
- characteristics);
- if (IS_ERR(hw))
- goto out_free_characteristics;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
- return;
-
-out_free_characteristics:
- kfree(characteristics);
-}
-
-static void __init of_at91rm9200_clk_master_setup(struct device_node *np)
-{
- of_at91_clk_master_setup(np, &at91rm9200_master_layout);
-}
-CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master",
- of_at91rm9200_clk_master_setup);
-
-static void __init of_at91sam9x5_clk_master_setup(struct device_node *np)
-{
- of_at91_clk_master_setup(np, &at91sam9x5_master_layout);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master",
- of_at91sam9x5_clk_master_setup);
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 770118369230..65c1defa78e4 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -19,11 +19,6 @@
DEFINE_SPINLOCK(pmc_pcr_lock);
-#define PERIPHERAL_MAX 64
-
-#define PERIPHERAL_AT91RM9200 0
-#define PERIPHERAL_AT91SAM9X5 1
-
#define PERIPHERAL_ID_MIN 2
#define PERIPHERAL_ID_MAX 31
#define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX))
@@ -104,7 +99,7 @@ static const struct clk_ops peripheral_ops = {
.is_enabled = clk_peripheral_is_enabled,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id)
{
@@ -331,7 +326,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.set_rate = clk_sam9x5_peripheral_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range)
@@ -374,75 +369,3 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
return hw;
}
-
-static void __init
-of_at91_clk_periph_setup(struct device_node *np, u8 type)
-{
- int num;
- u32 id;
- struct clk_hw *hw;
- const char *parent_name;
- const char *name;
- struct device_node *periphclknp;
- struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
- return;
-
- num = of_get_child_count(np);
- if (!num || num > PERIPHERAL_MAX)
- return;
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- for_each_child_of_node(np, periphclknp) {
- if (of_property_read_u32(periphclknp, "reg", &id))
- continue;
-
- if (id >= PERIPHERAL_MAX)
- continue;
-
- if (of_property_read_string(np, "clock-output-names", &name))
- name = periphclknp->name;
-
- if (type == PERIPHERAL_AT91RM9200) {
- hw = at91_clk_register_peripheral(regmap, name,
- parent_name, id);
- } else {
- struct clk_range range = CLK_RANGE(0, 0);
-
- of_at91_get_clk_range(periphclknp,
- "atmel,clk-output-range",
- &range);
-
- hw = at91_clk_register_sam9x5_peripheral(regmap,
- &pmc_pcr_lock,
- name,
- parent_name,
- id, &range);
- }
-
- if (IS_ERR(hw))
- continue;
-
- of_clk_add_hw_provider(periphclknp, of_clk_hw_simple_get, hw);
- }
-}
-
-static void __init of_at91rm9200_clk_periph_setup(struct device_node *np)
-{
- of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200);
-}
-CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral",
- of_at91rm9200_clk_periph_setup);
-
-static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np)
-{
- of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral",
- of_at91sam9x5_clk_periph_setup);
-
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 72b6091eb7b9..b4138fcacf49 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -34,20 +34,6 @@
#define PLL_OUT_SHIFT 14
#define PLL_MAX_ID 1
-struct clk_pll_characteristics {
- struct clk_range input;
- int num_output;
- struct clk_range *output;
- u16 *icpll;
- u8 *out;
-};
-
-struct clk_pll_layout {
- u32 pllr_mask;
- u16 mul_mask;
- u8 mul_shift;
-};
-
#define to_clk_pll(hw) container_of(hw, struct clk_pll, hw)
struct clk_pll {
@@ -133,6 +119,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
{
struct clk_pll *pll = to_clk_pll(hw);
+ if (!pll->div || !pll->mul)
+ return 0;
+
return (parent_rate / pll->div) * (pll->mul + 1);
}
@@ -285,7 +274,7 @@ static const struct clk_ops pll_ops = {
.set_rate = clk_pll_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_pll(struct regmap *regmap, const char *name,
const char *parent_name, u8 id,
const struct clk_pll_layout *layout,
@@ -331,189 +320,26 @@ at91_clk_register_pll(struct regmap *regmap, const char *name,
}
-static const struct clk_pll_layout at91rm9200_pll_layout = {
+const struct clk_pll_layout at91rm9200_pll_layout = {
.pllr_mask = 0x7FFFFFF,
.mul_shift = 16,
.mul_mask = 0x7FF,
};
-static const struct clk_pll_layout at91sam9g45_pll_layout = {
+const struct clk_pll_layout at91sam9g45_pll_layout = {
.pllr_mask = 0xFFFFFF,
.mul_shift = 16,
.mul_mask = 0xFF,
};
-static const struct clk_pll_layout at91sam9g20_pllb_layout = {
+const struct clk_pll_layout at91sam9g20_pllb_layout = {
.pllr_mask = 0x3FFFFF,
.mul_shift = 16,
.mul_mask = 0x3F,
};
-static const struct clk_pll_layout sama5d3_pll_layout = {
+const struct clk_pll_layout sama5d3_pll_layout = {
.pllr_mask = 0x1FFFFFF,
.mul_shift = 18,
.mul_mask = 0x7F,
};
-
-
-static struct clk_pll_characteristics * __init
-of_at91_clk_pll_get_characteristics(struct device_node *np)
-{
- int i;
- int offset;
- u32 tmp;
- int num_output;
- u32 num_cells;
- struct clk_range input;
- struct clk_range *output;
- u8 *out = NULL;
- u16 *icpll = NULL;
- struct clk_pll_characteristics *characteristics;
-
- if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input))
- return NULL;
-
- if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells",
- &num_cells))
- return NULL;
-
- if (num_cells < 2 || num_cells > 4)
- return NULL;
-
- if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
- return NULL;
- num_output = tmp / (sizeof(u32) * num_cells);
-
- characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
- if (!characteristics)
- return NULL;
-
- output = kcalloc(num_output, sizeof(*output), GFP_KERNEL);
- if (!output)
- goto out_free_characteristics;
-
- if (num_cells > 2) {
- out = kcalloc(num_output, sizeof(*out), GFP_KERNEL);
- if (!out)
- goto out_free_output;
- }
-
- if (num_cells > 3) {
- icpll = kcalloc(num_output, sizeof(*icpll), GFP_KERNEL);
- if (!icpll)
- goto out_free_output;
- }
-
- for (i = 0; i < num_output; i++) {
- offset = i * num_cells;
- if (of_property_read_u32_index(np,
- "atmel,pll-clk-output-ranges",
- offset, &tmp))
- goto out_free_output;
- output[i].min = tmp;
- if (of_property_read_u32_index(np,
- "atmel,pll-clk-output-ranges",
- offset + 1, &tmp))
- goto out_free_output;
- output[i].max = tmp;
-
- if (num_cells == 2)
- continue;
-
- if (of_property_read_u32_index(np,
- "atmel,pll-clk-output-ranges",
- offset + 2, &tmp))
- goto out_free_output;
- out[i] = tmp;
-
- if (num_cells == 3)
- continue;
-
- if (of_property_read_u32_index(np,
- "atmel,pll-clk-output-ranges",
- offset + 3, &tmp))
- goto out_free_output;
- icpll[i] = tmp;
- }
-
- characteristics->input = input;
- characteristics->num_output = num_output;
- characteristics->output = output;
- characteristics->out = out;
- characteristics->icpll = icpll;
- return characteristics;
-
-out_free_output:
- kfree(icpll);
- kfree(out);
- kfree(output);
-out_free_characteristics:
- kfree(characteristics);
- return NULL;
-}
-
-static void __init
-of_at91_clk_pll_setup(struct device_node *np,
- const struct clk_pll_layout *layout)
-{
- u32 id;
- struct clk_hw *hw;
- struct regmap *regmap;
- const char *parent_name;
- const char *name = np->name;
- struct clk_pll_characteristics *characteristics;
-
- if (of_property_read_u32(np, "reg", &id))
- return;
-
- parent_name = of_clk_get_parent_name(np, 0);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- characteristics = of_at91_clk_pll_get_characteristics(np);
- if (!characteristics)
- return;
-
- hw = at91_clk_register_pll(regmap, name, parent_name, id, layout,
- characteristics);
- if (IS_ERR(hw))
- goto out_free_characteristics;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
- return;
-
-out_free_characteristics:
- kfree(characteristics);
-}
-
-static void __init of_at91rm9200_clk_pll_setup(struct device_node *np)
-{
- of_at91_clk_pll_setup(np, &at91rm9200_pll_layout);
-}
-CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll",
- of_at91rm9200_clk_pll_setup);
-
-static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np)
-{
- of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout);
-}
-CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll",
- of_at91sam9g45_clk_pll_setup);
-
-static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np)
-{
- of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout);
-}
-CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb",
- of_at91sam9g20_clk_pllb_setup);
-
-static void __init of_sama5d3_clk_pll_setup(struct device_node *np)
-{
- of_at91_clk_pll_setup(np, &sama5d3_pll_layout);
-}
-CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll",
- of_sama5d3_clk_pll_setup);
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index b4afaf22f3fd..e8c4f8b02f28 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -75,7 +75,7 @@ static const struct clk_ops plldiv_ops = {
.set_rate = clk_plldiv_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name)
{
@@ -106,28 +106,3 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
return hw;
}
-
-static void __init
-of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91_clk_register_plldiv(regmap, name, parent_name);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
- of_at91sam9x5_clk_plldiv_setup);
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 0e6aab1252fc..5bc68b9c5498 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -17,7 +17,6 @@
#include "pmc.h"
-#define PROG_SOURCE_MAX 5
#define PROG_ID_MAX 7
#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
@@ -25,12 +24,6 @@
#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
#define PROG_MAX_RM9200_CSS 3
-struct clk_programmable_layout {
- u8 pres_shift;
- u8 css_mask;
- u8 have_slck_mck;
-};
-
struct clk_programmable {
struct clk_hw hw;
struct regmap *regmap;
@@ -170,7 +163,7 @@ static const struct clk_ops programmable_ops = {
.set_rate = clk_programmable_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap,
const char *name, const char **parent_names,
u8 num_parents, u8 id,
@@ -211,86 +204,20 @@ at91_clk_register_programmable(struct regmap *regmap,
return hw;
}
-static const struct clk_programmable_layout at91rm9200_programmable_layout = {
+const struct clk_programmable_layout at91rm9200_programmable_layout = {
.pres_shift = 2,
.css_mask = 0x3,
.have_slck_mck = 0,
};
-static const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+const struct clk_programmable_layout at91sam9g45_programmable_layout = {
.pres_shift = 2,
.css_mask = 0x3,
.have_slck_mck = 1,
};
-static const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+const struct clk_programmable_layout at91sam9x5_programmable_layout = {
.pres_shift = 4,
.css_mask = 0x7,
.have_slck_mck = 0,
};
-
-static void __init
-of_at91_clk_prog_setup(struct device_node *np,
- const struct clk_programmable_layout *layout)
-{
- int num;
- u32 id;
- struct clk_hw *hw;
- unsigned int num_parents;
- const char *parent_names[PROG_SOURCE_MAX];
- const char *name;
- struct device_node *progclknp;
- struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > PROG_SOURCE_MAX)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- num = of_get_child_count(np);
- if (!num || num > (PROG_ID_MAX + 1))
- return;
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- for_each_child_of_node(np, progclknp) {
- if (of_property_read_u32(progclknp, "reg", &id))
- continue;
-
- if (of_property_read_string(np, "clock-output-names", &name))
- name = progclknp->name;
-
- hw = at91_clk_register_programmable(regmap, name,
- parent_names, num_parents,
- id, layout);
- if (IS_ERR(hw))
- continue;
-
- of_clk_add_hw_provider(progclknp, of_clk_hw_simple_get, hw);
- }
-}
-
-
-static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
-{
- of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
-}
-CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
- of_at91rm9200_clk_prog_setup);
-
-static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
-{
- of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
-}
-CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
- of_at91sam9g45_clk_prog_setup);
-
-static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
-{
- of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
- of_at91sam9x5_clk_prog_setup);
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 560a8b9abf93..cbb146912f7a 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -40,7 +40,7 @@ static const struct clk_ops sam9260_slow_ops = {
.get_parent = clk_sam9260_slow_get_parent,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_sam9260_slow(struct regmap *regmap,
const char *name,
const char **parent_names,
@@ -79,33 +79,3 @@ at91_clk_register_sam9260_slow(struct regmap *regmap,
return hw;
}
-
-static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_names[2];
- unsigned int num_parents;
- const char *name = np->name;
- struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents != 2)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- of_property_read_string(np, "clock-output-names", &name);
-
- hw = at91_clk_register_sam9260_slow(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
-CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow",
- of_at91sam9260_clk_slow_setup);
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
index 965c662b90a5..75679fd8a9c7 100644
--- a/drivers/clk/at91/clk-smd.c
+++ b/drivers/clk/at91/clk-smd.c
@@ -17,8 +17,6 @@
#include "pmc.h"
-#define SMD_SOURCE_MAX 2
-
#define SMD_DIV_SHIFT 8
#define SMD_MAX_DIV 0xf
@@ -111,7 +109,7 @@ static const struct clk_ops at91sam9x5_smd_ops = {
.set_rate = at91sam9x5_clk_smd_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
@@ -142,33 +140,3 @@ at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
return hw;
}
-
-static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- unsigned int num_parents;
- const char *parent_names[SMD_SOURCE_MAX];
- const char *name = np->name;
- struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > SMD_SOURCE_MAX)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91sam9x5_clk_register_smd(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd",
- of_at91sam9x5_clk_smd_setup);
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 86a36809765d..47bfca933403 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -88,7 +88,7 @@ static const struct clk_ops system_ops = {
.is_prepared = clk_system_is_prepared,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_system(struct regmap *regmap, const char *name,
const char *parent_name, u8 id)
{
@@ -123,40 +123,3 @@ at91_clk_register_system(struct regmap *regmap, const char *name,
return hw;
}
-
-static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
-{
- int num;
- u32 id;
- struct clk_hw *hw;
- const char *name;
- struct device_node *sysclknp;
- const char *parent_name;
- struct regmap *regmap;
-
- num = of_get_child_count(np);
- if (num > (SYSTEM_MAX_ID + 1))
- return;
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- for_each_child_of_node(np, sysclknp) {
- if (of_property_read_u32(sysclknp, "reg", &id))
- continue;
-
- if (of_property_read_string(np, "clock-output-names", &name))
- name = sysclknp->name;
-
- parent_name = of_clk_get_parent_name(sysclknp, 0);
-
- hw = at91_clk_register_system(regmap, name, parent_name, id);
- if (IS_ERR(hw))
- continue;
-
- of_clk_add_hw_provider(sysclknp, of_clk_hw_simple_get, hw);
- }
-}
-CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system",
- of_at91rm9200_clk_sys_setup);
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 791770a563fc..79ee1c760f2a 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -17,8 +17,6 @@
#include "pmc.h"
-#define USB_SOURCE_MAX 2
-
#define SAM9X5_USB_DIV_SHIFT 8
#define SAM9X5_USB_MAX_DIV 0xf
@@ -192,7 +190,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
.set_rate = at91sam9x5_clk_usb_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
@@ -225,7 +223,7 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
return hw;
}
-static struct clk_hw * __init
+struct clk_hw * __init
at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name)
{
@@ -342,7 +340,7 @@ static const struct clk_ops at91rm9200_usb_ops = {
.set_rate = at91rm9200_clk_usb_set_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name, const u32 *divisors)
{
@@ -374,89 +372,3 @@ at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
return hw;
}
-
-static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- unsigned int num_parents;
- const char *parent_names[USB_SOURCE_MAX];
- const char *name = np->name;
- struct regmap *regmap;
-
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > USB_SOURCE_MAX)
- return;
-
- of_clk_parent_fill(np, parent_names, num_parents);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91sam9x5_clk_register_usb(regmap, name, parent_names,
- num_parents);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb",
- of_at91sam9x5_clk_usb_setup);
-
-static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
- return;
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
-
- hw = at91sam9n12_clk_register_usb(regmap, name, parent_name);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb",
- of_at91sam9n12_clk_usb_setup);
-
-static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- u32 divisors[4] = {0, 0, 0, 0};
- struct regmap *regmap;
-
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name)
- return;
-
- of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4);
- if (!divisors[0])
- return;
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
- return;
- hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb",
- of_at91rm9200_clk_usb_setup);
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index cd8d689138ff..9a970abf3489 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -125,7 +125,7 @@ static const struct clk_ops utmi_ops = {
.recalc_rate = clk_utmi_recalc_rate,
};
-static struct clk_hw * __init
+struct clk_hw * __init
at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
const char *name, const char *parent_name)
{
@@ -157,46 +157,3 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
return hw;
}
-
-static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- struct regmap *regmap_pmc, *regmap_sfr;
-
- parent_name = of_clk_get_parent_name(np, 0);
-
- of_property_read_string(np, "clock-output-names", &name);
-
- regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap_pmc))
- return;
-
- /*
- * If the device supports different mainck rates, this value has to be
- * set in the UTMI Clock Trimming register.
- * - 9x5: mainck supports several rates but it is indicated that a
- * 12 MHz is needed in case of USB.
- * - sama5d3 and sama5d2: mainck supports several rates. Configuring
- * the FREQ field of the UTMI Clock Trimming register is mandatory.
- * - sama5d4: mainck is at 12 MHz.
- *
- * We only need to retrieve sama5d3 or sama5d2 sfr regmap.
- */
- regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr");
- if (IS_ERR(regmap_sfr)) {
- regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
- if (IS_ERR(regmap_sfr))
- regmap_sfr = NULL;
- }
-
- hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
- return;
-}
-CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi",
- of_at91sam9x5_clk_utmi_setup);
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
new file mode 100644
index 000000000000..b95bb4e2a927
--- /dev/null
+++ b/drivers/clk/at91/dt-compat.c
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "pmc.h"
+
+#define MASTER_SOURCE_MAX 4
+
+#define PERIPHERAL_AT91RM9200 0
+#define PERIPHERAL_AT91SAM9X5 1
+
+#define PERIPHERAL_MAX 64
+
+#define PERIPHERAL_ID_MIN 2
+
+#define PROG_SOURCE_MAX 5
+#define PROG_ID_MAX 7
+
+#define SYSTEM_MAX_ID 31
+
+#ifdef CONFIG_HAVE_AT91_AUDIO_PLL
+static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *name = np->name;
+ const char *parent_name;
+ struct regmap *regmap;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ hw = at91_clk_register_audio_pll_frac(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup,
+ "atmel,sama5d2-clk-audio-pll-frac",
+ of_sama5d2_clk_audio_pll_frac_setup);
+
+static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *name = np->name;
+ const char *parent_name;
+ struct regmap *regmap;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ hw = at91_clk_register_audio_pll_pad(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup,
+ "atmel,sama5d2-clk-audio-pll-pad",
+ of_sama5d2_clk_audio_pll_pad_setup);
+
+static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *name = np->name;
+ const char *parent_name;
+ struct regmap *regmap;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ hw = at91_clk_register_audio_pll_pmc(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup,
+ "atmel,sama5d2-clk-audio-pll-pmc",
+ of_sama5d2_clk_audio_pll_pmc_setup);
+#endif /* CONFIG_HAVE_AT91_AUDIO_PLL */
+
+#ifdef CONFIG_HAVE_AT91_GENERATED_CLK
+#define GENERATED_SOURCE_MAX 6
+
+#define GCK_ID_I2S0 54
+#define GCK_ID_I2S1 55
+#define GCK_ID_CLASSD 59
+
+static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
+{
+ int num;
+ u32 id;
+ const char *name;
+ struct clk_hw *hw;
+ unsigned int num_parents;
+ const char *parent_names[GENERATED_SOURCE_MAX];
+ struct device_node *gcknp;
+ struct clk_range range = CLK_RANGE(0, 0);
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ num = of_get_child_count(np);
+ if (!num || num > PERIPHERAL_MAX)
+ return;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ for_each_child_of_node(np, gcknp) {
+ bool pll_audio = false;
+
+ if (of_property_read_u32(gcknp, "reg", &id))
+ continue;
+
+ if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX)
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = gcknp->name;
+
+ of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
+ &range);
+
+ if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
+ (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 ||
+ id == GCK_ID_CLASSD))
+ pll_audio = true;
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
+ parent_names, num_parents,
+ id, pll_audio, &range);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(gcknp, of_clk_hw_simple_get, hw);
+ }
+}
+CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
+ of_sama5d2_clk_generated_setup);
+#endif /* CONFIG_HAVE_AT91_GENERATED_CLK */
+
+#ifdef CONFIG_HAVE_AT91_H32MX
+static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *name = np->name;
+ const char *parent_name;
+ struct regmap *regmap;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ hw = at91_clk_register_h32mx(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx",
+ of_sama5d4_clk_h32mx_setup);
+#endif /* CONFIG_HAVE_AT91_H32MX */
+
+#ifdef CONFIG_HAVE_AT91_I2S_MUX_CLK
+#define I2S_BUS_NR 2
+
+static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np)
+{
+ struct regmap *regmap_sfr;
+ u8 bus_id;
+ const char *parent_names[2];
+ struct device_node *i2s_mux_np;
+ struct clk_hw *hw;
+ int ret;
+
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ return;
+
+ for_each_child_of_node(np, i2s_mux_np) {
+ if (of_property_read_u8(i2s_mux_np, "reg", &bus_id))
+ continue;
+
+ if (bus_id > I2S_BUS_NR)
+ continue;
+
+ ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2);
+ if (ret != 2)
+ continue;
+
+ hw = at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name,
+ parent_names, 2, bus_id);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw);
+ }
+}
+CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux",
+ of_sama5d2_clk_i2s_mux_setup);
+#endif /* CONFIG_HAVE_AT91_I2S_MUX_CLK */
+
+static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *name = np->name;
+ const char *parent_name;
+ struct regmap *regmap;
+ bool bypass;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
+ of_at91rm9200_clk_main_osc_setup);
+
+static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ u32 frequency = 0;
+ u32 accuracy = 0;
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ of_property_read_string(np, "clock-output-names", &name);
+ of_property_read_u32(np, "clock-frequency", &frequency);
+ of_property_read_u32(np, "clock-accuracy", &accuracy);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc",
+ of_at91sam9x5_clk_main_rc_osc_setup);
+
+static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91_clk_register_rm9200_main(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
+ of_at91rm9200_clk_main_setup);
+
+static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_names[2];
+ unsigned int num_parents;
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > 2)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ hw = at91_clk_register_sam9x5_main(regmap, name, parent_names,
+ num_parents);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main",
+ of_at91sam9x5_clk_main_setup);
+
+static struct clk_master_characteristics * __init
+of_at91_clk_master_get_characteristics(struct device_node *np)
+{
+ struct clk_master_characteristics *characteristics;
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-output-range", &characteristics->output))
+ goto out_free_characteristics;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors",
+ characteristics->divisors, 4);
+
+ characteristics->have_div3_pres =
+ of_property_read_bool(np, "atmel,master-clk-have-div3-pres");
+
+ return characteristics;
+
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_master_setup(struct device_node *np,
+ const struct clk_master_layout *layout)
+{
+ struct clk_hw *hw;
+ unsigned int num_parents;
+ const char *parent_names[MASTER_SOURCE_MAX];
+ const char *name = np->name;
+ struct clk_master_characteristics *characteristics;
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ characteristics = of_at91_clk_master_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91_clk_register_master(regmap, name, num_parents,
+ parent_names, layout,
+ characteristics);
+ if (IS_ERR(hw))
+ goto out_free_characteristics;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+static void __init of_at91rm9200_clk_master_setup(struct device_node *np)
+{
+ of_at91_clk_master_setup(np, &at91rm9200_master_layout);
+}
+CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master",
+ of_at91rm9200_clk_master_setup);
+
+static void __init of_at91sam9x5_clk_master_setup(struct device_node *np)
+{
+ of_at91_clk_master_setup(np, &at91sam9x5_master_layout);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master",
+ of_at91sam9x5_clk_master_setup);
+
+static void __init
+of_at91_clk_periph_setup(struct device_node *np, u8 type)
+{
+ int num;
+ u32 id;
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name;
+ struct device_node *periphclknp;
+ struct regmap *regmap;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ num = of_get_child_count(np);
+ if (!num || num > PERIPHERAL_MAX)
+ return;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ for_each_child_of_node(np, periphclknp) {
+ if (of_property_read_u32(periphclknp, "reg", &id))
+ continue;
+
+ if (id >= PERIPHERAL_MAX)
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = periphclknp->name;
+
+ if (type == PERIPHERAL_AT91RM9200) {
+ hw = at91_clk_register_peripheral(regmap, name,
+ parent_name, id);
+ } else {
+ struct clk_range range = CLK_RANGE(0, 0);
+
+ of_at91_get_clk_range(periphclknp,
+ "atmel,clk-output-range",
+ &range);
+
+ hw = at91_clk_register_sam9x5_peripheral(regmap,
+ &pmc_pcr_lock,
+ name,
+ parent_name,
+ id, &range);
+ }
+
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(periphclknp, of_clk_hw_simple_get, hw);
+ }
+}
+
+static void __init of_at91rm9200_clk_periph_setup(struct device_node *np)
+{
+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200);
+}
+CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral",
+ of_at91rm9200_clk_periph_setup);
+
+static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np)
+{
+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral",
+ of_at91sam9x5_clk_periph_setup);
+
+static struct clk_pll_characteristics * __init
+of_at91_clk_pll_get_characteristics(struct device_node *np)
+{
+ int i;
+ int offset;
+ u32 tmp;
+ int num_output;
+ u32 num_cells;
+ struct clk_range input;
+ struct clk_range *output;
+ u8 *out = NULL;
+ u16 *icpll = NULL;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input))
+ return NULL;
+
+ if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells",
+ &num_cells))
+ return NULL;
+
+ if (num_cells < 2 || num_cells > 4)
+ return NULL;
+
+ if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp))
+ return NULL;
+ num_output = tmp / (sizeof(u32) * num_cells);
+
+ characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL);
+ if (!characteristics)
+ return NULL;
+
+ output = kcalloc(num_output, sizeof(*output), GFP_KERNEL);
+ if (!output)
+ goto out_free_characteristics;
+
+ if (num_cells > 2) {
+ out = kcalloc(num_output, sizeof(*out), GFP_KERNEL);
+ if (!out)
+ goto out_free_output;
+ }
+
+ if (num_cells > 3) {
+ icpll = kcalloc(num_output, sizeof(*icpll), GFP_KERNEL);
+ if (!icpll)
+ goto out_free_output;
+ }
+
+ for (i = 0; i < num_output; i++) {
+ offset = i * num_cells;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset, &tmp))
+ goto out_free_output;
+ output[i].min = tmp;
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 1, &tmp))
+ goto out_free_output;
+ output[i].max = tmp;
+
+ if (num_cells == 2)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 2, &tmp))
+ goto out_free_output;
+ out[i] = tmp;
+
+ if (num_cells == 3)
+ continue;
+
+ if (of_property_read_u32_index(np,
+ "atmel,pll-clk-output-ranges",
+ offset + 3, &tmp))
+ goto out_free_output;
+ icpll[i] = tmp;
+ }
+
+ characteristics->input = input;
+ characteristics->num_output = num_output;
+ characteristics->output = output;
+ characteristics->out = out;
+ characteristics->icpll = icpll;
+ return characteristics;
+
+out_free_output:
+ kfree(icpll);
+ kfree(out);
+ kfree(output);
+out_free_characteristics:
+ kfree(characteristics);
+ return NULL;
+}
+
+static void __init
+of_at91_clk_pll_setup(struct device_node *np,
+ const struct clk_pll_layout *layout)
+{
+ u32 id;
+ struct clk_hw *hw;
+ struct regmap *regmap;
+ const char *parent_name;
+ const char *name = np->name;
+ struct clk_pll_characteristics *characteristics;
+
+ if (of_property_read_u32(np, "reg", &id))
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ characteristics = of_at91_clk_pll_get_characteristics(np);
+ if (!characteristics)
+ return;
+
+ hw = at91_clk_register_pll(regmap, name, parent_name, id, layout,
+ characteristics);
+ if (IS_ERR(hw))
+ goto out_free_characteristics;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+ return;
+
+out_free_characteristics:
+ kfree(characteristics);
+}
+
+static void __init of_at91rm9200_clk_pll_setup(struct device_node *np)
+{
+ of_at91_clk_pll_setup(np, &at91rm9200_pll_layout);
+}
+CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll",
+ of_at91rm9200_clk_pll_setup);
+
+static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np)
+{
+ of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout);
+}
+CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll",
+ of_at91sam9g45_clk_pll_setup);
+
+static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np)
+{
+ of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout);
+}
+CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb",
+ of_at91sam9g20_clk_pllb_setup);
+
+static void __init of_sama5d3_clk_pll_setup(struct device_node *np)
+{
+ of_at91_clk_pll_setup(np, &sama5d3_pll_layout);
+}
+CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll",
+ of_sama5d3_clk_pll_setup);
+
+static void __init
+of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91_clk_register_plldiv(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
+ of_at91sam9x5_clk_plldiv_setup);
+
+static void __init
+of_at91_clk_prog_setup(struct device_node *np,
+ const struct clk_programmable_layout *layout)
+{
+ int num;
+ u32 id;
+ struct clk_hw *hw;
+ unsigned int num_parents;
+ const char *parent_names[PROG_SOURCE_MAX];
+ const char *name;
+ struct device_node *progclknp;
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > PROG_SOURCE_MAX)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ num = of_get_child_count(np);
+ if (!num || num > (PROG_ID_MAX + 1))
+ return;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ for_each_child_of_node(np, progclknp) {
+ if (of_property_read_u32(progclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = progclknp->name;
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, num_parents,
+ id, layout);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(progclknp, of_clk_hw_simple_get, hw);
+ }
+}
+
+static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
+{
+ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
+}
+CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
+ of_at91rm9200_clk_prog_setup);
+
+static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
+{
+ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
+}
+CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
+ of_at91sam9g45_clk_prog_setup);
+
+static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
+{
+ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
+ of_at91sam9x5_clk_prog_setup);
+
+static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_names[2];
+ unsigned int num_parents;
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents != 2)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ hw = at91_clk_register_sam9260_slow(regmap, name, parent_names,
+ num_parents);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow",
+ of_at91sam9260_clk_slow_setup);
+
+#ifdef CONFIG_HAVE_AT91_SMD
+#define SMD_SOURCE_MAX 2
+
+static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ unsigned int num_parents;
+ const char *parent_names[SMD_SOURCE_MAX];
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > SMD_SOURCE_MAX)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91sam9x5_clk_register_smd(regmap, name, parent_names,
+ num_parents);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd",
+ of_at91sam9x5_clk_smd_setup);
+#endif /* CONFIG_HAVE_AT91_SMD */
+
+static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
+{
+ int num;
+ u32 id;
+ struct clk_hw *hw;
+ const char *name;
+ struct device_node *sysclknp;
+ const char *parent_name;
+ struct regmap *regmap;
+
+ num = of_get_child_count(np);
+ if (num > (SYSTEM_MAX_ID + 1))
+ return;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ for_each_child_of_node(np, sysclknp) {
+ if (of_property_read_u32(sysclknp, "reg", &id))
+ continue;
+
+ if (of_property_read_string(np, "clock-output-names", &name))
+ name = sysclknp->name;
+
+ parent_name = of_clk_get_parent_name(sysclknp, 0);
+
+ hw = at91_clk_register_system(regmap, name, parent_name, id);
+ if (IS_ERR(hw))
+ continue;
+
+ of_clk_add_hw_provider(sysclknp, of_clk_hw_simple_get, hw);
+ }
+}
+CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system",
+ of_at91rm9200_clk_sys_setup);
+
+#ifdef CONFIG_HAVE_AT91_USB_CLK
+#define USB_SOURCE_MAX 2
+
+static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ unsigned int num_parents;
+ const char *parent_names[USB_SOURCE_MAX];
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents == 0 || num_parents > USB_SOURCE_MAX)
+ return;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb",
+ of_at91sam9x5_clk_usb_setup);
+
+static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name = np->name;
+ struct regmap *regmap;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ hw = at91sam9n12_clk_register_usb(regmap, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb",
+ of_at91sam9n12_clk_usb_setup);
+
+static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name = np->name;
+ u32 divisors[4] = {0, 0, 0, 0};
+ struct regmap *regmap;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4);
+ if (!divisors[0])
+ return;
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+ hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb",
+ of_at91rm9200_clk_usb_setup);
+#endif /* CONFIG_HAVE_AT91_USB_CLK */
+
+#ifdef CONFIG_HAVE_AT91_UTMI
+static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
+{
+ struct clk_hw *hw;
+ const char *parent_name;
+ const char *name = np->name;
+ struct regmap *regmap_pmc, *regmap_sfr;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ of_property_read_string(np, "clock-output-names", &name);
+
+ regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap_pmc))
+ return;
+
+ /*
+ * If the device supports different mainck rates, this value has to be
+ * set in the UTMI Clock Trimming register.
+ * - 9x5: mainck supports several rates but it is indicated that a
+ * 12 MHz is needed in case of USB.
+ * - sama5d3 and sama5d2: mainck supports several rates. Configuring
+ * the FREQ field of the UTMI Clock Trimming register is mandatory.
+ * - sama5d4: mainck is at 12 MHz.
+ *
+ * We only need to retrieve sama5d3 or sama5d2 sfr regmap.
+ */
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr");
+ if (IS_ERR(regmap_sfr)) {
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ regmap_sfr = NULL;
+ }
+
+ hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name);
+ if (IS_ERR(hw))
+ return;
+
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi",
+ of_at91sam9x5_clk_utmi_setup);
+#endif /* CONFIG_HAVE_AT91_UTMI */
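
The new dt-compat.c above collects the CLK_OF_DECLARE() entries that the earlier hunks remove from the individual clk-*.c files, so the existing one-node-per-clock device-tree bindings keep probing while the registration helpers become plain functions that SoC-specific drivers can call directly. A rough standalone sketch of what such a compatible-to-setup table amounts to follows; the table, strings and setup stubs are illustrative only (the real macro drops its entry into a linker section that the OF clock core walks at boot).

  #include <stdio.h>
  #include <string.h>

  struct of_clk_decl {
      const char *compatible;
      void (*setup)(const char *node_name);
  };

  static void smd_setup(const char *n) { printf("smd setup for %s\n", n); }
  static void usb_setup(const char *n) { printf("usb setup for %s\n", n); }

  /* models CLK_OF_DECLARE(..., "atmel,at91sam9x5-clk-smd", ...) entries */
  static const struct of_clk_decl decls[] = {
      { "atmel,at91sam9x5-clk-smd", smd_setup },
      { "atmel,at91rm9200-clk-usb", usb_setup },
  };

  int main(void)
  {
      const char *compat = "atmel,at91sam9x5-clk-smd"; /* from a DT node */
      size_t i;

      for (i = 0; i < sizeof(decls) / sizeof(decls[0]); i++)
          if (!strcmp(compat, decls[i].compatible))
              decls[i].setup("smd");
      return 0;
  }
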
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 1fa27f4ea538..db24539d5740 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -19,6 +19,8 @@
#include <asm/proc-fns.h>
+#include <dt-bindings/clock/at91.h>
+
#include "pmc.h"
#define PMC_MAX_IDS 128
@@ -47,6 +49,82 @@ int of_at91_get_clk_range(struct device_node *np, const char *propname,
}
EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
+struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int type = clkspec->args[0];
+ unsigned int idx = clkspec->args[1];
+ struct pmc_data *pmc_data = data;
+
+ switch (type) {
+ case PMC_TYPE_CORE:
+ if (idx < pmc_data->ncore)
+ return pmc_data->chws[idx];
+ break;
+ case PMC_TYPE_SYSTEM:
+ if (idx < pmc_data->nsystem)
+ return pmc_data->shws[idx];
+ break;
+ case PMC_TYPE_PERIPHERAL:
+ if (idx < pmc_data->nperiph)
+ return pmc_data->phws[idx];
+ break;
+ case PMC_TYPE_GCK:
+ if (idx < pmc_data->ngck)
+ return pmc_data->ghws[idx];
+ break;
+ default:
+ break;
+ }
+
+ pr_err("%s: invalid type (%u) or index (%u)\n", __func__, type, idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+void pmc_data_free(struct pmc_data *pmc_data)
+{
+ kfree(pmc_data->chws);
+ kfree(pmc_data->shws);
+ kfree(pmc_data->phws);
+ kfree(pmc_data->ghws);
+}
+
+struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
+ unsigned int nperiph, unsigned int ngck)
+{
+ struct pmc_data *pmc_data = kzalloc(sizeof(*pmc_data), GFP_KERNEL);
+
+ if (!pmc_data)
+ return NULL;
+
+ pmc_data->ncore = ncore;
+ pmc_data->chws = kcalloc(ncore, sizeof(struct clk_hw *), GFP_KERNEL);
+ if (!pmc_data->chws)
+ goto err;
+
+ pmc_data->nsystem = nsystem;
+ pmc_data->shws = kcalloc(nsystem, sizeof(struct clk_hw *), GFP_KERNEL);
+ if (!pmc_data->shws)
+ goto err;
+
+ pmc_data->nperiph = nperiph;
+ pmc_data->phws = kcalloc(nperiph, sizeof(struct clk_hw *), GFP_KERNEL);
+ if (!pmc_data->phws)
+ goto err;
+
+ pmc_data->ngck = ngck;
+ pmc_data->ghws = kcalloc(ngck, sizeof(struct clk_hw *), GFP_KERNEL);
+ if (!pmc_data->ghws)
+ goto err;
+
+ return pmc_data;
+
+err:
+ pmc_data_free(pmc_data);
+
+ return NULL;
+}
+
#ifdef CONFIG_PM
static struct regmap *pmcreg;
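
of_clk_hw_pmc_get(), added to pmc.c above, is the provider callback for the new single-PMC-node binding: the first specifier cell selects one of the four clock classes (PMC_TYPE_CORE, PMC_TYPE_SYSTEM, PMC_TYPE_PERIPHERAL, PMC_TYPE_GCK from dt-bindings/clock/at91.h) and the second cell indexes that class's clk_hw table in struct pmc_data. A standalone sketch of the same dispatch follows; the enum values and clock names are illustrative (tcb0_clk with id 35 is taken from the sama5d2 tables added later in this patch).

  #include <stdio.h>

  /* illustrative values; the real ones come from dt-bindings/clock/at91.h */
  enum { PMC_TYPE_CORE, PMC_TYPE_SYSTEM, PMC_TYPE_PERIPHERAL, PMC_TYPE_GCK };

  struct pmc_tables {
      unsigned int ncore, nsystem, nperiph, ngck;
      const char **chws, **shws, **phws, **ghws;
  };

  static const char *pmc_lookup(const struct pmc_tables *p,
                                unsigned int type, unsigned int idx)
  {
      switch (type) {
      case PMC_TYPE_CORE:
          return idx < p->ncore ? p->chws[idx] : NULL;
      case PMC_TYPE_SYSTEM:
          return idx < p->nsystem ? p->shws[idx] : NULL;
      case PMC_TYPE_PERIPHERAL:
          return idx < p->nperiph ? p->phws[idx] : NULL;
      case PMC_TYPE_GCK:
          return idx < p->ngck ? p->ghws[idx] : NULL;
      }
      return NULL;
  }

  int main(void)
  {
      const char *periph[64] = { [35] = "tcb0_clk" };
      struct pmc_tables pmc = { .nperiph = 64, .phws = periph };

      /* models a consumer specifier <&pmc PMC_TYPE_PERIPHERAL 35> */
      printf("%s\n", pmc_lookup(&pmc, PMC_TYPE_PERIPHERAL, 35));
      return 0;
  }
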
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index d22b1fa9ecdc..672a79bda88c 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -19,6 +19,17 @@
extern spinlock_t pmc_pcr_lock;
+struct pmc_data {
+ unsigned int ncore;
+ struct clk_hw **chws;
+ unsigned int nsystem;
+ struct clk_hw **shws;
+ unsigned int nperiph;
+ struct clk_hw **phws;
+ unsigned int ngck;
+ struct clk_hw **ghws;
+};
+
struct clk_range {
unsigned long min;
unsigned long max;
@@ -26,9 +37,157 @@ struct clk_range {
#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
+struct clk_master_layout {
+ u32 mask;
+ u8 pres_shift;
+};
+
+extern const struct clk_master_layout at91rm9200_master_layout;
+extern const struct clk_master_layout at91sam9x5_master_layout;
+
+struct clk_master_characteristics {
+ struct clk_range output;
+ u32 divisors[4];
+ u8 have_div3_pres;
+};
+
+struct clk_pll_layout {
+ u32 pllr_mask;
+ u16 mul_mask;
+ u8 mul_shift;
+};
+
+extern const struct clk_pll_layout at91rm9200_pll_layout;
+extern const struct clk_pll_layout at91sam9g45_pll_layout;
+extern const struct clk_pll_layout at91sam9g20_pllb_layout;
+extern const struct clk_pll_layout sama5d3_pll_layout;
+
+struct clk_pll_characteristics {
+ struct clk_range input;
+ int num_output;
+ struct clk_range *output;
+ u16 *icpll;
+ u8 *out;
+};
+
+struct clk_programmable_layout {
+ u8 pres_shift;
+ u8 css_mask;
+ u8 have_slck_mck;
+};
+
+extern const struct clk_programmable_layout at91rm9200_programmable_layout;
+extern const struct clk_programmable_layout at91sam9g45_programmable_layout;
+extern const struct clk_programmable_layout at91sam9x5_programmable_layout;
+
+#define ndck(a, s) (a[s - 1].id + 1)
+#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
+struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
+ unsigned int nperiph, unsigned int ngck);
+void pmc_data_free(struct pmc_data *pmc_data);
+
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
+struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data);
+
+struct clk_hw * __init
+at91_clk_register_audio_pll_frac(struct regmap *regmap, const char *name,
+ const char *parent_name);
+
+struct clk_hw * __init
+at91_clk_register_audio_pll_pad(struct regmap *regmap, const char *name,
+ const char *parent_name);
+
+struct clk_hw * __init
+at91_clk_register_audio_pll_pmc(struct regmap *regmap, const char *name,
+ const char *parent_name);
+
+struct clk_hw * __init
+at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char **parent_names,
+ u8 num_parents, u8 id, bool pll_audio,
+ const struct clk_range *range);
+
+struct clk_hw * __init
+at91_clk_register_h32mx(struct regmap *regmap, const char *name,
+ const char *parent_name);
+
+struct clk_hw * __init
+at91_clk_i2s_mux_register(struct regmap *regmap, const char *name,
+ const char * const *parent_names,
+ unsigned int num_parents, u8 bus_id);
+
+struct clk_hw * __init
+at91_clk_register_main_rc_osc(struct regmap *regmap, const char *name,
+ u32 frequency, u32 accuracy);
+struct clk_hw * __init
+at91_clk_register_main_osc(struct regmap *regmap, const char *name,
+ const char *parent_name, bool bypass);
+struct clk_hw * __init
+at91_clk_register_rm9200_main(struct regmap *regmap,
+ const char *name,
+ const char *parent_name);
+struct clk_hw * __init
+at91_clk_register_sam9x5_main(struct regmap *regmap, const char *name,
+ const char **parent_names, int num_parents);
+
+struct clk_hw * __init
+at91_clk_register_master(struct regmap *regmap, const char *name,
+ int num_parents, const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics);
+
+struct clk_hw * __init
+at91_clk_register_peripheral(struct regmap *regmap, const char *name,
+ const char *parent_name, u32 id);
+struct clk_hw * __init
+at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name,
+ u32 id, const struct clk_range *range);
+
+struct clk_hw * __init
+at91_clk_register_pll(struct regmap *regmap, const char *name,
+ const char *parent_name, u8 id,
+ const struct clk_pll_layout *layout,
+ const struct clk_pll_characteristics *characteristics);
+struct clk_hw * __init
+at91_clk_register_plldiv(struct regmap *regmap, const char *name,
+ const char *parent_name);
+
+struct clk_hw * __init
+at91_clk_register_programmable(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents, u8 id,
+ const struct clk_programmable_layout *layout);
+
+struct clk_hw * __init
+at91_clk_register_sam9260_slow(struct regmap *regmap,
+ const char *name,
+ const char **parent_names,
+ int num_parents);
+
+struct clk_hw * __init
+at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents);
+
+struct clk_hw * __init
+at91_clk_register_system(struct regmap *regmap, const char *name,
+ const char *parent_name, u8 id);
+
+struct clk_hw * __init
+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents);
+struct clk_hw * __init
+at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
+ const char *parent_name);
+struct clk_hw * __init
+at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
+ const char *parent_name, const u32 *divisors);
+
+struct clk_hw * __init
+at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
+ const char *name, const char *parent_name);
+
#ifdef CONFIG_PM
void pmc_register_id(u8 id);
void pmc_register_pck(u8 pck);
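
The nck() helper added to pmc.h sizes each per-class clk_hw table from the highest .id used in a SoC description array, which is how the new SoC files feed pmc_data_allocate() (for instance nck(sama5d2_systemck) in the sama5d2.c file added below). A standalone sketch of the arithmetic, using the first and last ids of that table:

  #include <stdio.h>

  #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))
  #define nck(a)          ((a)[ARRAY_SIZE(a) - 1].id + 1)

  struct sck {
      const char *n;
      unsigned char id;
  };

  /* first and last entries of sama5d2_systemck from the file added below */
  static const struct sck sama5d2_systemck[] = {
      { .n = "ddrck", .id = 2 },
      { .n = "iscck", .id = 18 },     /* highest id used */
  };

  int main(void)
  {
      /* the table must hold ids 0..18, i.e. 19 slots */
      printf("%d\n", nck(sama5d2_systemck));
      return 0;
  }
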
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
new file mode 100644
index 000000000000..d69ad96fe988
--- /dev/null
+++ b/drivers/clk/at91/sama5d2.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 124000000, .max = 166000000 },
+ .divisors = { 1, 2, 4, 3 },
+};
+
+static u8 plla_out[] = { 0 };
+
+static u16 plla_icpll[] = { 0 };
+
+static struct clk_range plla_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 12000000, .max = 12000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} sama5d2_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "lcdck", .p = "masterck", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "iscck", .p = "masterck", .id = 18 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+} sama5d2_periph32ck[] = {
+ { .n = "macb0_clk", .id = 5, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "tdes_clk", .id = 11, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "matrix1_clk", .id = 14, },
+ { .n = "hsmc_clk", .id = 17, },
+ { .n = "pioA_clk", .id = 18, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "flx0_clk", .id = 19, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "flx1_clk", .id = 20, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "flx2_clk", .id = 21, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "flx3_clk", .id = 22, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "flx4_clk", .id = 23, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart0_clk", .id = 24, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart1_clk", .id = 25, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart2_clk", .id = 26, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart3_clk", .id = 27, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart4_clk", .id = 28, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "twi0_clk", .id = 29, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "twi1_clk", .id = 30, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "spi0_clk", .id = 33, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "spi1_clk", .id = 34, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "tcb0_clk", .id = 35, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "tcb1_clk", .id = 36, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "pwm_clk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "adc_clk", .id = 40, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uhphs_clk", .id = 41, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "udphs_clk", .id = 42, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "ssc0_clk", .id = 43, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "ssc1_clk", .id = 44, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "trng_clk", .id = 47, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "pdmic_clk", .id = 48, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "securam_clk", .id = 51, },
+ { .n = "i2s0_clk", .id = 54, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "i2s1_clk", .id = 55, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "can0_clk", .id = 56, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "can1_clk", .id = 57, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "classd_clk", .id = 59, .r = { .min = 0, .max = 83000000 }, },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+} sama5d2_periphck[] = {
+ { .n = "dma0_clk", .id = 6, },
+ { .n = "dma1_clk", .id = 7, },
+ { .n = "aes_clk", .id = 9, },
+ { .n = "aesb_clk", .id = 10, },
+ { .n = "sha_clk", .id = 12, },
+ { .n = "mpddr_clk", .id = 13, },
+ { .n = "matrix0_clk", .id = 15, },
+ { .n = "sdmmc0_hclk", .id = 31, },
+ { .n = "sdmmc1_hclk", .id = 32, },
+ { .n = "lcdc_clk", .id = 45, },
+ { .n = "isc_clk", .id = 46, },
+ { .n = "qspi0_clk", .id = 52, },
+ { .n = "qspi1_clk", .id = 53, },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+ bool pll;
+} sama5d2_gck[] = {
+ { .n = "sdmmc0_gclk", .id = 31, },
+ { .n = "sdmmc1_gclk", .id = 32, },
+ { .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "isc_gclk", .id = 46, },
+ { .n = "pdmic_gclk", .id = 48, },
+ { .n = "i2s0_gclk", .id = 54, .pll = true },
+ { .n = "i2s1_gclk", .id = 55, .pll = true },
+ { .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, },
+ { .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, },
+ { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 },
+ .pll = true },
+};
+
+static void __init sama5d2_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *sama5d2_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap, *regmap_sfr;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama5d2_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+ nck(sama5d2_systemck),
+ nck(sama5d2_periph32ck),
+ nck(sama5d2_gck));
+ if (!sama5d2_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 100000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &sama5d3_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_audio_pll_frac(regmap, "audiopll_fracck",
+ "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_audio_pll_pad(regmap, "audiopll_padck",
+ "audiopll_fracck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_audio_pll_pmc(regmap, "audiopll_pmcck",
+ "audiopll_fracck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ regmap_sfr = NULL;
+
+ hw = at91_clk_register_utmi(regmap, regmap_sfr, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->chws[PMC_MCK2] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "mck";
+ for (i = 0; i < 3; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama5d2_systemck[i].n,
+ sama5d2_systemck[i].p,
+ sama5d2_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->shws[sama5d2_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d2_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ sama5d2_periphck[i].n,
+ "masterck",
+ sama5d2_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->phws[sama5d2_periphck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d2_periph32ck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ sama5d2_periph32ck[i].n,
+ "h32mxck",
+ sama5d2_periph32ck[i].id,
+ &sama5d2_periph32ck[i].r);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->phws[sama5d2_periph32ck[i].id] = hw;
+ }
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "mck";
+ parent_names[5] = "audiopll_pmcck";
+ for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ sama5d2_gck[i].n,
+ parent_names, 6,
+ sama5d2_gck[i].id,
+ sama5d2_gck[i].pll,
+ &sama5d2_gck[i].r);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->ghws[sama5d2_gck[i].id] = hw;
+ }
+
+ if (regmap_sfr) {
+ parent_names[0] = "i2s0_clk";
+ parent_names[1] = "i2s0_gclk";
+ hw = at91_clk_i2s_mux_register(regmap_sfr, "i2s0_muxclk",
+ parent_names, 2, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->chws[PMC_I2S0_MUX] = hw;
+
+ parent_names[0] = "i2s1_clk";
+ parent_names[1] = "i2s1_gclk";
+ hw = at91_clk_i2s_mux_register(regmap_sfr, "i2s1_muxclk",
+ parent_names, 2, 1);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d2_pmc->chws[PMC_I2S1_MUX] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d2_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(sama5d2_pmc);
+}
+CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
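
sama5d2_pmc_setup() above registers the whole PMC clock tree in one pass, from the RC and crystal oscillators down to the peripheral and generated clocks, storing each clk_hw at the index consumers reference through of_clk_hw_pmc_get(). A standalone sketch of the resulting parent chain for one 32-bit-bus peripheral clock follows; the table mirrors the registration calls (only one parent of each mux is listed) and the recursive printer is purely illustrative.

  #include <stdio.h>
  #include <string.h>

  struct node { const char *name, *parent; };

  static const struct node tree[] = {
      { "mainck",    "main_osc" },    /* sam9x5 main mux: main_rc_osc or main_osc */
      { "pllack",    "mainck" },
      { "plladivck", "pllack" },
      { "utmick",    "mainck" },
      { "masterck",  "plladivck" },   /* one of the four master-clock mux inputs */
      { "h32mxck",   "masterck" },
      { "tcb0_clk",  "h32mxck" },     /* sama5d2_periph32ck entry, id 35 */
  };

  static void print_path(const char *name)
  {
      size_t i;

      for (i = 0; i < sizeof(tree) / sizeof(tree[0]); i++)
          if (!strcmp(tree[i].name, name)) {
              print_path(tree[i].parent);
              printf(" -> %s", name);
              return;
          }
      printf("%s", name);     /* root: crystal or RC oscillator */
  }

  int main(void)
  {
      print_path("tcb0_clk"); /* main_osc -> mainck -> ... -> tcb0_clk */
      printf("\n");
      return 0;
  }
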
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
new file mode 100644
index 000000000000..e358be7f6c8d
--- /dev/null
+++ b/drivers/clk/at91/sama5d4.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 125000000, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3 },
+};
+
+static u8 plla_out[] = { 0 };
+
+static u16 plla_icpll[] = { 0 };
+
+static struct clk_range plla_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 12000000, .max = 12000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} sama5d4_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "lcdck", .p = "masterck", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+} sama5d4_periph32ck[] = {
+ { .n = "pioD_clk", .id = 5 },
+ { .n = "usart0_clk", .id = 6 },
+ { .n = "usart1_clk", .id = 7 },
+ { .n = "icm_clk", .id = 9 },
+ { .n = "aes_clk", .id = 12 },
+ { .n = "tdes_clk", .id = 14 },
+ { .n = "sha_clk", .id = 15 },
+ { .n = "matrix1_clk", .id = 17 },
+ { .n = "hsmc_clk", .id = 22 },
+ { .n = "pioA_clk", .id = 23 },
+ { .n = "pioB_clk", .id = 24 },
+ { .n = "pioC_clk", .id = 25 },
+ { .n = "pioE_clk", .id = 26 },
+ { .n = "uart0_clk", .id = 27 },
+ { .n = "uart1_clk", .id = 28 },
+ { .n = "usart2_clk", .id = 29 },
+ { .n = "usart3_clk", .id = 30 },
+ { .n = "usart4_clk", .id = 31 },
+ { .n = "twi0_clk", .id = 32 },
+ { .n = "twi1_clk", .id = 33 },
+ { .n = "twi2_clk", .id = 34 },
+ { .n = "mci0_clk", .id = 35 },
+ { .n = "mci1_clk", .id = 36 },
+ { .n = "spi0_clk", .id = 37 },
+ { .n = "spi1_clk", .id = 38 },
+ { .n = "spi2_clk", .id = 39 },
+ { .n = "tcb0_clk", .id = 40 },
+ { .n = "tcb1_clk", .id = 41 },
+ { .n = "tcb2_clk", .id = 42 },
+ { .n = "pwm_clk", .id = 43 },
+ { .n = "adc_clk", .id = 44 },
+ { .n = "dbgu_clk", .id = 45 },
+ { .n = "uhphs_clk", .id = 46 },
+ { .n = "udphs_clk", .id = 47 },
+ { .n = "ssc0_clk", .id = 48 },
+ { .n = "ssc1_clk", .id = 49 },
+ { .n = "trng_clk", .id = 53 },
+ { .n = "macb0_clk", .id = 54 },
+ { .n = "macb1_clk", .id = 55 },
+ { .n = "fuse_clk", .id = 57 },
+ { .n = "securam_clk", .id = 59 },
+ { .n = "smd_clk", .id = 61 },
+ { .n = "twi3_clk", .id = 62 },
+ { .n = "catb_clk", .id = 63 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+} sama5d4_periphck[] = {
+ { .n = "dma0_clk", .id = 8 },
+ { .n = "cpkcc_clk", .id = 10 },
+ { .n = "aesb_clk", .id = 13 },
+ { .n = "mpddr_clk", .id = 16 },
+ { .n = "matrix0_clk", .id = 18 },
+ { .n = "vdec_clk", .id = 19 },
+ { .n = "dma1_clk", .id = 50 },
+ { .n = "lcdc_clk", .id = 51 },
+ { .n = "isi_clk", .id = 52 },
+};
+
+static void __init sama5d4_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *sama5d4_pmc;
+ const char *parent_names[5];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama5d4_pmc = pmc_data_allocate(PMC_MCK2 + 1,
+ nck(sama5d4_systemck),
+ nck(sama5d4_periph32ck), 0);
+ if (!sama5d4_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 100000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &sama5d3_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d4_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d4_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d4_pmc->chws[PMC_MCK2] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "mck";
+ for (i = 0; i < 3; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama5d4_systemck[i].n,
+ sama5d4_systemck[i].p,
+ sama5d4_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d4_pmc->shws[sama5d4_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d4_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ sama5d4_periphck[i].n,
+ "masterck",
+ sama5d4_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d4_pmc->phws[sama5d4_periphck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d4_periph32ck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ sama5d4_periph32ck[i].n,
+ "h32mxck",
+ sama5d4_periph32ck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d4_pmc->phws[sama5d4_periph32ck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d4_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(sama5d4_pmc);
+}
+CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 25d8c240ddfb..c68dada97316 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -301,13 +301,13 @@ static void __init of_axs10x_pll_clk_setup(struct device_node *node)
ret = clk_hw_register(NULL, &pll_clk->hw);
if (ret) {
- pr_err("failed to register %s clock\n", node->name);
+ pr_err("failed to register %pOFn clock\n", node);
goto err_unmap_lock;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
if (ret) {
- pr_err("failed to add hw provider for %s clock\n", node->name);
+ pr_err("failed to add hw provider for %pOFn clock\n", node);
goto err_unregister_clk;
}
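Throughout the conversions that follow, the pattern is the same: instead of dereferencing node->name directly, the struct device_node pointer is handed to printk with the %pOFn format specifier, which prints the node's name (%pOF prints the full path). A minimal sketch of the resulting style, using a hypothetical helper that is not taken from any of these drivers:

#include <linux/of.h>
#include <linux/printk.h>

/* Hypothetical helper: report a clock setup failure against a DT node. */
static void report_clk_setup_error(struct device_node *np, int err)
{
	/* %pOFn prints just the node name; %pOF would print the full path. */
	pr_err("failed to set up %pOFn clock: %d\n", np, err);
}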
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index 281f4322355c..e65eeef9cbaf 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -808,29 +808,29 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu,
ret = of_address_to_resource(node, 0, &res);
if (ret) {
- pr_err("%s: no valid CCU registers found for %s\n", __func__,
- node->name);
+ pr_err("%s: no valid CCU registers found for %pOFn\n", __func__,
+ node);
goto out_err;
}
range = resource_size(&res);
if (range > (resource_size_t)U32_MAX) {
- pr_err("%s: address range too large for %s\n", __func__,
- node->name);
+ pr_err("%s: address range too large for %pOFn\n", __func__,
+ node);
goto out_err;
}
ccu->range = (u32)range;
if (!ccu_data_valid(ccu)) {
- pr_err("%s: ccu data not valid for %s\n", __func__, node->name);
+ pr_err("%s: ccu data not valid for %pOFn\n", __func__, node);
goto out_err;
}
ccu->base = ioremap(res.start, ccu->range);
if (!ccu->base) {
- pr_err("%s: unable to map CCU registers for %s\n", __func__,
- node->name);
+ pr_err("%s: unable to map CCU registers for %pOFn\n", __func__,
+ node);
goto out_err;
}
ccu->node = of_node_get(node);
@@ -848,16 +848,16 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu,
ret = of_clk_add_hw_provider(node, of_clk_kona_onecell_get, ccu);
if (ret) {
- pr_err("%s: error adding ccu %s as provider (%d)\n", __func__,
- node->name, ret);
+ pr_err("%s: error adding ccu %pOFn as provider (%d)\n", __func__,
+ node, ret);
goto out_err;
}
if (!kona_ccu_init(ccu))
- pr_err("Broadcom %s initialization had errors\n", node->name);
+ pr_err("Broadcom %pOFn initialization had errors\n", node);
return;
out_err:
kona_ccu_teardown(ccu);
- pr_err("Broadcom %s setup aborted\n", node->name);
+ pr_err("Broadcom %pOFn setup aborted\n", node);
}
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 44b544157121..d571a00b5282 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -281,7 +281,7 @@ static void __init asm9260_acc_init(struct device_node *np)
base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(base))
- panic("%s: unable to map resource", np->name);
+ panic("%pOFn: unable to map resource", np);
/* register pll */
rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
@@ -292,7 +292,7 @@ static void __init asm9260_acc_init(struct device_node *np)
ref_clk, 0, rate, accuracy);
if (IS_ERR(hw))
- panic("%s: can't register REFCLK. Check DT!", np->name);
+ panic("%pOFn: can't register REFCLK. Check DT!", np);
for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index 6904ed6da504..6a7118d4250a 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -17,8 +17,65 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_clks; i++)
+ clks[i].clk = NULL;
+
+ for (i = 0; i < num_clks; i++) {
+ clks[i].clk = of_clk_get(np, i);
+ if (IS_ERR(clks[i].clk)) {
+ ret = PTR_ERR(clks[i].clk);
+ pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
+ np, i, ret);
+ clks[i].clk = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ clk_bulk_put(i, clks);
+
+ return ret;
+}
+
+static int __must_check of_clk_bulk_get_all(struct device_node *np,
+ struct clk_bulk_data **clks)
+{
+ struct clk_bulk_data *clk_bulk;
+ int num_clks;
+ int ret;
+
+ num_clks = of_clk_get_parent_count(np);
+ if (!num_clks)
+ return 0;
+
+ clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
+ if (!clk_bulk)
+ return -ENOMEM;
+
+ ret = of_clk_bulk_get(np, num_clks, clk_bulk);
+ if (ret) {
+ kfree(clk_bulk);
+ return ret;
+ }
+
+ *clks = clk_bulk;
+
+ return num_clks;
+}
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
{
@@ -59,6 +116,29 @@ err:
}
EXPORT_SYMBOL(clk_bulk_get);
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
+{
+ if (IS_ERR_OR_NULL(clks))
+ return;
+
+ clk_bulk_put(num_clks, clks);
+
+ kfree(clks);
+}
+EXPORT_SYMBOL(clk_bulk_put_all);
+
+int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ struct device_node *np = dev_of_node(dev);
+
+ if (!np)
+ return 0;
+
+ return of_clk_bulk_get_all(np, clks);
+}
+EXPORT_SYMBOL(clk_bulk_get_all);
+
#ifdef CONFIG_HAVE_CLK_PREPARE
/**
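For context, a consumer driver might use the new clk_bulk_get_all()/clk_bulk_put_all() pair roughly as below. This is a minimal sketch, not code from this patch; the function name and error handling are illustrative, and it relies on the existing bulk prepare/enable helpers from <linux/clk.h>.

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical consumer: grab and enable every clock in the "clocks" property. */
static int example_run_with_clks(struct device *dev)
{
	struct clk_bulk_data *clks = NULL;
	int num_clks, ret;

	/* Fetches every clock listed in the device's "clocks" DT property. */
	num_clks = clk_bulk_get_all(dev, &clks);
	if (num_clks < 0)
		return num_clks;

	ret = clk_bulk_prepare_enable(num_clks, clks);
	if (ret)
		goto put;

	/* ... use the hardware ... */

	clk_bulk_disable_unprepare(num_clks, clks);
put:
	clk_bulk_put_all(num_clks, clks);
	return ret;
}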
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index 0a7e7d5a7506..23c9326ea48c 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -669,8 +669,8 @@ static int cdce925_probe(struct i2c_client *client,
/* Register PLL clocks */
for (i = 0; i < data->chip_info->num_plls; ++i) {
- pll_clk_name[i] = kasprintf(GFP_KERNEL, "%s.pll%d",
- client->dev.of_node->name, i);
+ pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d",
+ client->dev.of_node, i);
init.name = pll_clk_name[i];
data->pll[i].chip = data;
data->pll[i].hw.init = &init;
@@ -703,6 +703,7 @@ static int cdce925_probe(struct i2c_client *client,
0x12 + (i*CDCE925_OFFSET_PLL),
0x07, value & 0x07);
}
+ of_node_put(np_output);
}
/* Register output clock Y1 */
@@ -710,7 +711,7 @@ static int cdce925_probe(struct i2c_client *client,
init.flags = 0;
init.num_parents = 1;
init.parent_names = &parent_name; /* Mux Y1 to input */
- init.name = kasprintf(GFP_KERNEL, "%s.Y1", client->dev.of_node->name);
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node);
data->clk[0].chip = data;
data->clk[0].hw.init = &init;
data->clk[0].index = 0;
@@ -727,8 +728,8 @@ static int cdce925_probe(struct i2c_client *client,
init.flags = CLK_SET_RATE_PARENT;
init.num_parents = 1;
for (i = 1; i < data->chip_info->num_outputs; ++i) {
- init.name = kasprintf(GFP_KERNEL, "%s.Y%d",
- client->dev.of_node->name, i+1);
+ init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d",
+ client->dev.of_node, i+1);
data->clk[i].chip = data;
data->clk[i].hw.init = &init;
data->clk[i].index = i;
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index d854e26a8ddb..12c87457eca1 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -70,6 +70,30 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get);
+int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ struct clk_bulk_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_bulk_release,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ ret = clk_bulk_get_all(dev, &devres->clks);
+ if (ret > 0) {
+ *clks = devres->clks;
+ devres->num_clks = ret;
+ devres_add(dev, devres);
+ } else {
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);
+
static int devm_clk_match(struct device *dev, void *res, void *data)
{
struct clk **c = res;
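The managed variant ties the clocks' lifetime to the device, so no explicit put is needed. A minimal probe-time sketch, with the driver and its remove path left out as assumptions:

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hypothetical probe using the devres-managed bulk getter. */
static int example_probe(struct platform_device *pdev)
{
	struct clk_bulk_data *clks;
	int num_clks;

	num_clks = devm_clk_bulk_get_all(&pdev->dev, &clks);
	if (num_clks < 0)
		return num_clks;

	/* The clocks are put automatically when the driver is unbound;
	 * disabling them on remove is omitted here for brevity. */
	return clk_bulk_prepare_enable(num_clks, clks);
}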
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 20724abd38bd..ef0ca9414f37 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -158,14 +158,14 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
int ret;
if (of_property_read_u32(node, "clock-div", &div)) {
- pr_err("%s Fixed factor clock <%s> must have a clock-div property\n",
- __func__, node->name);
+ pr_err("%s Fixed factor clock <%pOFn> must have a clock-div property\n",
+ __func__, node);
return ERR_PTR(-EIO);
}
if (of_property_read_u32(node, "clock-mult", &mult)) {
- pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n",
- __func__, node->name);
+ pr_err("%s Fixed factor clock <%pOFn> must have a clock-mult property\n",
+ __func__, node);
return ERR_PTR(-EIO);
}
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index b5c46b3f8764..6d6475c32ee5 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev)
{
struct clk *clk = platform_get_drvdata(pdev);
+ of_clk_del_provider(pdev->dev.of_node);
clk_unregister_fixed_rate(clk);
return 0;
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 40af4fbab4d2..6a43ce420492 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -233,11 +233,11 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
if (ret == -EPROBE_DEFER)
- pr_debug("%s: %s: GPIOs not yet available, retry later\n",
- node->name, __func__);
+ pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n",
+ node, __func__);
else
- pr_err("%s: %s: Can't get '%s' named GPIO property\n",
- node->name, __func__,
+ pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n",
+ node, __func__,
gpio_name);
return ret;
}
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index c4ee280f454d..a47c2b600f20 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -390,13 +390,13 @@ static void __init of_hsdk_pll_clk_setup(struct device_node *node)
ret = clk_hw_register(NULL, &pll_clk->hw);
if (ret) {
- pr_err("failed to register %s clock\n", node->name);
+ pr_err("failed to register %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
if (ret) {
- pr_err("failed to add hw provider for %s clock\n", node->name);
+ pr_err("failed to add hw provider for %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index eb953d3b0b69..02551fe4b87c 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -1,24 +1,9 @@
-/*
- * clk-max77686.c - Clock driver for Maxim 77686/MAX77802
- *
- * Copyright (C) 2012 Samsung Electornics
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// clk-max77686.c - Clock driver for Maxim 77686/MAX77802
+//
+// Copyright (C) 2012 Samsung Electronics
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 13ad6d1e5090..84a24875c629 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -97,8 +97,8 @@ static void __init nomadik_src_init(void)
}
src_base = of_iomap(np, 0);
if (!src_base) {
- pr_err("%s: must have src parent node with REGS (%s)\n",
- __func__, np->name);
+ pr_err("%s: must have src parent node with REGS (%pOFn)\n",
+ __func__, np);
return;
}
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index c5edf8f2fd19..27a86b7a34db 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -549,7 +549,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
ret = of_address_to_resource(clk_np, 0, &res);
if (ret) {
- pr_err("%s: failed to get resource, ret %d\n", clk_np->name,
+ pr_err("%pOFn: failed to get resource, ret %d\n", clk_np,
ret);
return;
}
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 7f51c01085ab..e9612e7068e9 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -195,8 +195,8 @@ static void palmas_clks_get_clk_data(struct platform_device *pdev,
prop = PALMAS_EXT_CONTROL_NSLEEP;
break;
default:
- dev_warn(&pdev->dev, "%s: Invalid ext control option: %u\n",
- node->name, prop);
+ dev_warn(&pdev->dev, "%pOFn: Invalid ext control option: %u\n",
+ node, prop);
prop = 0;
break;
}
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 3a1812f65e5d..4c30b6e799ed 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -945,8 +945,8 @@ static void __init core_mux_init(struct device_node *np)
rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
if (rc) {
- pr_err("%s: Couldn't register clk provider for node %s: %d\n",
- __func__, np->name, rc);
+ pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
+ __func__, np, rc);
return;
}
}
@@ -1199,8 +1199,8 @@ static void __init legacy_pll_init(struct device_node *np, int idx)
rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
if (rc) {
- pr_err("%s: Couldn't register clk provider for node %s: %d\n",
- __func__, np->name, rc);
+ pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
+ __func__, np, rc);
goto err_cell;
}
@@ -1360,7 +1360,7 @@ static void __init clockgen_init(struct device_node *np)
is_old_ls1021a = true;
}
if (!clockgen.regs) {
- pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
+ pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
return;
}
@@ -1406,8 +1406,8 @@ static void __init clockgen_init(struct device_node *np)
ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
if (ret) {
- pr_err("%s: Couldn't register clk provider for node %s: %d\n",
- __func__, np->name, ret);
+ pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
+ __func__, np, ret);
}
return;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index d44e0eea31ec..5b419b82f7ca 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -1,19 +1,8 @@
-/*
- * clk-s2mps11.c - Clock driver for S2MPS11.
- *
- * Copyright (C) 2013,2014 Samsung Electornics
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// clk-s2mps11.c - Clock driver for S2MPS11.
+//
+// Copyright (C) 2013,2014 Samsung Electronics
#include <linux/module.h>
#include <linux/err.h>
@@ -28,12 +17,7 @@
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/mfd/samsung/core.h>
-enum {
- S2MPS11_CLK_AP = 0,
- S2MPS11_CLK_CP,
- S2MPS11_CLK_BT,
- S2MPS11_CLKS_NUM,
-};
+#include <dt-bindings/clock/samsung,s2mps11.h>
struct s2mps11_clk {
struct sec_pmic_dev *iodev;
@@ -245,6 +229,36 @@ static const struct platform_device_id s2mps11_clk_id[] = {
};
MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
+#ifdef CONFIG_OF
+/*
+ * The device is instantiated through the parent MFD device and device matching
+ * is done through platform_device_id.
+ *
+ * However, if the device's DT node contains a proper clock compatible and the
+ * driver is built as a module, then *module* matching will be done through DT
+ * aliases. This requires an of_device_id table. At the same time this will not
+ * change the actual *device* matching, so do not add .of_match_table.
+ */
+static const struct of_device_id s2mps11_dt_match[] __used = {
+ {
+ .compatible = "samsung,s2mps11-clk",
+ .data = (void *)S2MPS11X,
+ }, {
+ .compatible = "samsung,s2mps13-clk",
+ .data = (void *)S2MPS13X,
+ }, {
+ .compatible = "samsung,s2mps14-clk",
+ .data = (void *)S2MPS14X,
+ }, {
+ .compatible = "samsung,s5m8767-clk",
+ .data = (void *)S5M8767X,
+ }, {
+ /* Sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
+#endif
+
static struct platform_driver s2mps11_clk_driver = {
.driver = {
.name = "s2mps11-clk",
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index a985bf5e1ac6..a2287c770d5c 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -132,7 +132,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
count = handle->clk_ops->count_get(handle);
if (count < 0) {
- dev_err(dev, "%s: invalid clock output count\n", np->name);
+ dev_err(dev, "%pOFn: invalid clock output count\n", np);
return -EINVAL;
}
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 25854722810e..d3ccc1cfccd5 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -207,7 +207,7 @@ static int scpi_clk_add(struct device *dev, struct device_node *np,
count = of_property_count_strings(np, "clock-output-names");
if (count < 0) {
- dev_err(dev, "%s: invalid clock output count\n", np->name);
+ dev_err(dev, "%pOFn: invalid clock output count\n", np);
return -EINVAL;
}
@@ -232,13 +232,13 @@ static int scpi_clk_add(struct device *dev, struct device_node *np,
if (of_property_read_string_index(np, "clock-output-names",
idx, &name)) {
- dev_err(dev, "invalid clock name @ %s\n", np->name);
+ dev_err(dev, "invalid clock name @ %pOFn\n", np);
return -EINVAL;
}
if (of_property_read_u32_index(np, "clock-indices",
idx, &val)) {
- dev_err(dev, "invalid clock index @ %s\n", np->name);
+ dev_err(dev, "invalid clock index @ %pOFn\n", np);
return -EINVAL;
}
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 50e7c341e97e..8bdf91b56012 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1215,8 +1215,8 @@ static int si5351_dt_parse(struct i2c_client *client,
/* per clkout properties */
for_each_child_of_node(np, child) {
if (of_property_read_u32(child, "reg", &num)) {
- dev_err(&client->dev, "missing reg property of %s\n",
- child->name);
+ dev_err(&client->dev, "missing reg property of %pOFn\n",
+ child);
goto put_child;
}
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 294850bdc195..cdaa567c8042 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -1433,7 +1433,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return;
}
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
index d3271eca3779..0ea7261d15e0 100644
--- a/drivers/clk/clk-stm32h7.c
+++ b/drivers/clk/clk-stm32h7.c
@@ -1216,7 +1216,7 @@ static void __init stm32h7_rcc_init(struct device_node *np)
/* get RCC base @ from DT */
base = of_iomap(np, 0);
if (!base) {
- pr_err("%s: unable to map resource", np->name);
+ pr_err("%pOFn: unable to map resource", np);
goto err_free_clks;
}
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index a907555b2a3d..4f48342bc280 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -2088,7 +2088,7 @@ static void stm32mp1_rcc_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
- pr_err("%s: unable to map resource", np->name);
+ pr_err("%pOFn: unable to map resource", np);
of_node_put(np);
return;
}
diff --git a/drivers/clk/clk-tango4.c b/drivers/clk/clk-tango4.c
index 34b22b7930fb..fe12a43f7a40 100644
--- a/drivers/clk/clk-tango4.c
+++ b/drivers/clk/clk-tango4.c
@@ -54,13 +54,13 @@ static void __init tango4_clkgen_setup(struct device_node *np)
const char *parent = of_clk_get_parent_name(np, 0);
if (!base)
- panic("%s: invalid address\n", np->name);
+ panic("%pOFn: invalid address\n", np);
if (readl(base + CPUCLK_DIV) & DIV_BYPASS)
- panic("%s: unsupported cpuclk setup\n", np->name);
+ panic("%pOFn: unsupported cpuclk setup\n", np);
if (readl(base + SYSCLK_DIV) & DIV_BYPASS)
- panic("%s: unsupported sysclk setup\n", np->name);
+ panic("%pOFn: unsupported sysclk setup\n", np);
writel(0x100, base + CPUCLK_DIV); /* disable frequency ramping */
@@ -77,9 +77,9 @@ static void __init tango4_clkgen_setup(struct device_node *np)
pp[3] = clk_register_fixed_factor(NULL, "sdio_clk", "cd6", 0, 1, 2);
if (IS_ERR(pp[0]) || IS_ERR(pp[1]) || IS_ERR(pp[2]) || IS_ERR(pp[3]))
- panic("%s: clk registration failed\n", np->name);
+ panic("%pOFn: clk registration failed\n", np);
if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data))
- panic("%s: clk provider registration failed\n", np->name);
+ panic("%pOFn: clk provider registration failed\n", np);
}
CLK_OF_DECLARE(tango4_clkgen, "sigma,tango4-clkgen", tango4_clkgen_setup);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d31055ae6ec6..af011974d4ec 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -924,6 +924,101 @@ static int clk_core_enable_lock(struct clk_core *core)
}
/**
+ * clk_gate_restore_context - restore context for poweroff
+ * @hw: the clk_hw pointer of clock whose state is to be restored
+ *
+ * The clock gate restore context function enables or disables
+ * the gate clock based on its enable_count. This is done in cases
+ * where the clock context is lost and, depending on the enable_count,
+ * the clock either needs to be enabled or disabled. This helps
+ * restore the hardware state of gate clocks.
+ */
+void clk_gate_restore_context(struct clk_hw *hw)
+{
+ struct clk_core *core = hw->core;
+
+ if (core->enable_count)
+ core->ops->enable(hw);
+ else
+ core->ops->disable(hw);
+}
+EXPORT_SYMBOL_GPL(clk_gate_restore_context);
+
+static int clk_core_save_context(struct clk_core *core)
+{
+ struct clk_core *child;
+ int ret = 0;
+
+ hlist_for_each_entry(child, &core->children, child_node) {
+ ret = clk_core_save_context(child);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (core->ops && core->ops->save_context)
+ ret = core->ops->save_context(core->hw);
+
+ return ret;
+}
+
+static void clk_core_restore_context(struct clk_core *core)
+{
+ struct clk_core *child;
+
+ if (core->ops && core->ops->restore_context)
+ core->ops->restore_context(core->hw);
+
+ hlist_for_each_entry(child, &core->children, child_node)
+ clk_core_restore_context(child);
+}
+
+/**
+ * clk_save_context - save clock context for poweroff
+ *
+ * Saves the context of the clock registers for power states in which the
+ * contents of the registers will be lost. Occurs deep within the suspend
+ * code. Returns 0 on success.
+ */
+int clk_save_context(void)
+{
+ struct clk_core *clk;
+ int ret;
+
+ hlist_for_each_entry(clk, &clk_root_list, child_node) {
+ ret = clk_core_save_context(clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
+ ret = clk_core_save_context(clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_save_context);
+
+/**
+ * clk_restore_context - restore clock context after poweroff
+ *
+ * Restore the saved clock context upon resume.
+ *
+ */
+void clk_restore_context(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_root_list, child_node)
+ clk_core_restore_context(core);
+
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
+ clk_core_restore_context(core);
+}
+EXPORT_SYMBOL_GPL(clk_restore_context);
+
+/**
* clk_enable - ungate a clock
* @clk: the clk being ungated
*
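The save/restore entry points above are meant to be called once per system suspend/resume cycle from platform power-management code; individual clocks participate by implementing the save_context/restore_context clk_ops hooks, and gate clocks can simply point restore_context at clk_gate_restore_context. A minimal sketch of one possible hook-up via syscore ops follows; the registration site is an assumption, not something this patch defines.

#include <linux/clk.h>
#include <linux/syscore_ops.h>

static int example_clk_syscore_suspend(void)
{
	/* Walk the clock tree and let providers snapshot their registers. */
	return clk_save_context();
}

static void example_clk_syscore_resume(void)
{
	/* Replay the snapshots once the clock controller is powered again. */
	clk_restore_context();
}

static struct syscore_ops example_clk_syscore_ops = {
	.suspend = example_clk_syscore_suspend,
	.resume  = example_clk_syscore_resume,
};

/* Somewhere in platform init: register_syscore_ops(&example_clk_syscore_ops); */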
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index fffbed5e263b..5b69e24a224f 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -303,24 +303,6 @@ static int davinci_lpsc_clk_reset(struct clk *clk, bool reset)
return 0;
}
-/*
- * REVISIT: These exported functions can be removed after a non-DT lookup is
- * added to the reset controller framework and the davinci-rproc driver is
- * updated to use the generic reset controller framework.
- */
-
-int davinci_clk_reset_assert(struct clk *clk)
-{
- return davinci_lpsc_clk_reset(clk, true);
-}
-EXPORT_SYMBOL(davinci_clk_reset_assert);
-
-int davinci_clk_reset_deassert(struct clk *clk)
-{
- return davinci_lpsc_clk_reset(clk, false);
-}
-EXPORT_SYMBOL(davinci_clk_reset_deassert);
-
static int davinci_psc_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index becdb1dd21b5..30fad7ab0d88 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -21,6 +21,13 @@ config COMMON_CLK_HI3660
help
Build the clock driver for hi3660.
+config COMMON_CLK_HI3670
+ bool "Hi3670 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
+ help
+ Build the clock driver for hi3670.
+
config COMMON_CLK_HI3798CV200
tristate "Hi3798CV200 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 2a714c0f9657..b2441b99f3d5 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
obj-$(CONFIG_COMMON_CLK_HI3516CV300) += crg-hi3516cv300.o
obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o
obj-$(CONFIG_COMMON_CLK_HI3660) += clk-hi3660.o
+obj-$(CONFIG_COMMON_CLK_HI3670) += clk-hi3670.o
obj-$(CONFIG_COMMON_CLK_HI3798CV200) += crg-hi3798cv200.o
obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
obj-$(CONFIG_RESET_HISI) += reset.o
diff --git a/drivers/clk/hisilicon/clk-hi3670.c b/drivers/clk/hisilicon/clk-hi3670.c
new file mode 100644
index 000000000000..fd8c837a6ea3
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi3670.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2001-2021, Huawei Tech. Co., Ltd.
+ * Author: chenjun <chenjun14@huawei.com>
+ *
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <dt-bindings/clock/hi3670-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+
+static const struct hisi_fixed_rate_clock hi3670_fixed_rate_clks[] = {
+ { HI3670_CLKIN_SYS, "clkin_sys", NULL, 0, 19200000, },
+ { HI3670_CLKIN_REF, "clkin_ref", NULL, 0, 32764, },
+ { HI3670_CLK_FLL_SRC, "clk_fll_src", NULL, 0, 134400000, },
+ { HI3670_CLK_PPLL0, "clk_ppll0", NULL, 0, 1660000000, },
+ { HI3670_CLK_PPLL1, "clk_ppll1", NULL, 0, 1866000000, },
+ { HI3670_CLK_PPLL2, "clk_ppll2", NULL, 0, 1920000000, },
+ { HI3670_CLK_PPLL3, "clk_ppll3", NULL, 0, 1200000000, },
+ { HI3670_CLK_PPLL4, "clk_ppll4", NULL, 0, 900000000, },
+ { HI3670_CLK_PPLL6, "clk_ppll6", NULL, 0, 393216000, },
+ { HI3670_CLK_PPLL7, "clk_ppll7", NULL, 0, 1008000000, },
+ { HI3670_CLK_PPLL_PCIE, "clk_ppll_pcie", NULL, 0, 100000000, },
+ { HI3670_CLK_PCIEPLL_REV, "clk_pciepll_rev", NULL, 0, 100000000, },
+ { HI3670_CLK_SCPLL, "clk_scpll", NULL, 0, 245760000, },
+ { HI3670_PCLK, "pclk", NULL, 0, 20000000, },
+ { HI3670_CLK_UART0_DBG, "clk_uart0_dbg", NULL, 0, 19200000, },
+ { HI3670_CLK_UART6, "clk_uart6", NULL, 0, 19200000, },
+ { HI3670_OSC32K, "osc32k", NULL, 0, 32764, },
+ { HI3670_OSC19M, "osc19m", NULL, 0, 19200000, },
+ { HI3670_CLK_480M, "clk_480m", NULL, 0, 480000000, },
+ { HI3670_CLK_INVALID, "clk_invalid", NULL, 0, 10000000, },
+};
+
+/* crgctrl */
+static const struct hisi_fixed_factor_clock hi3670_crg_fixed_factor_clks[] = {
+ { HI3670_CLK_DIV_SYSBUS, "clk_div_sysbus", "clk_mux_sysbus",
+ 1, 7, 0, },
+ { HI3670_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys",
+ 1, 6, 0, },
+ { HI3670_CLK_SD_SYS, "clk_sd_sys", "clk_sd_sys_gt",
+ 1, 6, 0, },
+ { HI3670_CLK_SDIO_SYS, "clk_sdio_sys", "clk_sdio_sys_gt",
+ 1, 6, 0, },
+ { HI3670_CLK_DIV_A53HPM, "clk_div_a53hpm", "clk_a53hpm_andgt",
+ 1, 4, 0, },
+ { HI3670_CLK_DIV_320M, "clk_div_320m", "clk_320m_pll_gt",
+ 1, 5, 0, },
+ { HI3670_PCLK_GATE_UART0, "pclk_gate_uart0", "clk_mux_uartl",
+ 1, 1, 0, },
+ { HI3670_CLK_FACTOR_UART0, "clk_factor_uart0", "clk_mux_uart0",
+ 1, 1, 0, },
+ { HI3670_CLK_FACTOR_USB3PHY_PLL, "clk_factor_usb3phy_pll", "clk_ppll0",
+ 1, 60, 0, },
+ { HI3670_CLK_GATE_ABB_USB, "clk_gate_abb_usb", "clk_gate_usb_tcxo_en",
+ 1, 1, 0, },
+ { HI3670_CLK_GATE_UFSPHY_REF, "clk_gate_ufsphy_ref", "clkin_sys",
+ 1, 1, 0, },
+ { HI3670_ICS_VOLT_HIGH, "ics_volt_high", "peri_volt_hold",
+ 1, 1, 0, },
+ { HI3670_ICS_VOLT_MIDDLE, "ics_volt_middle", "peri_volt_middle",
+ 1, 1, 0, },
+ { HI3670_VENC_VOLT_HOLD, "venc_volt_hold", "peri_volt_hold",
+ 1, 1, 0, },
+ { HI3670_VDEC_VOLT_HOLD, "vdec_volt_hold", "peri_volt_hold",
+ 1, 1, 0, },
+ { HI3670_EDC_VOLT_HOLD, "edc_volt_hold", "peri_volt_hold",
+ 1, 1, 0, },
+ { HI3670_CLK_ISP_SNCLK_FAC, "clk_isp_snclk_fac", "clk_isp_snclk_angt",
+ 1, 10, 0, },
+ { HI3670_CLK_FACTOR_RXDPHY, "clk_factor_rxdphy", "clk_andgt_rxdphy",
+ 1, 6, 0, },
+};
+
+static const struct hisi_gate_clock hi3670_crgctrl_gate_sep_clks[] = {
+ { HI3670_PPLL1_EN_ACPU, "ppll1_en_acpu", "clk_ppll1",
+ CLK_SET_RATE_PARENT, 0x0, 0, 0, },
+ { HI3670_PPLL2_EN_ACPU, "ppll2_en_acpu", "clk_ppll2",
+ CLK_SET_RATE_PARENT, 0x0, 3, 0, },
+ { HI3670_PPLL3_EN_ACPU, "ppll3_en_acpu", "clk_ppll3",
+ CLK_SET_RATE_PARENT, 0x0, 27, 0, },
+ { HI3670_PPLL1_GT_CPU, "ppll1_gt_cpu", "clk_ppll1",
+ CLK_SET_RATE_PARENT, 0x460, 16, 0, },
+ { HI3670_PPLL2_GT_CPU, "ppll2_gt_cpu", "clk_ppll2",
+ CLK_SET_RATE_PARENT, 0x460, 18, 0, },
+ { HI3670_PPLL3_GT_CPU, "ppll3_gt_cpu", "clk_ppll3",
+ CLK_SET_RATE_PARENT, 0x460, 20, 0, },
+ { HI3670_CLK_GATE_PPLL2_MEDIA, "clk_gate_ppll2_media", "clk_ppll2",
+ CLK_SET_RATE_PARENT, 0x410, 27, 0, },
+ { HI3670_CLK_GATE_PPLL3_MEDIA, "clk_gate_ppll3_media", "clk_ppll3",
+ CLK_SET_RATE_PARENT, 0x410, 28, 0, },
+ { HI3670_CLK_GATE_PPLL4_MEDIA, "clk_gate_ppll4_media", "clk_ppll4",
+ CLK_SET_RATE_PARENT, 0x410, 26, 0, },
+ { HI3670_CLK_GATE_PPLL6_MEDIA, "clk_gate_ppll6_media", "clk_ppll6",
+ CLK_SET_RATE_PARENT, 0x410, 30, 0, },
+ { HI3670_CLK_GATE_PPLL7_MEDIA, "clk_gate_ppll7_media", "clk_ppll7",
+ CLK_SET_RATE_PARENT, 0x410, 29, 0, },
+ { HI3670_PCLK_GPIO0, "pclk_gpio0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 0, 0, },
+ { HI3670_PCLK_GPIO1, "pclk_gpio1", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 1, 0, },
+ { HI3670_PCLK_GPIO2, "pclk_gpio2", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 2, 0, },
+ { HI3670_PCLK_GPIO3, "pclk_gpio3", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 3, 0, },
+ { HI3670_PCLK_GPIO4, "pclk_gpio4", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 4, 0, },
+ { HI3670_PCLK_GPIO5, "pclk_gpio5", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 5, 0, },
+ { HI3670_PCLK_GPIO6, "pclk_gpio6", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 6, 0, },
+ { HI3670_PCLK_GPIO7, "pclk_gpio7", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 7, 0, },
+ { HI3670_PCLK_GPIO8, "pclk_gpio8", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 8, 0, },
+ { HI3670_PCLK_GPIO9, "pclk_gpio9", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 9, 0, },
+ { HI3670_PCLK_GPIO10, "pclk_gpio10", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 10, 0, },
+ { HI3670_PCLK_GPIO11, "pclk_gpio11", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 11, 0, },
+ { HI3670_PCLK_GPIO12, "pclk_gpio12", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 12, 0, },
+ { HI3670_PCLK_GPIO13, "pclk_gpio13", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 13, 0, },
+ { HI3670_PCLK_GPIO14, "pclk_gpio14", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 14, 0, },
+ { HI3670_PCLK_GPIO15, "pclk_gpio15", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 15, 0, },
+ { HI3670_PCLK_GPIO16, "pclk_gpio16", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 16, 0, },
+ { HI3670_PCLK_GPIO17, "pclk_gpio17", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 17, 0, },
+ { HI3670_PCLK_GPIO20, "pclk_gpio20", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 20, 0, },
+ { HI3670_PCLK_GPIO21, "pclk_gpio21", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x10, 21, 0, },
+ { HI3670_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 28, 0, },
+ { HI3670_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 29, 0, },
+ { HI3670_HCLK_GATE_USB3OTG, "hclk_gate_usb3otg", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x0, 25, 0, },
+ { HI3670_ACLK_GATE_USB3DVFS, "aclk_gate_usb3dvfs", "autodiv_emmc0bus",
+ CLK_SET_RATE_PARENT, 0x40, 1, 0, },
+ { HI3670_HCLK_GATE_SDIO, "hclk_gate_sdio", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x0, 21, 0, },
+ { HI3670_PCLK_GATE_PCIE_SYS, "pclk_gate_pcie_sys", "clk_div_mmc1bus",
+ CLK_SET_RATE_PARENT, 0x420, 7, 0, },
+ { HI3670_PCLK_GATE_PCIE_PHY, "pclk_gate_pcie_phy", "pclk_gate_mmc1_pcie",
+ CLK_SET_RATE_PARENT, 0x420, 9, 0, },
+ { HI3670_PCLK_GATE_MMC1_PCIE, "pclk_gate_mmc1_pcie", "pclk_div_mmc1_pcie",
+ CLK_SET_RATE_PARENT, 0x30, 12, 0, },
+ { HI3670_PCLK_GATE_MMC0_IOC, "pclk_gate_mmc0_ioc", "clk_div_mmc0bus",
+ CLK_SET_RATE_PARENT, 0x40, 13, 0, },
+ { HI3670_PCLK_GATE_MMC1_IOC, "pclk_gate_mmc1_ioc", "clk_div_mmc1bus",
+ CLK_SET_RATE_PARENT, 0x420, 21, 0, },
+ { HI3670_CLK_GATE_DMAC, "clk_gate_dmac", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x30, 1, 0, },
+ { HI3670_CLK_GATE_VCODECBUS2DDR, "clk_gate_vcodecbus2ddr", "clk_div_vcodecbus",
+ CLK_SET_RATE_PARENT, 0x0, 5, 0, },
+ { HI3670_CLK_CCI400_BYPASS, "clk_cci400_bypass", "clk_ddrc_freq",
+ CLK_SET_RATE_PARENT, 0x22C, 28, 0, },
+ { HI3670_CLK_GATE_CCI400, "clk_gate_cci400", "clk_ddrc_freq",
+ CLK_SET_RATE_PARENT, 0x50, 14, 0, },
+ { HI3670_CLK_GATE_SD, "clk_gate_sd", "clk_mux_sd_sys",
+ CLK_SET_RATE_PARENT, 0x40, 17, 0, },
+ { HI3670_HCLK_GATE_SD, "hclk_gate_sd", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x0, 30, 0, },
+ { HI3670_CLK_GATE_SDIO, "clk_gate_sdio", "clk_mux_sdio_sys",
+ CLK_SET_RATE_PARENT, 0x40, 19, 0, },
+ { HI3670_CLK_GATE_A57HPM, "clk_gate_a57hpm", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x050, 9, 0, },
+ { HI3670_CLK_GATE_A53HPM, "clk_gate_a53hpm", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x050, 13, 0, },
+ { HI3670_CLK_GATE_PA_A53, "clk_gate_pa_a53", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x480, 10, 0, },
+ { HI3670_CLK_GATE_PA_A57, "clk_gate_pa_a57", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x480, 9, 0, },
+ { HI3670_CLK_GATE_PA_G3D, "clk_gate_pa_g3d", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x480, 15, 0, },
+ { HI3670_CLK_GATE_GPUHPM, "clk_gate_gpuhpm", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x050, 15, 0, },
+ { HI3670_CLK_GATE_PERIHPM, "clk_gate_perihpm", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x050, 12, 0, },
+ { HI3670_CLK_GATE_AOHPM, "clk_gate_aohpm", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x050, 11, 0, },
+ { HI3670_CLK_GATE_UART1, "clk_gate_uart1", "clk_mux_uarth",
+ CLK_SET_RATE_PARENT, 0x20, 11, 0, },
+ { HI3670_CLK_GATE_UART4, "clk_gate_uart4", "clk_mux_uarth",
+ CLK_SET_RATE_PARENT, 0x20, 14, 0, },
+ { HI3670_PCLK_GATE_UART1, "pclk_gate_uart1", "clk_mux_uarth",
+ CLK_SET_RATE_PARENT, 0x20, 11, 0, },
+ { HI3670_PCLK_GATE_UART4, "pclk_gate_uart4", "clk_mux_uarth",
+ CLK_SET_RATE_PARENT, 0x20, 14, 0, },
+ { HI3670_CLK_GATE_UART2, "clk_gate_uart2", "clk_mux_uartl",
+ CLK_SET_RATE_PARENT, 0x20, 12, 0, },
+ { HI3670_CLK_GATE_UART5, "clk_gate_uart5", "clk_mux_uartl",
+ CLK_SET_RATE_PARENT, 0x20, 15, 0, },
+ { HI3670_PCLK_GATE_UART2, "pclk_gate_uart2", "clk_mux_uartl",
+ CLK_SET_RATE_PARENT, 0x20, 12, 0, },
+ { HI3670_PCLK_GATE_UART5, "pclk_gate_uart5", "clk_mux_uartl",
+ CLK_SET_RATE_PARENT, 0x20, 15, 0, },
+ { HI3670_CLK_GATE_UART0, "clk_gate_uart0", "clk_mux_uart0",
+ CLK_SET_RATE_PARENT, 0x20, 10, 0, },
+ { HI3670_CLK_GATE_I2C3, "clk_gate_i2c3", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x20, 7, 0, },
+ { HI3670_CLK_GATE_I2C4, "clk_gate_i2c4", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x20, 27, 0, },
+ { HI3670_CLK_GATE_I2C7, "clk_gate_i2c7", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x10, 31, 0, },
+ { HI3670_PCLK_GATE_I2C3, "pclk_gate_i2c3", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x20, 7, 0, },
+ { HI3670_PCLK_GATE_I2C4, "pclk_gate_i2c4", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x20, 27, 0, },
+ { HI3670_PCLK_GATE_I2C7, "pclk_gate_i2c7", "clk_mux_i2c",
+ CLK_SET_RATE_PARENT, 0x10, 31, 0, },
+ { HI3670_CLK_GATE_SPI1, "clk_gate_spi1", "clk_mux_spi",
+ CLK_SET_RATE_PARENT, 0x20, 9, 0, },
+ { HI3670_CLK_GATE_SPI4, "clk_gate_spi4", "clk_mux_spi",
+ CLK_SET_RATE_PARENT, 0x40, 4, 0, },
+ { HI3670_PCLK_GATE_SPI1, "pclk_gate_spi1", "clk_mux_spi",
+ CLK_SET_RATE_PARENT, 0x20, 9, 0, },
+ { HI3670_PCLK_GATE_SPI4, "pclk_gate_spi4", "clk_mux_spi",
+ CLK_SET_RATE_PARENT, 0x40, 4, 0, },
+ { HI3670_CLK_GATE_USB3OTG_REF, "clk_gate_usb3otg_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x40, 0, 0, },
+ { HI3670_CLK_GATE_USB2PHY_REF, "clk_gate_usb2phy_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x410, 19, 0, },
+ { HI3670_CLK_GATE_PCIEAUX, "clk_gate_pcieaux", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x420, 8, 0, },
+ { HI3670_ACLK_GATE_PCIE, "aclk_gate_pcie", "clk_gate_mmc1_pcieaxi",
+ CLK_SET_RATE_PARENT, 0x420, 5, 0, },
+ { HI3670_CLK_GATE_MMC1_PCIEAXI, "clk_gate_mmc1_pcieaxi", "clk_div_pcieaxi",
+ CLK_SET_RATE_PARENT, 0x050, 4, 0, },
+ { HI3670_CLK_GATE_PCIEPHY_REF, "clk_gate_pciephy_ref", "clk_ppll_pcie",
+ CLK_SET_RATE_PARENT, 0x470, 14, 0, },
+ { HI3670_CLK_GATE_PCIE_DEBOUNCE, "clk_gate_pcie_debounce", "clk_ppll_pcie",
+ CLK_SET_RATE_PARENT, 0x470, 12, 0, },
+ { HI3670_CLK_GATE_PCIEIO, "clk_gate_pcieio", "clk_ppll_pcie",
+ CLK_SET_RATE_PARENT, 0x470, 13, 0, },
+ { HI3670_CLK_GATE_PCIE_HP, "clk_gate_pcie_hp", "clk_ppll_pcie",
+ CLK_SET_RATE_PARENT, 0x470, 15, 0, },
+ { HI3670_CLK_GATE_AO_ASP, "clk_gate_ao_asp", "clk_div_ao_asp",
+ CLK_SET_RATE_PARENT, 0x0, 26, 0, },
+ { HI3670_PCLK_GATE_PCTRL, "pclk_gate_pctrl", "clk_div_ptp",
+ CLK_SET_RATE_PARENT, 0x20, 31, 0, },
+ { HI3670_CLK_CSI_TRANS_GT, "clk_csi_trans_gt", "clk_div_csi_trans",
+ CLK_SET_RATE_PARENT, 0x30, 24, 0, },
+ { HI3670_CLK_DSI_TRANS_GT, "clk_dsi_trans_gt", "clk_div_dsi_trans",
+ CLK_SET_RATE_PARENT, 0x30, 25, 0, },
+ { HI3670_CLK_GATE_PWM, "clk_gate_pwm", "clk_div_ptp",
+ CLK_SET_RATE_PARENT, 0x20, 0, 0, },
+ { HI3670_ABB_AUDIO_EN0, "abb_audio_en0", "clk_gate_abb_192",
+ CLK_SET_RATE_PARENT, 0x30, 8, 0, },
+ { HI3670_ABB_AUDIO_EN1, "abb_audio_en1", "clk_gate_abb_192",
+ CLK_SET_RATE_PARENT, 0x30, 9, 0, },
+ { HI3670_ABB_AUDIO_GT_EN0, "abb_audio_gt_en0", "abb_audio_en0",
+ CLK_SET_RATE_PARENT, 0x30, 19, 0, },
+ { HI3670_ABB_AUDIO_GT_EN1, "abb_audio_gt_en1", "abb_audio_en1",
+ CLK_SET_RATE_PARENT, 0x40, 20, 0, },
+ { HI3670_CLK_GATE_DP_AUDIO_PLL_AO, "clk_gate_dp_audio_pll_ao", "clkdiv_dp_audio_pll_ao",
+ CLK_SET_RATE_PARENT, 0x00, 13, 0, },
+ { HI3670_PERI_VOLT_HOLD, "peri_volt_hold", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0, 1, 0, },
+ { HI3670_PERI_VOLT_MIDDLE, "peri_volt_middle", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0, 1, 0, },
+ { HI3670_CLK_GATE_ISP_SNCLK0, "clk_gate_isp_snclk0", "clk_isp_snclk_mux0",
+ CLK_SET_RATE_PARENT, 0x50, 16, 0, },
+ { HI3670_CLK_GATE_ISP_SNCLK1, "clk_gate_isp_snclk1", "clk_isp_snclk_mux1",
+ CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3670_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2", "clk_isp_snclk_mux2",
+ CLK_SET_RATE_PARENT, 0x50, 18, 0, },
+ { HI3670_CLK_GATE_RXDPHY0_CFG, "clk_gate_rxdphy0_cfg", "clk_mux_rxdphy_cfg",
+ CLK_SET_RATE_PARENT, 0x030, 20, 0, },
+ { HI3670_CLK_GATE_RXDPHY1_CFG, "clk_gate_rxdphy1_cfg", "clk_mux_rxdphy_cfg",
+ CLK_SET_RATE_PARENT, 0x030, 21, 0, },
+ { HI3670_CLK_GATE_RXDPHY2_CFG, "clk_gate_rxdphy2_cfg", "clk_mux_rxdphy_cfg",
+ CLK_SET_RATE_PARENT, 0x030, 22, 0, },
+ { HI3670_CLK_GATE_TXDPHY0_CFG, "clk_gate_txdphy0_cfg", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x030, 28, 0, },
+ { HI3670_CLK_GATE_TXDPHY0_REF, "clk_gate_txdphy0_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x030, 29, 0, },
+ { HI3670_CLK_GATE_TXDPHY1_CFG, "clk_gate_txdphy1_cfg", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x030, 30, 0, },
+ { HI3670_CLK_GATE_TXDPHY1_REF, "clk_gate_txdphy1_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x030, 31, 0, },
+ { HI3670_CLK_GATE_MEDIA_TCXO, "clk_gate_media_tcxo", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x40, 6, 0, },
+};
+
+static const struct hisi_gate_clock hi3670_crgctrl_gate_clks[] = {
+ { HI3670_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus",
+ CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_PCLK_ANDGT_MMC1_PCIE, "pclk_andgt_mmc1_pcie", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xf8, 13, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_GATE_VCODECBUS_GT, "clk_gate_vcodecbus_gt", "clk_mux_vcodecbus",
+ CLK_SET_RATE_PARENT, 0x0F0, 8, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_SD, "clk_andgt_sd", "clk_mux_sd_pll",
+ CLK_SET_RATE_PARENT, 0xF4, 3, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_SD_SYS_GT, "clk_sd_sys_gt", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0xF4, 5, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_SDIO, "clk_andgt_sdio", "clk_mux_sdio_pll",
+ CLK_SET_RATE_PARENT, 0xF4, 8, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_SDIO_SYS_GT, "clk_sdio_sys_gt", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0xF4, 6, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_A53HPM_ANDGT, "clk_a53hpm_andgt", "clk_mux_a53hpm",
+ CLK_SET_RATE_PARENT, 0x0F4, 7, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m",
+ CLK_SET_RATE_PARENT, 0xF8, 10, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_UARTH, "clk_andgt_uarth", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xF4, 11, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_UARTL, "clk_andgt_uartl", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xF4, 10, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_UART0, "clk_andgt_uart0", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xF4, 9, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_SPI, "clk_andgt_spi", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xF4, 13, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_PCIEAXI, "clk_andgt_pcieaxi", "clk_mux_pcieaxi",
+ CLK_SET_RATE_PARENT, 0xfc, 15, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_AO_ASP_GT, "clk_div_ao_asp_gt", "clk_mux_ao_asp",
+ CLK_SET_RATE_PARENT, 0xF4, 4, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_GATE_CSI_TRANS, "clk_gate_csi_trans", "clk_ppll2",
+ CLK_SET_RATE_PARENT, 0xF4, 14, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_GATE_DSI_TRANS, "clk_gate_dsi_trans", "clk_ppll2",
+ CLK_SET_RATE_PARENT, 0xF4, 1, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_PTP, "clk_andgt_ptp", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xF8, 5, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_OUT0, "clk_andgt_out0", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0xF0, 10, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_OUT1, "clk_andgt_out1", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0xF0, 11, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLKGT_DP_AUDIO_PLL_AO, "clkgt_dp_audio_pll_ao", "clk_ppll6",
+ CLK_SET_RATE_PARENT, 0xF8, 15, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_VDEC, "clk_andgt_vdec", "clk_mux_vdec",
+ CLK_SET_RATE_PARENT, 0xF0, 13, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_VENC, "clk_andgt_venc", "clk_mux_venc",
+ CLK_SET_RATE_PARENT, 0xF0, 9, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ISP_SNCLK_ANGT, "clk_isp_snclk_angt", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_RXDPHY, "clk_andgt_rxdphy", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x0F0, 12, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_ICS, "clk_andgt_ics", "clk_mux_ics",
+ CLK_SET_RATE_PARENT, 0xf0, 14, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_AUTODIV_DMABUS, "autodiv_dmabus", "autodiv_sysbus",
+ CLK_SET_RATE_PARENT, 0x404, 3, CLK_GATE_HIWORD_MASK, 0, },
+};
+
+static const char *const
+clk_mux_sysbus_p[] = { "clk_ppll1", "clk_ppll0", };
+static const char *const
+clk_mux_vcodecbus_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0",
+ "clk_invalid", "clk_ppll2", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_ppll3",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", };
+static const char *const
+clk_mux_sd_sys_p[] = { "clk_sd_sys", "clk_div_sd", };
+static const char *const
+clk_mux_sd_pll_p[] = { "clk_ppll0", "clk_ppll3", "clk_ppll2", "clk_ppll2", };
+static const char *const
+clk_mux_sdio_sys_p[] = { "clk_sdio_sys", "clk_div_sdio", };
+static const char *const
+clk_mux_sdio_pll_p[] = { "clk_ppll0", "clk_ppll3", "clk_ppll2", "clk_ppll2", };
+static const char *const
+clk_mux_a53hpm_p[] = { "clk_ppll0", "clk_ppll2", };
+static const char *const
+clk_mux_320m_p[] = { "clk_ppll2", "clk_ppll0", };
+static const char *const
+clk_mux_uarth_p[] = { "clkin_sys", "clk_div_uarth", };
+static const char *const
+clk_mux_uartl_p[] = { "clkin_sys", "clk_div_uartl", };
+static const char *const
+clk_mux_uart0_p[] = { "clkin_sys", "clk_div_uart0", };
+static const char *const
+clk_mux_i2c_p[] = { "clkin_sys", "clk_div_i2c", };
+static const char *const
+clk_mux_spi_p[] = { "clkin_sys", "clk_div_spi", };
+static const char *const
+clk_mux_pcieaxi_p[] = { "clkin_sys", "clk_ppll0", };
+static const char *const
+clk_mux_ao_asp_p[] = { "clk_ppll2", "clk_ppll3", };
+static const char *const
+clk_mux_vdec_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", };
+static const char *const
+clk_mux_venc_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", };
+static const char *const
+clk_isp_snclk_mux0_p[] = { "clkin_sys", "clk_isp_snclk_div0", };
+static const char *const
+clk_isp_snclk_mux1_p[] = { "clkin_sys", "clk_isp_snclk_div1", };
+static const char *const
+clk_isp_snclk_mux2_p[] = { "clkin_sys", "clk_isp_snclk_div2", };
+static const char *const
+clk_mux_rxdphy_cfg_p[] = { "clk_factor_rxdphy", "clkin_sys", };
+static const char *const
+clk_mux_ics_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", "clk_invalid",
+ "clk_ppll2", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_ppll3", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", };
+
+static const struct hisi_mux_clock hi3670_crgctrl_mux_clks[] = {
+ { HI3670_CLK_MUX_SYSBUS, "clk_mux_sysbus", clk_mux_sysbus_p,
+ ARRAY_SIZE(clk_mux_sysbus_p), CLK_SET_RATE_PARENT,
+ 0xAC, 0, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_VCODECBUS, "clk_mux_vcodecbus", clk_mux_vcodecbus_p,
+ ARRAY_SIZE(clk_mux_vcodecbus_p), CLK_SET_RATE_PARENT,
+ 0x0C8, 0, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_SD_SYS, "clk_mux_sd_sys", clk_mux_sd_sys_p,
+ ARRAY_SIZE(clk_mux_sd_sys_p), CLK_SET_RATE_PARENT,
+ 0x0B8, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_SD_PLL, "clk_mux_sd_pll", clk_mux_sd_pll_p,
+ ARRAY_SIZE(clk_mux_sd_pll_p), CLK_SET_RATE_PARENT,
+ 0x0B8, 4, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_SDIO_SYS, "clk_mux_sdio_sys", clk_mux_sdio_sys_p,
+ ARRAY_SIZE(clk_mux_sdio_sys_p), CLK_SET_RATE_PARENT,
+ 0x0C0, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_SDIO_PLL, "clk_mux_sdio_pll", clk_mux_sdio_pll_p,
+ ARRAY_SIZE(clk_mux_sdio_pll_p), CLK_SET_RATE_PARENT,
+ 0x0C0, 4, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_A53HPM, "clk_mux_a53hpm", clk_mux_a53hpm_p,
+ ARRAY_SIZE(clk_mux_a53hpm_p), CLK_SET_RATE_PARENT,
+ 0x0D4, 9, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_320M, "clk_mux_320m", clk_mux_320m_p,
+ ARRAY_SIZE(clk_mux_320m_p), CLK_SET_RATE_PARENT,
+ 0x100, 0, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_UARTH, "clk_mux_uarth", clk_mux_uarth_p,
+ ARRAY_SIZE(clk_mux_uarth_p), CLK_SET_RATE_PARENT,
+ 0xAC, 4, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_UARTL, "clk_mux_uartl", clk_mux_uartl_p,
+ ARRAY_SIZE(clk_mux_uartl_p), CLK_SET_RATE_PARENT,
+ 0xAC, 3, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_UART0, "clk_mux_uart0", clk_mux_uart0_p,
+ ARRAY_SIZE(clk_mux_uart0_p), CLK_SET_RATE_PARENT,
+ 0xAC, 2, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_I2C, "clk_mux_i2c", clk_mux_i2c_p,
+ ARRAY_SIZE(clk_mux_i2c_p), CLK_SET_RATE_PARENT,
+ 0xAC, 13, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_SPI, "clk_mux_spi", clk_mux_spi_p,
+ ARRAY_SIZE(clk_mux_spi_p), CLK_SET_RATE_PARENT,
+ 0xAC, 8, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_PCIEAXI, "clk_mux_pcieaxi", clk_mux_pcieaxi_p,
+ ARRAY_SIZE(clk_mux_pcieaxi_p), CLK_SET_RATE_PARENT,
+ 0xb4, 5, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_AO_ASP, "clk_mux_ao_asp", clk_mux_ao_asp_p,
+ ARRAY_SIZE(clk_mux_ao_asp_p), CLK_SET_RATE_PARENT,
+ 0x100, 6, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_VDEC, "clk_mux_vdec", clk_mux_vdec_p,
+ ARRAY_SIZE(clk_mux_vdec_p), CLK_SET_RATE_PARENT,
+ 0xC8, 8, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_VENC, "clk_mux_venc", clk_mux_venc_p,
+ ARRAY_SIZE(clk_mux_venc_p), CLK_SET_RATE_PARENT,
+ 0xC8, 4, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_ISP_SNCLK_MUX0, "clk_isp_snclk_mux0", clk_isp_snclk_mux0_p,
+ ARRAY_SIZE(clk_isp_snclk_mux0_p), CLK_SET_RATE_PARENT,
+ 0x108, 3, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_ISP_SNCLK_MUX1, "clk_isp_snclk_mux1", clk_isp_snclk_mux1_p,
+ ARRAY_SIZE(clk_isp_snclk_mux1_p), CLK_SET_RATE_PARENT,
+ 0x10C, 13, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_ISP_SNCLK_MUX2, "clk_isp_snclk_mux2", clk_isp_snclk_mux2_p,
+ ARRAY_SIZE(clk_isp_snclk_mux2_p), CLK_SET_RATE_PARENT,
+ 0x10C, 10, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_RXDPHY_CFG, "clk_mux_rxdphy_cfg", clk_mux_rxdphy_cfg_p,
+ ARRAY_SIZE(clk_mux_rxdphy_cfg_p), CLK_SET_RATE_PARENT,
+ 0x0C4, 8, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_ICS, "clk_mux_ics", clk_mux_ics_p,
+ ARRAY_SIZE(clk_mux_ics_p), CLK_SET_RATE_PARENT,
+ 0xc8, 12, 4, CLK_MUX_HIWORD_MASK, },
+};
+
+static const struct hisi_divider_clock hi3670_crgctrl_divider_clks[] = {
+ { HI3670_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0xEC, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus",
+ CLK_SET_RATE_PARENT, 0x0EC, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x0EC, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_PCLK_DIV_MMC1_PCIE, "pclk_div_mmc1_pcie", "pclk_andgt_mmc1_pcie",
+ CLK_SET_RATE_PARENT, 0xb4, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_VCODECBUS, "clk_div_vcodecbus", "clk_gate_vcodecbus_gt",
+ CLK_SET_RATE_PARENT, 0x0BC, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd",
+ CLK_SET_RATE_PARENT, 0xB8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio",
+ CLK_SET_RATE_PARENT, 0xC0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth",
+ CLK_SET_RATE_PARENT, 0xB0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_UARTL, "clk_div_uartl", "clk_andgt_uartl",
+ CLK_SET_RATE_PARENT, 0xB0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0",
+ CLK_SET_RATE_PARENT, 0xB0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
+ CLK_SET_RATE_PARENT, 0xE8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
+ CLK_SET_RATE_PARENT, 0xC4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_PCIEAXI, "clk_div_pcieaxi", "clk_andgt_pcieaxi",
+ CLK_SET_RATE_PARENT, 0xb4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_AO_ASP, "clk_div_ao_asp", "clk_div_ao_asp_gt",
+ CLK_SET_RATE_PARENT, 0x108, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_CSI_TRANS, "clk_div_csi_trans", "clk_gate_csi_trans",
+ CLK_SET_RATE_PARENT, 0xD4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_DSI_TRANS, "clk_div_dsi_trans", "clk_gate_dsi_trans",
+ CLK_SET_RATE_PARENT, 0xD4, 10, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_PTP, "clk_div_ptp", "clk_andgt_ptp",
+ CLK_SET_RATE_PARENT, 0xD8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_CLKOUT0_PLL, "clk_div_clkout0_pll", "clk_andgt_out0",
+ CLK_SET_RATE_PARENT, 0xe0, 4, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_CLKOUT1_PLL, "clk_div_clkout1_pll", "clk_andgt_out1",
+ CLK_SET_RATE_PARENT, 0xe0, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLKDIV_DP_AUDIO_PLL_AO, "clkdiv_dp_audio_pll_ao", "clkgt_dp_audio_pll_ao",
+ CLK_SET_RATE_PARENT, 0xBC, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
+ CLK_SET_RATE_PARENT, 0xC4, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
+ CLK_SET_RATE_PARENT, 0xC0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_ISP_SNCLK_DIV0, "clk_isp_snclk_div0", "clk_isp_snclk_fac",
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_ISP_SNCLK_DIV1, "clk_isp_snclk_div1", "clk_isp_snclk_fac",
+ CLK_SET_RATE_PARENT, 0x10C, 14, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_ISP_SNCLK_DIV2, "clk_isp_snclk_div2", "clk_isp_snclk_fac",
+ CLK_SET_RATE_PARENT, 0x10C, 11, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_ICS, "clk_div_ics", "clk_andgt_ics",
+ CLK_SET_RATE_PARENT, 0xE4, 9, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+};
+
+/* clk_pmuctrl */
+static const struct hisi_gate_clock hi3670_pmu_gate_clks[] = {
+ { HI3670_GATE_ABB_192, "clk_gate_abb_192", "clkin_sys",
+ CLK_SET_RATE_PARENT, (0x037 << 2), 0, 0, },
+};
+
+/* clk_pctrl */
+static const struct hisi_gate_clock hi3670_pctrl_gate_clks[] = {
+ { HI3670_GATE_UFS_TCXO_EN, "clk_gate_ufs_tcxo_en", "clk_gate_abb_192",
+ CLK_SET_RATE_PARENT, 0x10, 0, CLK_GATE_HIWORD_MASK, },
+ { HI3670_GATE_USB_TCXO_EN, "clk_gate_usb_tcxo_en", "clk_gate_abb_192",
+ CLK_SET_RATE_PARENT, 0x10, 1, CLK_GATE_HIWORD_MASK, },
+};
+
+/* clk_sctrl */
+static const struct hisi_gate_clock hi3670_sctrl_gate_sep_clks[] = {
+ { HI3670_PPLL0_EN_ACPU, "ppll0_en_acpu", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x190, 26, 0, },
+ { HI3670_PPLL0_GT_CPU, "ppll0_gt_cpu", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x190, 15, 0, },
+ { HI3670_CLK_GATE_PPLL0_MEDIA, "clk_gate_ppll0_media", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x1b0, 6, 0, },
+ { HI3670_PCLK_GPIO18, "pclk_gpio18", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x1B0, 9, 0, },
+ { HI3670_PCLK_GPIO19, "pclk_gpio19", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x1B0, 8, 0, },
+ { HI3670_CLK_GATE_SPI, "clk_gate_spi", "clk_div_ioperi",
+ CLK_SET_RATE_PARENT, 0x1B0, 10, 0, },
+ { HI3670_PCLK_GATE_SPI, "pclk_gate_spi", "clk_div_ioperi",
+ CLK_SET_RATE_PARENT, 0x1B0, 10, 0, },
+ { HI3670_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_ufs_subsys",
+ CLK_SET_RATE_PARENT, 0x1B0, 14, 0, },
+ { HI3670_CLK_GATE_UFSIO_REF, "clk_gate_ufsio_ref", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x1b0, 12, 0, },
+ { HI3670_PCLK_AO_GPIO0, "pclk_ao_gpio0", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 11, 0, },
+ { HI3670_PCLK_AO_GPIO1, "pclk_ao_gpio1", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 12, 0, },
+ { HI3670_PCLK_AO_GPIO2, "pclk_ao_gpio2", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 13, 0, },
+ { HI3670_PCLK_AO_GPIO3, "pclk_ao_gpio3", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 14, 0, },
+ { HI3670_PCLK_AO_GPIO4, "pclk_ao_gpio4", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 21, 0, },
+ { HI3670_PCLK_AO_GPIO5, "pclk_ao_gpio5", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 22, 0, },
+ { HI3670_PCLK_AO_GPIO6, "pclk_ao_gpio6", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 25, 0, },
+ { HI3670_CLK_GATE_OUT0, "clk_gate_out0", "clk_mux_clkout0",
+ CLK_SET_RATE_PARENT, 0x160, 16, 0, },
+ { HI3670_CLK_GATE_OUT1, "clk_gate_out1", "clk_mux_clkout1",
+ CLK_SET_RATE_PARENT, 0x160, 17, 0, },
+ { HI3670_PCLK_GATE_SYSCNT, "pclk_gate_syscnt", "clk_div_aobus",
+ CLK_SET_RATE_PARENT, 0x160, 19, 0, },
+ { HI3670_CLK_GATE_SYSCNT, "clk_gate_syscnt", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x160, 20, 0, },
+ { HI3670_CLK_GATE_ASP_SUBSYS_PERI, "clk_gate_asp_subsys_peri",
+ "clk_mux_asp_subsys_peri",
+ CLK_SET_RATE_PARENT, 0x170, 6, 0, },
+ { HI3670_CLK_GATE_ASP_SUBSYS, "clk_gate_asp_subsys", "clk_mux_asp_pll",
+ CLK_SET_RATE_PARENT, 0x170, 4, 0, },
+ { HI3670_CLK_GATE_ASP_TCXO, "clk_gate_asp_tcxo", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x160, 27, 0, },
+ { HI3670_CLK_GATE_DP_AUDIO_PLL, "clk_gate_dp_audio_pll",
+ "clk_gate_dp_audio_pll_ao",
+ CLK_SET_RATE_PARENT, 0x1B0, 7, 0, },
+};
+
+static const struct hisi_gate_clock hi3670_sctrl_gate_clks[] = {
+ { HI3670_CLK_ANDGT_IOPERI, "clk_andgt_ioperi", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x270, 6, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLKANDGT_ASP_SUBSYS_PERI, "clkandgt_asp_subsys_peri",
+ "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x268, 3, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANGT_ASP_SUBSYS, "clk_angt_asp_subsys", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x258, 0, CLK_GATE_HIWORD_MASK, 0, },
+};
+
+static const char *const
+clk_mux_ufs_subsys_p[] = { "clkin_sys", "clk_ppll0", };
+static const char *const
+clk_mux_clkout0_p[] = { "clkin_ref", "clk_div_clkout0_tcxo",
+ "clk_div_clkout0_pll", "clk_div_clkout0_pll", };
+static const char *const
+clk_mux_clkout1_p[] = { "clkin_ref", "clk_div_clkout1_tcxo",
+ "clk_div_clkout1_pll", "clk_div_clkout1_pll", };
+static const char *const
+clk_mux_asp_subsys_peri_p[] = { "clk_ppll0", "clk_fll_src", };
+static const char *const
+clk_mux_asp_pll_p[] = { "clk_ppll0", "clk_fll_src", "clk_gate_ao_asp",
+ "clk_pciepll_rev", };
+
+static const struct hisi_mux_clock hi3670_sctrl_mux_clks[] = {
+ { HI3670_CLK_MUX_UFS_SUBSYS, "clk_mux_ufs_subsys", clk_mux_ufs_subsys_p,
+ ARRAY_SIZE(clk_mux_ufs_subsys_p), CLK_SET_RATE_PARENT,
+ 0x274, 8, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_CLKOUT0, "clk_mux_clkout0", clk_mux_clkout0_p,
+ ARRAY_SIZE(clk_mux_clkout0_p), CLK_SET_RATE_PARENT,
+ 0x254, 12, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_CLKOUT1, "clk_mux_clkout1", clk_mux_clkout1_p,
+ ARRAY_SIZE(clk_mux_clkout1_p), CLK_SET_RATE_PARENT,
+ 0x254, 14, 2, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_ASP_SUBSYS_PERI, "clk_mux_asp_subsys_peri",
+ clk_mux_asp_subsys_peri_p, ARRAY_SIZE(clk_mux_asp_subsys_peri_p),
+ CLK_SET_RATE_PARENT, 0x268, 8, 1, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_ASP_PLL, "clk_mux_asp_pll", clk_mux_asp_pll_p,
+ ARRAY_SIZE(clk_mux_asp_pll_p), CLK_SET_RATE_PARENT,
+ 0x268, 9, 2, CLK_MUX_HIWORD_MASK, },
+};
+
+static const struct hisi_divider_clock hi3670_sctrl_divider_clks[] = {
+ { HI3670_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_UFS_SUBSYS, "clk_div_ufs_subsys", "clk_mux_ufs_subsys",
+ CLK_SET_RATE_PARENT, 0x274, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_andgt_ioperi",
+ CLK_SET_RATE_PARENT, 0x270, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_CLKOUT0_TCXO, "clk_div_clkout0_tcxo", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x254, 6, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_CLKOUT1_TCXO, "clk_div_clkout1_tcxo", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x254, 9, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_ASP_SUBSYS_PERI_DIV, "clk_asp_subsys_peri_div", "clkandgt_asp_subsys_peri",
+ CLK_SET_RATE_PARENT, 0x268, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_ASP_SUBSYS, "clk_div_asp_subsys", "clk_angt_asp_subsys",
+ CLK_SET_RATE_PARENT, 0x250, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+};
+
+/* clk_iomcu */
+static const struct hisi_fixed_factor_clock hi3670_iomcu_fixed_factor_clks[] = {
+ { HI3670_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_gate_iomcu", 1, 4, 0, },
+ { HI3670_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_gate_iomcu", 1, 4, 0, },
+ { HI3670_CLK_GATE_I2C2, "clk_gate_i2c2", "clk_i2c2_gate_iomcu", 1, 4, 0, },
+ { HI3670_CLK_GATE_SPI0, "clk_gate_spi0", "clk_spi0_gate_iomcu", 1, 1, 0, },
+ { HI3670_CLK_GATE_SPI2, "clk_gate_spi2", "clk_spi2_gate_iomcu", 1, 1, 0, },
+ { HI3670_CLK_GATE_UART3, "clk_gate_uart3", "clk_uart3_gate_iomcu", 1, 16, 0, },
+};
+
+static const struct hisi_gate_clock hi3670_iomcu_gate_sep_clks[] = {
+ { HI3670_CLK_I2C0_GATE_IOMCU, "clk_i2c0_gate_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 3, 0, },
+ { HI3670_CLK_I2C1_GATE_IOMCU, "clk_i2c1_gate_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 4, 0, },
+ { HI3670_CLK_I2C2_GATE_IOMCU, "clk_i2c2_gate_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 5, 0, },
+ { HI3670_CLK_SPI0_GATE_IOMCU, "clk_spi0_gate_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 10, 0, },
+ { HI3670_CLK_SPI2_GATE_IOMCU, "clk_spi2_gate_iomcu", "clk_fll_src",
+ CLK_SET_RATE_PARENT, 0x10, 30, 0, },
+ { HI3670_CLK_UART3_GATE_IOMCU, "clk_uart3_gate_iomcu", "clk_gate_iomcu_peri0",
+ CLK_SET_RATE_PARENT, 0x10, 11, 0, },
+ { HI3670_CLK_GATE_PERI0_IOMCU, "clk_gate_iomcu_peri0", "clk_ppll0",
+ CLK_SET_RATE_PARENT, 0x90, 0, 0, },
+};
+
+/* clk_media1 */
+static const struct hisi_gate_clock hi3670_media1_gate_sep_clks[] = {
+ { HI3670_ACLK_GATE_NOC_DSS, "aclk_gate_noc_dss", "aclk_gate_disp_noc_subsys",
+ CLK_SET_RATE_PARENT, 0x10, 21, 0, },
+ { HI3670_PCLK_GATE_NOC_DSS_CFG, "pclk_gate_noc_dss_cfg", "pclk_gate_disp_noc_subsys",
+ CLK_SET_RATE_PARENT, 0x10, 22, 0, },
+ { HI3670_PCLK_GATE_MMBUF_CFG, "pclk_gate_mmbuf_cfg", "pclk_gate_disp_noc_subsys",
+ CLK_SET_RATE_PARENT, 0x20, 5, 0, },
+ { HI3670_PCLK_GATE_DISP_NOC_SUBSYS, "pclk_gate_disp_noc_subsys", "clk_div_sysbus",
+ CLK_SET_RATE_PARENT, 0x10, 18, 0, },
+ { HI3670_ACLK_GATE_DISP_NOC_SUBSYS, "aclk_gate_disp_noc_subsys", "clk_gate_vivobusfreq",
+ CLK_SET_RATE_PARENT, 0x10, 17, 0, },
+ { HI3670_PCLK_GATE_DSS, "pclk_gate_dss", "pclk_gate_disp_noc_subsys",
+ CLK_SET_RATE_PARENT, 0x00, 14, 0, },
+ { HI3670_ACLK_GATE_DSS, "aclk_gate_dss", "aclk_gate_disp_noc_subsys",
+ CLK_SET_RATE_PARENT, 0x00, 19, 0, },
+ { HI3670_CLK_GATE_VIVOBUSFREQ, "clk_gate_vivobusfreq", "clk_div_vivobus",
+ CLK_SET_RATE_PARENT, 0x00, 18, 0, },
+ { HI3670_CLK_GATE_EDC0, "clk_gate_edc0", "clk_div_edc0",
+ CLK_SET_RATE_PARENT, 0x00, 15, 0, },
+ { HI3670_CLK_GATE_LDI0, "clk_gate_ldi0", "clk_div_ldi0",
+ CLK_SET_RATE_PARENT, 0x00, 16, 0, },
+ { HI3670_CLK_GATE_LDI1FREQ, "clk_gate_ldi1freq", "clk_div_ldi1",
+ CLK_SET_RATE_PARENT, 0x00, 17, 0, },
+ { HI3670_CLK_GATE_BRG, "clk_gate_brg", "clk_media_common_div",
+ CLK_SET_RATE_PARENT, 0x00, 29, 0, },
+ { HI3670_ACLK_GATE_ASC, "aclk_gate_asc", "clk_gate_mmbuf",
+ CLK_SET_RATE_PARENT, 0x20, 3, 0, },
+ { HI3670_CLK_GATE_DSS_AXI_MM, "clk_gate_dss_axi_mm", "clk_gate_mmbuf",
+ CLK_SET_RATE_PARENT, 0x20, 4, 0, },
+ { HI3670_CLK_GATE_MMBUF, "clk_gate_mmbuf", "aclk_div_mmbuf",
+ CLK_SET_RATE_PARENT, 0x20, 0, 0, },
+ { HI3670_PCLK_GATE_MMBUF, "pclk_gate_mmbuf", "pclk_div_mmbuf",
+ CLK_SET_RATE_PARENT, 0x20, 1, 0, },
+ { HI3670_CLK_GATE_ATDIV_VIVO, "clk_gate_atdiv_vivo", "clk_div_vivobus",
+ CLK_SET_RATE_PARENT, 0x010, 1, 0, },
+};
+
+static const struct hisi_gate_clock hi3670_media1_gate_clks[] = {
+ { HI3670_CLK_GATE_VIVOBUS_ANDGT, "clk_gate_vivobus_andgt", "clk_mux_vivobus",
+ CLK_SET_RATE_PARENT, 0x84, 3, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0",
+ CLK_SET_RATE_PARENT, 0x84, 7, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_LDI0, "clk_andgt_ldi0", "clk_mux_ldi0",
+ CLK_SET_RATE_PARENT, 0x84, 9, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_ANDGT_LDI1, "clk_andgt_ldi1", "clk_mux_ldi1",
+ CLK_SET_RATE_PARENT, 0x84, 8, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_sw_mmbuf",
+ CLK_SET_RATE_PARENT, 0x84, 14, CLK_GATE_HIWORD_MASK, 0, },
+ { HI3670_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "aclk_div_mmbuf",
+ CLK_SET_RATE_PARENT, 0x84, 15, CLK_GATE_HIWORD_MASK, 0, },
+};
+
+static const char *const
+clk_mux_vivobus_p[] = { "clk_invalid", "clk_invalid", "clk_gate_ppll0_media",
+ "clk_invalid", "clk_gate_ppll2_media", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_gate_ppll3_media",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", };
+static const char *const
+clk_mux_edc0_p[] = { "clk_invalid", "clk_invalid", "clk_gate_ppll0_media",
+ "clk_invalid", "clk_gate_ppll2_media", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_gate_ppll3_media",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", };
+static const char *const
+clk_mux_ldi0_p[] = { "clk_invalid", "clk_gate_ppll7_media",
+ "clk_gate_ppll0_media", "clk_invalid",
+ "clk_gate_ppll2_media", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_gate_ppll3_media", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", };
+static const char *const
+clk_mux_ldi1_p[] = { "clk_invalid", "clk_gate_ppll7_media",
+ "clk_gate_ppll0_media", "clk_invalid",
+ "clk_gate_ppll2_media", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_gate_ppll3_media", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", };
+static const char *const
+clk_sw_mmbuf_p[] = { "clk_invalid", "clk_invalid", "clk_gate_ppll0_media",
+ "clk_invalid", "clk_gate_ppll2_media", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_gate_ppll3_media",
+ "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid",
+ "clk_invalid", "clk_invalid", "clk_invalid", };
+
+static const struct hisi_mux_clock hi3670_media1_mux_clks[] = {
+ { HI3670_CLK_MUX_VIVOBUS, "clk_mux_vivobus", clk_mux_vivobus_p,
+ ARRAY_SIZE(clk_mux_vivobus_p), CLK_SET_RATE_PARENT,
+ 0x74, 6, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_EDC0, "clk_mux_edc0", clk_mux_edc0_p,
+ ARRAY_SIZE(clk_mux_edc0_p), CLK_SET_RATE_PARENT,
+ 0x68, 6, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_LDI0, "clk_mux_ldi0", clk_mux_ldi0_p,
+ ARRAY_SIZE(clk_mux_ldi0_p), CLK_SET_RATE_PARENT,
+ 0x60, 6, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_MUX_LDI1, "clk_mux_ldi1", clk_mux_ldi1_p,
+ ARRAY_SIZE(clk_mux_ldi1_p), CLK_SET_RATE_PARENT,
+ 0x64, 6, 4, CLK_MUX_HIWORD_MASK, },
+ { HI3670_CLK_SW_MMBUF, "clk_sw_mmbuf", clk_sw_mmbuf_p,
+ ARRAY_SIZE(clk_sw_mmbuf_p), CLK_SET_RATE_PARENT,
+ 0x88, 0, 4, CLK_MUX_HIWORD_MASK, },
+};
+
+static const struct hisi_divider_clock hi3670_media1_divider_clks[] = {
+ { HI3670_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_gate_vivobus_andgt",
+ CLK_SET_RATE_PARENT, 0x74, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0",
+ CLK_SET_RATE_PARENT, 0x68, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0",
+ CLK_SET_RATE_PARENT, 0x60, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1",
+ CLK_SET_RATE_PARENT, 0x64, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt",
+ CLK_SET_RATE_PARENT, 0x7C, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3670_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt",
+ CLK_SET_RATE_PARENT, 0x78, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+};
+
+/* clk_media2 */
+static const struct hisi_gate_clock hi3670_media2_gate_sep_clks[] = {
+ { HI3670_CLK_GATE_VDECFREQ, "clk_gate_vdecfreq", "clk_div_vdec",
+ CLK_SET_RATE_PARENT, 0x00, 8, 0, },
+ { HI3670_CLK_GATE_VENCFREQ, "clk_gate_vencfreq", "clk_div_venc",
+ CLK_SET_RATE_PARENT, 0x00, 5, 0, },
+ { HI3670_CLK_GATE_ICSFREQ, "clk_gate_icsfreq", "clk_div_ics",
+ CLK_SET_RATE_PARENT, 0x00, 2, 0, },
+};
+
+static void hi3670_clk_crgctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+
+ int nr = ARRAY_SIZE(hi3670_fixed_rate_clks) +
+ ARRAY_SIZE(hi3670_crgctrl_gate_sep_clks) +
+ ARRAY_SIZE(hi3670_crgctrl_gate_clks) +
+ ARRAY_SIZE(hi3670_crgctrl_mux_clks) +
+ ARRAY_SIZE(hi3670_crg_fixed_factor_clks) +
+ ARRAY_SIZE(hi3670_crgctrl_divider_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_fixed_rate(hi3670_fixed_rate_clks,
+ ARRAY_SIZE(hi3670_fixed_rate_clks),
+ clk_data);
+ hisi_clk_register_gate_sep(hi3670_crgctrl_gate_sep_clks,
+ ARRAY_SIZE(hi3670_crgctrl_gate_sep_clks),
+ clk_data);
+ hisi_clk_register_gate(hi3670_crgctrl_gate_clks,
+ ARRAY_SIZE(hi3670_crgctrl_gate_clks),
+ clk_data);
+ hisi_clk_register_mux(hi3670_crgctrl_mux_clks,
+ ARRAY_SIZE(hi3670_crgctrl_mux_clks),
+ clk_data);
+ hisi_clk_register_fixed_factor(hi3670_crg_fixed_factor_clks,
+ ARRAY_SIZE(hi3670_crg_fixed_factor_clks),
+ clk_data);
+ hisi_clk_register_divider(hi3670_crgctrl_divider_clks,
+ ARRAY_SIZE(hi3670_crgctrl_divider_clks),
+ clk_data);
+}
+
+static void hi3670_clk_pctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3670_pctrl_gate_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+ hisi_clk_register_gate(hi3670_pctrl_gate_clks,
+ ARRAY_SIZE(hi3670_pctrl_gate_clks), clk_data);
+}
+
+static void hi3670_clk_pmuctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3670_pmu_gate_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate(hi3670_pmu_gate_clks,
+ ARRAY_SIZE(hi3670_pmu_gate_clks), clk_data);
+}
+
+static void hi3670_clk_sctrl_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3670_sctrl_gate_sep_clks) +
+ ARRAY_SIZE(hi3670_sctrl_gate_clks) +
+ ARRAY_SIZE(hi3670_sctrl_mux_clks) +
+ ARRAY_SIZE(hi3670_sctrl_divider_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate_sep(hi3670_sctrl_gate_sep_clks,
+ ARRAY_SIZE(hi3670_sctrl_gate_sep_clks),
+ clk_data);
+ hisi_clk_register_gate(hi3670_sctrl_gate_clks,
+ ARRAY_SIZE(hi3670_sctrl_gate_clks),
+ clk_data);
+ hisi_clk_register_mux(hi3670_sctrl_mux_clks,
+ ARRAY_SIZE(hi3670_sctrl_mux_clks),
+ clk_data);
+ hisi_clk_register_divider(hi3670_sctrl_divider_clks,
+ ARRAY_SIZE(hi3670_sctrl_divider_clks),
+ clk_data);
+}
+
+static void hi3670_clk_iomcu_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi3670_iomcu_gate_sep_clks) +
+ ARRAY_SIZE(hi3670_iomcu_fixed_factor_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate(hi3670_iomcu_gate_sep_clks,
+ ARRAY_SIZE(hi3670_iomcu_gate_sep_clks), clk_data);
+
+ hisi_clk_register_fixed_factor(hi3670_iomcu_fixed_factor_clks,
+ ARRAY_SIZE(hi3670_iomcu_fixed_factor_clks),
+ clk_data);
+}
+
+static void hi3670_clk_media1_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+
+ int nr = ARRAY_SIZE(hi3670_media1_gate_sep_clks) +
+ ARRAY_SIZE(hi3670_media1_gate_clks) +
+ ARRAY_SIZE(hi3670_media1_mux_clks) +
+ ARRAY_SIZE(hi3670_media1_divider_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate_sep(hi3670_media1_gate_sep_clks,
+ ARRAY_SIZE(hi3670_media1_gate_sep_clks),
+ clk_data);
+ hisi_clk_register_gate(hi3670_media1_gate_clks,
+ ARRAY_SIZE(hi3670_media1_gate_clks),
+ clk_data);
+ hisi_clk_register_mux(hi3670_media1_mux_clks,
+ ARRAY_SIZE(hi3670_media1_mux_clks),
+ clk_data);
+ hisi_clk_register_divider(hi3670_media1_divider_clks,
+ ARRAY_SIZE(hi3670_media1_divider_clks),
+ clk_data);
+}
+
+static void hi3670_clk_media2_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+
+ int nr = ARRAY_SIZE(hi3670_media2_gate_sep_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate_sep(hi3670_media2_gate_sep_clks,
+ ARRAY_SIZE(hi3670_media2_gate_sep_clks),
+ clk_data);
+}
+
+static const struct of_device_id hi3670_clk_match_table[] = {
+ { .compatible = "hisilicon,hi3670-crgctrl",
+ .data = hi3670_clk_crgctrl_init },
+ { .compatible = "hisilicon,hi3670-pctrl",
+ .data = hi3670_clk_pctrl_init },
+ { .compatible = "hisilicon,hi3670-pmuctrl",
+ .data = hi3670_clk_pmuctrl_init },
+ { .compatible = "hisilicon,hi3670-sctrl",
+ .data = hi3670_clk_sctrl_init },
+ { .compatible = "hisilicon,hi3670-iomcu",
+ .data = hi3670_clk_iomcu_init },
+ { .compatible = "hisilicon,hi3670-media1-crg",
+ .data = hi3670_clk_media1_init },
+ { .compatible = "hisilicon,hi3670-media2-crg",
+ .data = hi3670_clk_media2_init },
+ { }
+};
+
+static int hi3670_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ void (*init_func)(struct device_node *np);
+
+ init_func = of_device_get_match_data(dev);
+ if (!init_func)
+ return -ENODEV;
+
+ init_func(np);
+
+ return 0;
+}
+
+static struct platform_driver hi3670_clk_driver = {
+ .probe = hi3670_clk_probe,
+ .driver = {
+ .name = "hi3670-clk",
+ .of_match_table = hi3670_clk_match_table,
+ },
+};
+
+static int __init hi3670_clk_init(void)
+{
+ return platform_driver_register(&hi3670_clk_driver);
+}
+core_initcall(hi3670_clk_init);
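
The new hi3670 driver only registers the clock providers; peripheral drivers consume them through the common clock framework via their "clocks" phandles. A minimal sketch of such a consumer follows — the function name is hypothetical and not part of this patch, while devm_clk_get(), clk_prepare_enable() and clk_get_rate() are the standard kernel APIs:

/* Hypothetical consumer of a clock registered by the hi3670 driver above.
 * The clock is resolved through the device tree "clocks" property, so the
 * consumer never references HI3670_* indices directly.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* first "clocks" entry */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* ungate before touching HW */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
	clk_disable_unprepare(clk);
	return 0;
}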
diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
index 2a5015c736ce..43e82fa64422 100644
--- a/drivers/clk/hisilicon/reset.c
+++ b/drivers/clk/hisilicon/reset.c
@@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
return NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rstc->membase = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
- if (!rstc->membase)
+ rstc->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rstc->membase))
return NULL;
spin_lock_init(&rstc->lock);
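
The reset.c hunk replaces a bare devm_ioremap() with devm_ioremap_resource(), which also validates the resource pointer and requests the memory region, and reports failure through an ERR_PTR() rather than NULL — hence the IS_ERR() check. A generic sketch of that idiom, for a probe function that returns an int (not the NULL-returning helper patched above):

/* Illustrative only: the devm_ioremap_resource() error-handling idiom. */
struct resource *res;
void __iomem *base;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);	/* propagates -EINVAL, -EBUSY, -ENOMEM, ... */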
diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c
index 9d46eac87f45..ed1b7e97a0d3 100644
--- a/drivers/clk/imx/clk-cpu.c
+++ b/drivers/clk/imx/clk-cpu.c
@@ -94,7 +94,7 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name,
init.name = name;
init.ops = &clk_cpu_ops;
- init.flags = 0;
+ init.flags = CLK_IS_CRITICAL;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 8c7c2fcb8d94..bbe0c60f4d09 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -789,6 +789,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
+ clk[IMX6QDL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
clk[IMX6QDL_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
clk[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
clk[IMX6QDL_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index eb6bcbf345a3..6fcfbbd907a5 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -386,6 +386,8 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_CLK_LCDIF_AXI] = imx_clk_gate2("lcdif_axi", "lcdif_axi_podf", base + 0x74, 6);
clks[IMX6SL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_pix_podf", base + 0x74, 8);
clks[IMX6SL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_pix_podf", base + 0x74, 10);
+ clks[IMX6SL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6SL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26);
clks[IMX6SL_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28);
clks[IMX6SL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
clks[IMX6SL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 52379ee49aec..3bd2044cf25c 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -293,6 +293,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26);
clks[IMX6SLL_CLK_OCRAM] = imx_clk_gate_flags("ocram","ahb", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index d9f2890ffe62..18527a335ace 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -431,6 +431,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18);
clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6SX_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26);
clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 361b43f9742e..fd60d1549f71 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -408,6 +408,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+ clks[IMX6UL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26);
clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 881b772c4ac9..adb08f64c691 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -379,14 +379,6 @@ static const char *pll_enet_bypass_sel[] = { "pll_enet_main", "pll_enet_main_src
static const char *pll_audio_bypass_sel[] = { "pll_audio_main", "pll_audio_main_src", };
static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_src", };
-static int const clks_init_on[] __initconst = {
- IMX7D_ARM_A7_ROOT_CLK, IMX7D_MAIN_AXI_ROOT_CLK,
- IMX7D_PLL_SYS_MAIN_480M_CLK, IMX7D_NAND_USDHC_BUS_ROOT_CLK,
- IMX7D_DRAM_PHYM_ROOT_CLK, IMX7D_DRAM_ROOT_CLK,
- IMX7D_DRAM_PHYM_ALT_ROOT_CLK, IMX7D_DRAM_ALT_ROOT_CLK,
- IMX7D_AHB_CHANNEL_ROOT_CLK, IMX7D_IPG_ROOT_CLK,
-};
-
static struct clk_onecell_data clk_data;
static struct clk ** const uart_clks[] __initconst = {
@@ -404,7 +396,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
- int i;
clks[IMX7D_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
clks[IMX7D_OSC_24M_CLK] = of_clk_get_by_name(ccm_node, "osc");
@@ -467,7 +458,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_SYS_MAIN_120M] = imx_clk_fixed_factor("pll_sys_main_120m", "pll_sys_main_clk", 1, 4);
clks[IMX7D_PLL_DRAM_MAIN_533M] = imx_clk_fixed_factor("pll_dram_533m", "pll_dram_main_clk", 1, 2);
- clks[IMX7D_PLL_SYS_MAIN_480M_CLK] = imx_clk_gate_dis("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4);
+ clks[IMX7D_PLL_SYS_MAIN_480M_CLK] = imx_clk_gate_dis_flags("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4, CLK_IS_CRITICAL);
clks[IMX7D_PLL_SYS_MAIN_240M_CLK] = imx_clk_gate_dis("pll_sys_main_240m_clk", "pll_sys_main_240m", base + 0xb0, 5);
clks[IMX7D_PLL_SYS_MAIN_120M_CLK] = imx_clk_gate_dis("pll_sys_main_120m_clk", "pll_sys_main_120m", base + 0xb0, 6);
clks[IMX7D_PLL_DRAM_MAIN_533M_CLK] = imx_clk_gate("pll_dram_533m_clk", "pll_dram_533m", base + 0x70, 12);
@@ -720,7 +711,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_divider2("nand_usdhc_root_clk", "nand_usdhc_pre_div", base + 0x8980, 0, 6);
clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_root_clk", "ahb_pre_div", base + 0x9000, 0, 6);
- clks[IMX7D_IPG_ROOT_CLK] = imx_clk_divider2("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2);
+ clks[IMX7D_IPG_ROOT_CLK] = imx_clk_divider_flags("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_PARENT);
clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3);
clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3);
clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3);
@@ -784,17 +775,17 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6);
clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6);
- clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0);
+ clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate2_flags("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0, CLK_OPS_PARENT_ENABLE);
clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
- clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0);
+ clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate2_flags("main_axi_root_clk", "axi_post_div", base + 0x4040, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0);
clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0);
- clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0);
- clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0);
- clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0);
- clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
+ clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate2_flags("dram_root_clk", "dram_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
+ clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate2_flags("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
+ clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
+ clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE);
clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
clks[IMX7D_MU_ROOT_CLK] = imx_clk_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0);
@@ -883,9 +874,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
- clk_prepare_enable(clks[clks_init_on[i]]);
-
clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]);
clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]);
clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]);
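
The imx7d change drops the clks_init_on[] array and its clk_prepare_enable() loop in favour of tagging the individual gates with CLK_IS_CRITICAL: the clk core then enables such a clock when it is registered and never gates it. A small sketch of the pattern, with made-up clock names; clk_register_gate() is the real provider API:

/* Sketch, not from this patch: a gate flagged CLK_IS_CRITICAL is enabled
 * by the clk core at registration time and is never gated by the
 * framework, which is what replaces the old clks_init_on[] enable loop.
 */
#include <linux/clk-provider.h>

static struct clk *example_critical_gate(void __iomem *reg)
{
	return clk_register_gate(NULL, "bus_always_on", "bus_root",
				 CLK_IS_CRITICAL,	/* keep enabled */
				 reg, 0 /* bit */, 0, NULL);
}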
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 8076ec040f37..5895e2237b6c 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -137,6 +137,13 @@ static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent,
shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_gate_dis_flags(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, unsigned long flags)
+{
+ return clk_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
+ shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig
new file mode 100644
index 000000000000..34dc0da79c39
--- /dev/null
+++ b/drivers/clk/ingenic/Kconfig
@@ -0,0 +1,47 @@
+menu "Ingenic JZ47xx CGU drivers"
+ depends on MIPS
+
+config INGENIC_CGU_COMMON
+ bool
+
+config INGENIC_CGU_JZ4740
+ bool "Ingenic JZ4740 CGU driver"
+ default MACH_JZ4740
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic JZ4740
+ and compatible SoCs.
+
+ If building for a JZ4740 SoC, you want to say Y here.
+
+config INGENIC_CGU_JZ4725B
+ bool "Ingenic JZ4725B CGU driver"
+ default MACH_JZ4725B
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic JZ4725B
+ and compatible SoCs.
+
+ If building for a JZ4725B SoC, you want to say Y here.
+
+config INGENIC_CGU_JZ4770
+ bool "Ingenic JZ4770 CGU driver"
+ default MACH_JZ4770
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic JZ4770
+ and compatible SoCs.
+
+ If building for a JZ4770 SoC, you want to say Y here.
+
+config INGENIC_CGU_JZ4780
+ bool "Ingenic JZ4780 CGU driver"
+ default MACH_JZ4780
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic JZ4780
+ and compatible SoCs.
+
+ If building for a JZ4780 SoC, you want to say Y here.
+
+endmenu
diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile
index 1456e4cdb562..00a79b2fba10 100644
--- a/drivers/clk/ingenic/Makefile
+++ b/drivers/clk/ingenic/Makefile
@@ -1,4 +1,5 @@
-obj-y += cgu.o
-obj-$(CONFIG_MACH_JZ4740) += jz4740-cgu.o
-obj-$(CONFIG_MACH_JZ4770) += jz4770-cgu.o
-obj-$(CONFIG_MACH_JZ4780) += jz4780-cgu.o
+obj-$(CONFIG_INGENIC_CGU_COMMON) += cgu.o
+obj-$(CONFIG_INGENIC_CGU_JZ4740) += jz4740-cgu.o
+obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o
+obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o
+obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
new file mode 100644
index 000000000000..584ff4ff81c7
--- /dev/null
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic JZ4725B SoC CGU driver
+ *
+ * Copyright (C) 2018 Paul Cercueil
+ * Author: Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <dt-bindings/clock/jz4725b-cgu.h>
+#include "cgu.h"
+
+/* CGU register offsets */
+#define CGU_REG_CPCCR 0x00
+#define CGU_REG_LCR 0x04
+#define CGU_REG_CPPCR 0x10
+#define CGU_REG_CLKGR 0x20
+#define CGU_REG_OPCR 0x24
+#define CGU_REG_I2SCDR 0x60
+#define CGU_REG_LPCDR 0x64
+#define CGU_REG_MSCCDR 0x68
+#define CGU_REG_SSICDR 0x74
+#define CGU_REG_CIMCDR 0x78
+
+/* bits within the LCR register */
+#define LCR_SLEEP BIT(0)
+
+static struct ingenic_cgu *cgu;
+
+static const s8 pll_od_encoding[4] = {
+ 0x0, 0x1, -1, 0x3,
+};
+
+static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
+
+ /* External clocks */
+
+ [JZ4725B_CLK_EXT] = { "ext", CGU_CLK_EXT },
+ [JZ4725B_CLK_OSC32K] = { "osc32k", CGU_CLK_EXT },
+
+ [JZ4725B_CLK_PLL] = {
+ "pll", CGU_CLK_PLL,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_CPPCR,
+ .m_shift = 23,
+ .m_bits = 9,
+ .m_offset = 2,
+ .n_shift = 18,
+ .n_bits = 5,
+ .n_offset = 2,
+ .od_shift = 16,
+ .od_bits = 2,
+ .od_max = 4,
+ .od_encoding = pll_od_encoding,
+ .stable_bit = 10,
+ .bypass_bit = 9,
+ .enable_bit = 8,
+ },
+ },
+
+ /* Muxes & dividers */
+
+ [JZ4725B_CLK_PLL_HALF] = {
+ "pll half", CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 },
+ },
+
+ [JZ4725B_CLK_CCLK] = {
+ "cclk", CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ },
+
+ [JZ4725B_CLK_HCLK] = {
+ "hclk", CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
+ },
+
+ [JZ4725B_CLK_PCLK] = {
+ "pclk", CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1 },
+ },
+
+ [JZ4725B_CLK_MCLK] = {
+ "mclk", CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 },
+ },
+
+ [JZ4725B_CLK_IPU] = {
+ "ipu", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_PLL, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 13 },
+ },
+
+ [JZ4725B_CLK_LCD] = {
+ "lcd", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_PLL_HALF, -1, -1, -1 },
+ .div = { CGU_REG_LPCDR, 0, 1, 11, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 9 },
+ },
+
+ [JZ4725B_CLK_I2S] = {
+ "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
+ .mux = { CGU_REG_CPCCR, 31, 1 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 6 },
+ },
+
+ [JZ4725B_CLK_SPI] = {
+ "spi", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 31, 1 },
+ .div = { CGU_REG_SSICDR, 0, 1, 4, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 4 },
+ },
+
+ [JZ4725B_CLK_MMC_MUX] = {
+ "mmc_mux", CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_PLL_HALF, -1, -1, -1 },
+ .div = { CGU_REG_MSCCDR, 0, 1, 5, -1, -1, -1 },
+ },
+
+ [JZ4725B_CLK_UDC] = {
+ "udc", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
+ .mux = { CGU_REG_CPCCR, 29, 1 },
+ .div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
+ },
+
+ /* Gate-only clocks */
+
+ [JZ4725B_CLK_UART] = {
+ "uart", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 0 },
+ },
+
+ [JZ4725B_CLK_DMA] = {
+ "dma", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 12 },
+ },
+
+ [JZ4725B_CLK_ADC] = {
+ "adc", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 7 },
+ },
+
+ [JZ4725B_CLK_I2C] = {
+ "i2c", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 3 },
+ },
+
+ [JZ4725B_CLK_AIC] = {
+ "aic", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 5 },
+ },
+
+ [JZ4725B_CLK_MMC0] = {
+ "mmc0", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_MMC_MUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 6 },
+ },
+
+ [JZ4725B_CLK_MMC1] = {
+ "mmc1", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_MMC_MUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 16 },
+ },
+
+ [JZ4725B_CLK_BCH] = {
+ "bch", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_MCLK/* not sure */, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 11 },
+ },
+
+ [JZ4725B_CLK_TCU] = {
+ "tcu", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT/* not sure */, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 1 },
+ },
+
+ [JZ4725B_CLK_EXT512] = {
+ "ext/512", CGU_CLK_FIXDIV,
+ .parents = { JZ4725B_CLK_EXT },
+
+ /* Doc calls it EXT512, but it seems to be /256... */
+ .fixdiv = { 256 },
+ },
+
+ [JZ4725B_CLK_RTC] = {
+ "rtc", CGU_CLK_MUX,
+ .parents = { JZ4725B_CLK_EXT512, JZ4725B_CLK_OSC32K, -1, -1 },
+ .mux = { CGU_REG_OPCR, 2, 1},
+ },
+};
+
+static void __init jz4725b_cgu_init(struct device_node *np)
+{
+ int retval;
+
+ cgu = ingenic_cgu_new(jz4725b_cgu_clocks,
+ ARRAY_SIZE(jz4725b_cgu_clocks), np);
+ if (!cgu) {
+ pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
+
+ retval = ingenic_cgu_register_clocks(cgu);
+ if (retval)
+ pr_err("%s: failed to register CGU Clocks\n", __func__);
+}
+CLK_OF_DECLARE(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init);
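
Each entry in jz4725b_cgu_clocks[] ORs together the capabilities (CGU_CLK_MUX, CGU_CLK_DIV, CGU_CLK_GATE) and gives the register fields the common CGU code should drive. As a reading aid, the "i2s" entry selects its parent with a one-bit field at CPCCR bit 31 (0 = ext, 1 = pll half). The helper below is purely illustrative — it is not how the CGU core reads the mux, just a demonstration of the register layout encoded in that entry:

/* Illustrative decode of the "i2s" mux field above (CGU_REG_CPCCR = 0x00,
 * shift 31, width 1). Function name is hypothetical.
 */
#include <linux/bits.h>
#include <linux/io.h>

static bool i2s_uses_pll_half(void __iomem *cgu_base)
{
	u32 cpccr = readl(cgu_base + 0x00);	/* CGU_REG_CPCCR */

	return cpccr & BIT(31);			/* 1 = "pll half" parent */
}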
diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig
index 7e9f0176578a..b04927d06cd1 100644
--- a/drivers/clk/keystone/Kconfig
+++ b/drivers/clk/keystone/Kconfig
@@ -7,7 +7,7 @@ config COMMON_CLK_KEYSTONE
config TI_SCI_CLK
tristate "TI System Control Interface clock drivers"
- depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
+ depends on (ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST) && OF
depends on TI_SCI_PROTOCOL
default ARCH_KEYSTONE
---help---
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index aed5af23895b..4ed9b29ba438 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -245,7 +245,7 @@ static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock)
return;
}
- pr_err("%s: error registering clk %s\n", __func__, node->name);
+ pr_err("%s: error registering clk %pOFn\n", __func__, node);
unmap_domain:
iounmap(data->domain_base);
@@ -266,3 +266,8 @@ static void __init of_keystone_psc_clk_init(struct device_node *node)
}
CLK_OF_DECLARE(keystone_gate_clk, "ti,keystone,psc-clock",
of_keystone_psc_clk_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Clock driver for Keystone 2 based devices");
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index e7e840fb74ea..349540469fc0 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -219,7 +219,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
}
out:
- pr_err("%s: error initializing pll %s\n", __func__, node->name);
+ pr_err("%s: error initializing pll %pOFn\n", __func__, node);
kfree(pll_data);
}
@@ -338,3 +338,8 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
pr_err("%s: error registering mux %s\n", __func__, clk_name);
}
CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PLL clock driver for Keystone devices");
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 4dda8988b2f0..ab6ab07f53e6 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -249,11 +249,6 @@ static const char * const msdc30_parents[] = {
"univpll2_d4"
};
-static const char * const audio_parents[] = {
- "clk26m",
- "syspll1_d16"
-};
-
static const char * const aud_intbus_parents[] = {
"clk26m",
"syspll1_d4",
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index a0ed41e73bde..5f6c860aa122 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -101,10 +101,16 @@ static const char * const mst_mux_parent_names[] = {
"axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7",
};
-#define AXG_MST_MCLK_MUX(_name, _reg) \
- AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, CLK_MUX_ROUND_CLOSEST, \
+#define AXG_MST_MUX(_name, _reg, _flag) \
+ AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
mst_mux_parent_names, CLK_SET_RATE_PARENT)
+#define AXG_MST_MCLK_MUX(_name, _reg) \
+ AXG_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
+
+#define AXG_MST_SYS_MUX(_name, _reg) \
+ AXG_MST_MUX(_name, _reg, 0)
+
static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
@@ -112,13 +118,19 @@ static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_MCLK_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AXG_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+
+#define AXG_MST_DIV(_name, _reg, _flag) \
+ AXG_AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
+ "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
+
+#define AXG_MST_MCLK_DIV(_name, _reg) \
+ AXG_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
-#define AXG_MST_MCLK_DIV(_name, _reg) \
- AXG_AUD_DIV(_name##_div, _reg, 0, 16, CLK_DIVIDER_ROUND_CLOSEST, \
- "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
+#define AXG_MST_SYS_DIV(_name, _reg) \
+ AXG_MST_DIV(_name, _reg, 0)
static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
@@ -127,12 +139,12 @@ static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_MCLK_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AXG_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AXG_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-#define AXG_MST_MCLK_GATE(_name, _reg) \
- AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
+#define AXG_MST_MCLK_GATE(_name, _reg) \
+ AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
CLK_SET_RATE_PARENT)
static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 00ce62ad6416..c981159b02c0 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -22,8 +22,13 @@
static DEFINE_SPINLOCK(meson_clk_lock);
-static struct clk_regmap axg_fixed_pll = {
+static struct clk_regmap axg_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_MPLL_CNTL,
.shift = 0,
@@ -34,11 +39,6 @@ static struct clk_regmap axg_fixed_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.frac = {
.reg_off = HHI_MPLL_CNTL2,
.shift = 0,
@@ -56,15 +56,39 @@ static struct clk_regmap axg_fixed_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "fixed_pll",
+ .name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
-static struct clk_regmap axg_sys_pll = {
+static struct clk_regmap axg_fixed_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ /*
+ * This clock won't ever change at runtime so
+ * CLK_SET_RATE_PARENT is not required
+ */
+ },
+};
+
+static struct clk_regmap axg_sys_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 0,
@@ -75,11 +99,6 @@ static struct clk_regmap axg_sys_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.l = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 31,
@@ -92,102 +111,59 @@ static struct clk_regmap axg_sys_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "sys_pll",
+ .name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
},
};
-static const struct pll_rate_table axg_gp0_pll_rate_table[] = {
- PLL_RATE(240000000, 40, 1, 2),
- PLL_RATE(246000000, 41, 1, 2),
- PLL_RATE(252000000, 42, 1, 2),
- PLL_RATE(258000000, 43, 1, 2),
- PLL_RATE(264000000, 44, 1, 2),
- PLL_RATE(270000000, 45, 1, 2),
- PLL_RATE(276000000, 46, 1, 2),
- PLL_RATE(282000000, 47, 1, 2),
- PLL_RATE(288000000, 48, 1, 2),
- PLL_RATE(294000000, 49, 1, 2),
- PLL_RATE(300000000, 50, 1, 2),
- PLL_RATE(306000000, 51, 1, 2),
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(318000000, 53, 1, 2),
- PLL_RATE(324000000, 54, 1, 2),
- PLL_RATE(330000000, 55, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(342000000, 57, 1, 2),
- PLL_RATE(348000000, 58, 1, 2),
- PLL_RATE(354000000, 59, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(366000000, 61, 1, 2),
- PLL_RATE(372000000, 62, 1, 2),
- PLL_RATE(378000000, 63, 1, 2),
- PLL_RATE(384000000, 64, 1, 2),
- PLL_RATE(390000000, 65, 1, 3),
- PLL_RATE(396000000, 66, 1, 3),
- PLL_RATE(402000000, 67, 1, 3),
- PLL_RATE(408000000, 68, 1, 3),
- PLL_RATE(480000000, 40, 1, 1),
- PLL_RATE(492000000, 41, 1, 1),
- PLL_RATE(504000000, 42, 1, 1),
- PLL_RATE(516000000, 43, 1, 1),
- PLL_RATE(528000000, 44, 1, 1),
- PLL_RATE(540000000, 45, 1, 1),
- PLL_RATE(552000000, 46, 1, 1),
- PLL_RATE(564000000, 47, 1, 1),
- PLL_RATE(576000000, 48, 1, 1),
- PLL_RATE(588000000, 49, 1, 1),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(612000000, 51, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(636000000, 53, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(660000000, 55, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(684000000, 57, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(708000000, 59, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(732000000, 61, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(756000000, 63, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(780000000, 65, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
- PLL_RATE(804000000, 67, 1, 1),
- PLL_RATE(816000000, 68, 1, 1),
- PLL_RATE(960000000, 40, 1, 0),
- PLL_RATE(984000000, 41, 1, 0),
- PLL_RATE(1008000000, 42, 1, 0),
- PLL_RATE(1032000000, 43, 1, 0),
- PLL_RATE(1056000000, 44, 1, 0),
- PLL_RATE(1080000000, 45, 1, 0),
- PLL_RATE(1104000000, 46, 1, 0),
- PLL_RATE(1128000000, 47, 1, 0),
- PLL_RATE(1152000000, 48, 1, 0),
- PLL_RATE(1176000000, 49, 1, 0),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
- PLL_RATE(1512000000, 63, 1, 0),
- PLL_RATE(1536000000, 64, 1, 0),
- PLL_RATE(1560000000, 65, 1, 0),
- PLL_RATE(1584000000, 66, 1, 0),
- PLL_RATE(1608000000, 67, 1, 0),
- PLL_RATE(1632000000, 68, 1, 0),
+static struct clk_regmap axg_sys_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct pll_params_table axg_gp0_pll_params_table[] = {
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(41, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
+ PLL_PARAMS(67, 1),
+ PLL_PARAMS(68, 1),
{ /* sentinel */ },
};
@@ -197,11 +173,15 @@ static const struct reg_sequence axg_gp0_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d },
{ .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 },
- { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 },
};
-static struct clk_regmap axg_gp0_pll = {
+static struct clk_regmap axg_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 0,
@@ -212,11 +192,6 @@ static struct clk_regmap axg_gp0_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.frac = {
.reg_off = HHI_GP0_PLL_CNTL1,
.shift = 0,
@@ -232,29 +207,49 @@ static struct clk_regmap axg_gp0_pll = {
.shift = 29,
.width = 1,
},
- .table = axg_gp0_pll_rate_table,
+ .table = axg_gp0_pll_params_table,
.init_regs = axg_gp0_init_regs,
.init_count = ARRAY_SIZE(axg_gp0_init_regs),
},
.hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
+ .name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
+static struct clk_regmap axg_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct reg_sequence axg_hifi_init_regs[] = {
{ .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
{ .reg = HHI_HIFI_PLL_CNTL4, .def = 0xc000004d },
{ .reg = HHI_HIFI_PLL_CNTL5, .def = 0x00058000 },
- { .reg = HHI_HIFI_PLL_CNTL, .def = 0x40010250 },
};
-static struct clk_regmap axg_hifi_pll = {
+static struct clk_regmap axg_hifi_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_HIFI_PLL_CNTL,
.shift = 0,
@@ -265,11 +260,6 @@ static struct clk_regmap axg_hifi_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_HIFI_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.frac = {
.reg_off = HHI_HIFI_PLL_CNTL5,
.shift = 0,
@@ -285,19 +275,35 @@ static struct clk_regmap axg_hifi_pll = {
.shift = 29,
.width = 1,
},
- .table = axg_gp0_pll_rate_table,
+ .table = axg_gp0_pll_params_table,
.init_regs = axg_hifi_init_regs,
.init_count = ARRAY_SIZE(axg_hifi_init_regs),
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "hifi_pll",
+ .name = "hifi_pll_dco",
.ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
+static struct clk_regmap axg_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "hifi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_fixed_factor axg_fclk_div2_div = {
.mult = 1,
.div = 2,
@@ -625,29 +631,31 @@ static struct clk_regmap axg_mpll3 = {
},
};
-static const struct pll_rate_table axg_pcie_pll_rate_table[] = {
+static const struct pll_params_table axg_pcie_pll_params_table[] = {
{
- .rate = 100000000,
- .m = 200,
- .n = 3,
- .od = 1,
- .od2 = 3,
+ .m = 200,
+ .n = 3,
},
{ /* sentinel */ },
};
static const struct reg_sequence axg_pcie_init_regs[] = {
- { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 },
{ .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
{ .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
{ .reg = HHI_PCIE_PLL_CNTL4, .def = 0xc000004d },
{ .reg = HHI_PCIE_PLL_CNTL5, .def = 0x00078000 },
{ .reg = HHI_PCIE_PLL_CNTL6, .def = 0x002323c6 },
+ { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 },
};
-static struct clk_regmap axg_pcie_pll = {
+static struct clk_regmap axg_pcie_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_PCIE_PLL_CNTL,
.shift = 0,
@@ -658,16 +666,6 @@ static struct clk_regmap axg_pcie_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_PCIE_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .od2 = {
- .reg_off = HHI_PCIE_PLL_CNTL6,
- .shift = 6,
- .width = 2,
- },
.frac = {
.reg_off = HHI_PCIE_PLL_CNTL1,
.shift = 0,
@@ -683,29 +681,63 @@ static struct clk_regmap axg_pcie_pll = {
.shift = 29,
.width = 1,
},
- .table = axg_pcie_pll_rate_table,
+ .table = axg_pcie_pll_params_table,
.init_regs = axg_pcie_init_regs,
.init_count = ARRAY_SIZE(axg_pcie_init_regs),
},
.hw.init = &(struct clk_init_data){
- .name = "pcie_pll",
+ .name = "pcie_pll_dco",
.ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
+static struct clk_regmap axg_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "pcie_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_pcie_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL6,
+ .shift = 6,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "pcie_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap axg_pcie_mux = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_PCIE_PLL_CNTL6,
.mask = 0x1,
.shift = 2,
+ /* skip the parent mpll3, reserved for debug */
+ .table = (u32[]){ 1 },
},
.hw.init = &(struct clk_init_data){
.name = "pcie_mux",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "mpll3", "pcie_pll" },
- .num_parents = 2,
+ .parent_names = (const char *[]){ "pcie_pll" },
+ .num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1107,6 +1139,12 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw,
[CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw,
[CLKID_GEN_CLK] = &axg_gen_clk.hw,
+ [CLKID_SYS_PLL_DCO] = &axg_sys_pll_dco.hw,
+ [CLKID_FIXED_PLL_DCO] = &axg_fixed_pll_dco.hw,
+ [CLKID_GP0_PLL_DCO] = &axg_gp0_pll_dco.hw,
+ [CLKID_HIFI_PLL_DCO] = &axg_hifi_pll_dco.hw,
+ [CLKID_PCIE_PLL_DCO] = &axg_pcie_pll_dco.hw,
+ [CLKID_PCIE_PLL_OD] = &axg_pcie_pll_od.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1185,6 +1223,8 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_fclk_div4,
&axg_fclk_div5,
&axg_fclk_div7,
+ &axg_pcie_pll_dco,
+ &axg_pcie_pll_od,
&axg_pcie_pll,
&axg_pcie_mux,
&axg_pcie_ref,
@@ -1194,6 +1234,12 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_gen_clk_sel,
&axg_gen_clk_div,
&axg_gen_clk,
+ &axg_fixed_pll_dco,
+ &axg_sys_pll_dco,
+ &axg_gp0_pll_dco,
+ &axg_hifi_pll_dco,
+ &axg_pcie_pll_dco,
+ &axg_pcie_pll_od,
};
static const struct of_device_id clkc_match_table[] = {
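
The axg.c rework splits each PLL into a *_dco clock (the m/n/frac VCO) and a separate CLK_DIVIDER_POWER_OF_TWO post divider modelling the former "od" field, so the output rate is the DCO rate shifted right by the OD register value. A numeric sketch, assuming the usual 24 MHz xtal; the values match the removed PLL_RATE(744000000, 62, 1, 1) table entry, and the function is hypothetical:

/* Numeric sketch of the dco/od split: 24 MHz * 62 / 1 = 1488 MHz at the
 * DCO, and an OD register value of 1 on the power-of-two post divider
 * gives 1488 MHz >> 1 = 744 MHz at gp0_pll.
 */
static unsigned long axg_pll_rate_example(void)
{
	unsigned long xtal = 24000000UL;
	unsigned int m = 62, n = 1, od = 1;

	return (xtal * m / n) >> od;	/* 744000000 */
}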
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index 1d04144a1b2c..0431dabac629 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -133,8 +133,14 @@
#define CLKID_PCIE_REF 78
#define CLKID_GEN_CLK_SEL 82
#define CLKID_GEN_CLK_DIV 83
+#define CLKID_SYS_PLL_DCO 85
+#define CLKID_FIXED_PLL_DCO 86
+#define CLKID_GP0_PLL_DCO 87
+#define CLKID_HIFI_PLL_DCO 88
+#define CLKID_PCIE_PLL_DCO 89
+#define CLKID_PCIE_PLL_OD 90
-#define NR_CLKS 85
+#define NR_CLKS 91
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 3e04617ac47f..f5b5b3fabe3c 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -11,15 +11,19 @@
* In the most basic form, a Meson PLL is composed as follows:
*
* PLL
- * +------------------------------+
- * | |
- * in -----[ /N ]---[ *M ]---[ >>OD ]----->> out
- * | ^ ^ |
- * +------------------------------+
- * | |
- * FREF VCO
+ * +--------------------------------+
+ * | |
+ * | +--+ |
+ * in >>-----[ /N ]--->| | +-----+ |
+ * | | |------| DCO |---->> out
+ * | +--------->| | +--v--+ |
+ * | | +--+ | |
+ * | | | |
+ * | +--[ *(M + (F/Fmax) ]<--+ |
+ * | |
+ * +--------------------------------+
*
- * out = in * (m + frac / frac_max) / (n << sum(ods))
+ * out = in * (m + frac / frac_max) / n
*/
#include <linux/clk-provider.h>
@@ -41,12 +45,11 @@ meson_clk_pll_data(struct clk_regmap *clk)
}
static unsigned long __pll_params_to_rate(unsigned long parent_rate,
- const struct pll_rate_table *pllt,
+ const struct pll_params_table *pllt,
u16 frac,
struct meson_clk_pll_data *pll)
{
u64 rate = (u64)parent_rate * pllt->m;
- unsigned int od = pllt->od + pllt->od2 + pllt->od3;
if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
u64 frac_rate = (u64)parent_rate * frac;
@@ -55,7 +58,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
(1 << pll->frac.width));
}
- return DIV_ROUND_UP_ULL(rate, pllt->n << od);
+ return DIV_ROUND_UP_ULL(rate, pllt->n);
}
static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
@@ -63,20 +66,11 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- struct pll_rate_table pllt;
+ struct pll_params_table pllt;
u16 frac;
pllt.n = meson_parm_read(clk->map, &pll->n);
pllt.m = meson_parm_read(clk->map, &pll->m);
- pllt.od = meson_parm_read(clk->map, &pll->od);
-
- pllt.od2 = MESON_PARM_APPLICABLE(&pll->od2) ?
- meson_parm_read(clk->map, &pll->od2) :
- 0;
-
- pllt.od3 = MESON_PARM_APPLICABLE(&pll->od3) ?
- meson_parm_read(clk->map, &pll->od3) :
- 0;
frac = MESON_PARM_APPLICABLE(&pll->frac) ?
meson_parm_read(clk->map, &pll->frac) :
@@ -87,14 +81,12 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
static u16 __pll_params_with_frac(unsigned long rate,
unsigned long parent_rate,
- const struct pll_rate_table *pllt,
+ const struct pll_params_table *pllt,
struct meson_clk_pll_data *pll)
{
u16 frac_max = (1 << pll->frac.width);
u64 val = (u64)rate * pllt->n;
- val <<= pllt->od + pllt->od2 + pllt->od3;
-
if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
else
@@ -105,29 +97,50 @@ static u16 __pll_params_with_frac(unsigned long rate,
return min((u16)val, (u16)(frac_max - 1));
}
-static const struct pll_rate_table *
+static bool meson_clk_pll_is_better(unsigned long rate,
+ unsigned long best,
+ unsigned long now,
+ struct meson_clk_pll_data *pll)
+{
+ if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) ||
+ MESON_PARM_APPLICABLE(&pll->frac)) {
+ /* Round down */
+ if (now < rate && best < now)
+ return true;
+ } else {
+ /* Round Closest */
+ if (abs(now - rate) < abs(best - rate))
+ return true;
+ }
+
+ return false;
+}
+
+static const struct pll_params_table *
meson_clk_get_pll_settings(unsigned long rate,
+ unsigned long parent_rate,
struct meson_clk_pll_data *pll)
{
- const struct pll_rate_table *table = pll->table;
- unsigned int i = 0;
+ const struct pll_params_table *table = pll->table;
+ unsigned long best = 0, now = 0;
+ unsigned int i, best_i = 0;
if (!table)
return NULL;
- /* Find the first table element exceeding rate */
- while (table[i].rate && table[i].rate <= rate)
- i++;
+ for (i = 0; table[i].n; i++) {
+ now = __pll_params_to_rate(parent_rate, &table[i], 0, pll);
- if (i != 0) {
- if (MESON_PARM_APPLICABLE(&pll->frac) ||
- !(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) ||
- (abs(rate - table[i - 1].rate) <
- abs(rate - table[i].rate)))
- i--;
+ /* If we get an exact match, don't bother any further */
+ if (now == rate) {
+ return &table[i];
+ } else if (meson_clk_pll_is_better(rate, best, now, pll)) {
+ best = now;
+ best_i = i;
+ }
}
- return (struct pll_rate_table *)&table[i];
+ return (struct pll_params_table *)&table[best_i];
}
static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -135,16 +148,18 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- const struct pll_rate_table *pllt =
- meson_clk_get_pll_settings(rate, pll);
+ const struct pll_params_table *pllt =
+ meson_clk_get_pll_settings(rate, *parent_rate, pll);
+ unsigned long round;
u16 frac;
if (!pllt)
return meson_clk_pll_recalc_rate(hw, *parent_rate);
- if (!MESON_PARM_APPLICABLE(&pll->frac)
- || rate == pllt->rate)
- return pllt->rate;
+ round = __pll_params_to_rate(*parent_rate, pllt, 0, pll);
+
+ if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round)
+ return round;
/*
* The rate provided by the setting is not an exact match, let's
@@ -185,12 +200,45 @@ static void meson_clk_pll_init(struct clk_hw *hw)
}
}
+static int meson_clk_pll_enable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+
+ /* Make sure the pll is in reset */
+ meson_parm_write(clk->map, &pll->rst, 1);
+
+ /* Enable the pll */
+ meson_parm_write(clk->map, &pll->en, 1);
+
+ /* Take the pll out of reset */
+ meson_parm_write(clk->map, &pll->rst, 0);
+
+ if (meson_clk_pll_wait_lock(hw))
+ return -EIO;
+
+ return 0;
+}
+
+static void meson_clk_pll_disable(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+
+ /* Put the pll in reset */
+ meson_parm_write(clk->map, &pll->rst, 1);
+
+ /* Disable the pll */
+ meson_parm_write(clk->map, &pll->en, 0);
+}
+
static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
- const struct pll_rate_table *pllt;
+ const struct pll_params_table *pllt;
+ unsigned int enabled;
unsigned long old_rate;
u16 frac = 0;
@@ -199,32 +247,28 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
old_rate = rate;
- pllt = meson_clk_get_pll_settings(rate, pll);
+ pllt = meson_clk_get_pll_settings(rate, parent_rate, pll);
if (!pllt)
return -EINVAL;
- /* Put the pll in reset to write the params */
- meson_parm_write(clk->map, &pll->rst, 1);
+ enabled = meson_parm_read(clk->map, &pll->en);
+ if (enabled)
+ meson_clk_pll_disable(hw);
meson_parm_write(clk->map, &pll->n, pllt->n);
meson_parm_write(clk->map, &pll->m, pllt->m);
- meson_parm_write(clk->map, &pll->od, pllt->od);
- if (MESON_PARM_APPLICABLE(&pll->od2))
- meson_parm_write(clk->map, &pll->od2, pllt->od2);
-
- if (MESON_PARM_APPLICABLE(&pll->od3))
- meson_parm_write(clk->map, &pll->od3, pllt->od3);
if (MESON_PARM_APPLICABLE(&pll->frac)) {
frac = __pll_params_with_frac(rate, parent_rate, pllt, pll);
meson_parm_write(clk->map, &pll->frac, frac);
}
- /* make sure the reset is cleared at this point */
- meson_parm_write(clk->map, &pll->rst, 0);
+ /* If the pll is stopped, bail out now */
+ if (!enabled)
+ return 0;
- if (meson_clk_pll_wait_lock(hw)) {
+ if (meson_clk_pll_enable(hw)) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
/*
@@ -244,6 +288,8 @@ const struct clk_ops meson_clk_pll_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.round_rate = meson_clk_pll_round_rate,
.set_rate = meson_clk_pll_set_rate,
+ .enable = meson_clk_pll_enable,
+ .disable = meson_clk_pll_disable
};
const struct clk_ops meson_clk_pll_ro_ops = {
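For reference, here is a minimal standalone sketch of the new selection logic (illustrative names, not the kernel code): since a pll_params_table entry no longer stores a target rate, the candidate rate is recomputed from the parent rate for every (m, n) pair, and without CLK_MESON_PLL_ROUND_CLOSEST the largest rate not above the request wins.

/* Standalone sketch of the round-down parameter search; compiles with any
 * C99 compiler. The helpers below only mimic the reworked kernel logic.
 */
#include <stdio.h>

struct params { unsigned long m, n; };

/* same integer math as the reworked __pll_params_to_rate(), frac ignored */
static unsigned long params_to_rate(unsigned long parent, const struct params *p)
{
	return (unsigned long)(((unsigned long long)parent * p->m + p->n - 1) / p->n);
}

int main(void)
{
	const struct params table[] = { { 32, 1 }, { 40, 1 }, { 62, 1 }, { 0, 0 } };
	unsigned long parent = 24000000, target = 1000000000;
	unsigned long best = 0;

	for (int i = 0; table[i].n; i++) {
		unsigned long now = params_to_rate(parent, &table[i]);

		/* round down: keep the largest candidate not exceeding the target */
		if (now <= target && now > best)
			best = now;
	}
	printf("requested %lu -> best DCO rate %lu\n", target, best);
	return 0;
}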
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 24cec16b6038..6b96d55c047d 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -43,37 +43,29 @@ static inline void meson_parm_write(struct regmap *map, struct parm *p,
}
-struct pll_rate_table {
- unsigned long rate;
+struct pll_params_table {
u16 m;
u16 n;
- u16 od;
- u16 od2;
- u16 od3;
};
-#define PLL_RATE(_r, _m, _n, _od) \
+#define PLL_PARAMS(_m, _n) \
{ \
- .rate = (_r), \
.m = (_m), \
.n = (_n), \
- .od = (_od), \
}
#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
struct meson_clk_pll_data {
+ struct parm en;
struct parm m;
struct parm n;
struct parm frac;
- struct parm od;
- struct parm od2;
- struct parm od3;
struct parm l;
struct parm rst;
const struct reg_sequence *init_regs;
unsigned int init_count;
- const struct pll_rate_table *table;
+ const struct pll_params_table *table;
u8 flags;
};
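As a quick illustration of what a struct parm (reg_off/shift/width) describes, the throwaway helpers below mimic the spirit of meson_parm_read()/meson_parm_write() on a made-up register value; they are not the kernel implementations.

/* Standalone sketch: extracting and updating a shift/width bitfield. */
#include <stdint.h>
#include <stdio.h>

struct parm { unsigned int shift, width; };

static uint32_t parm_get(uint32_t reg, const struct parm *p)
{
	return (reg >> p->shift) & ((1u << p->width) - 1);
}

static uint32_t parm_set(uint32_t reg, const struct parm *p, uint32_t val)
{
	uint32_t mask = ((1u << p->width) - 1) << p->shift;

	return (reg & ~mask) | ((val << p->shift) & mask);
}

int main(void)
{
	/* same layout as the M and N fields of HHI_MPLL_CNTL in the diff */
	const struct parm m = { .shift = 0, .width = 9 };
	const struct parm n = { .shift = 9, .width = 5 };
	uint32_t cntl = 0;

	cntl = parm_set(cntl, &m, 50);	/* M = 50 */
	cntl = parm_set(cntl, &n, 1);	/* N = 1  */
	printf("M=%u N=%u cntl=0x%08x\n", parm_get(cntl, &m), parm_get(cntl, &n), cntl);
	return 0;
}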
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 86d3ae58e84c..9309cfaaa464 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -18,165 +18,77 @@
static DEFINE_SPINLOCK(meson_clk_lock);
-static const struct pll_rate_table gxbb_gp0_pll_rate_table[] = {
- PLL_RATE(96000000, 32, 1, 3),
- PLL_RATE(99000000, 33, 1, 3),
- PLL_RATE(102000000, 34, 1, 3),
- PLL_RATE(105000000, 35, 1, 3),
- PLL_RATE(108000000, 36, 1, 3),
- PLL_RATE(111000000, 37, 1, 3),
- PLL_RATE(114000000, 38, 1, 3),
- PLL_RATE(117000000, 39, 1, 3),
- PLL_RATE(120000000, 40, 1, 3),
- PLL_RATE(123000000, 41, 1, 3),
- PLL_RATE(126000000, 42, 1, 3),
- PLL_RATE(129000000, 43, 1, 3),
- PLL_RATE(132000000, 44, 1, 3),
- PLL_RATE(135000000, 45, 1, 3),
- PLL_RATE(138000000, 46, 1, 3),
- PLL_RATE(141000000, 47, 1, 3),
- PLL_RATE(144000000, 48, 1, 3),
- PLL_RATE(147000000, 49, 1, 3),
- PLL_RATE(150000000, 50, 1, 3),
- PLL_RATE(153000000, 51, 1, 3),
- PLL_RATE(156000000, 52, 1, 3),
- PLL_RATE(159000000, 53, 1, 3),
- PLL_RATE(162000000, 54, 1, 3),
- PLL_RATE(165000000, 55, 1, 3),
- PLL_RATE(168000000, 56, 1, 3),
- PLL_RATE(171000000, 57, 1, 3),
- PLL_RATE(174000000, 58, 1, 3),
- PLL_RATE(177000000, 59, 1, 3),
- PLL_RATE(180000000, 60, 1, 3),
- PLL_RATE(183000000, 61, 1, 3),
- PLL_RATE(186000000, 62, 1, 3),
- PLL_RATE(192000000, 32, 1, 2),
- PLL_RATE(198000000, 33, 1, 2),
- PLL_RATE(204000000, 34, 1, 2),
- PLL_RATE(210000000, 35, 1, 2),
- PLL_RATE(216000000, 36, 1, 2),
- PLL_RATE(222000000, 37, 1, 2),
- PLL_RATE(228000000, 38, 1, 2),
- PLL_RATE(234000000, 39, 1, 2),
- PLL_RATE(240000000, 40, 1, 2),
- PLL_RATE(246000000, 41, 1, 2),
- PLL_RATE(252000000, 42, 1, 2),
- PLL_RATE(258000000, 43, 1, 2),
- PLL_RATE(264000000, 44, 1, 2),
- PLL_RATE(270000000, 45, 1, 2),
- PLL_RATE(276000000, 46, 1, 2),
- PLL_RATE(282000000, 47, 1, 2),
- PLL_RATE(288000000, 48, 1, 2),
- PLL_RATE(294000000, 49, 1, 2),
- PLL_RATE(300000000, 50, 1, 2),
- PLL_RATE(306000000, 51, 1, 2),
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(318000000, 53, 1, 2),
- PLL_RATE(324000000, 54, 1, 2),
- PLL_RATE(330000000, 55, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(342000000, 57, 1, 2),
- PLL_RATE(348000000, 58, 1, 2),
- PLL_RATE(354000000, 59, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(366000000, 61, 1, 2),
- PLL_RATE(372000000, 62, 1, 2),
- PLL_RATE(384000000, 32, 1, 1),
- PLL_RATE(396000000, 33, 1, 1),
- PLL_RATE(408000000, 34, 1, 1),
- PLL_RATE(420000000, 35, 1, 1),
- PLL_RATE(432000000, 36, 1, 1),
- PLL_RATE(444000000, 37, 1, 1),
- PLL_RATE(456000000, 38, 1, 1),
- PLL_RATE(468000000, 39, 1, 1),
- PLL_RATE(480000000, 40, 1, 1),
- PLL_RATE(492000000, 41, 1, 1),
- PLL_RATE(504000000, 42, 1, 1),
- PLL_RATE(516000000, 43, 1, 1),
- PLL_RATE(528000000, 44, 1, 1),
- PLL_RATE(540000000, 45, 1, 1),
- PLL_RATE(552000000, 46, 1, 1),
- PLL_RATE(564000000, 47, 1, 1),
- PLL_RATE(576000000, 48, 1, 1),
- PLL_RATE(588000000, 49, 1, 1),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(612000000, 51, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(636000000, 53, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(660000000, 55, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(684000000, 57, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(708000000, 59, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(732000000, 61, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(768000000, 32, 1, 0),
- PLL_RATE(792000000, 33, 1, 0),
- PLL_RATE(816000000, 34, 1, 0),
- PLL_RATE(840000000, 35, 1, 0),
- PLL_RATE(864000000, 36, 1, 0),
- PLL_RATE(888000000, 37, 1, 0),
- PLL_RATE(912000000, 38, 1, 0),
- PLL_RATE(936000000, 39, 1, 0),
- PLL_RATE(960000000, 40, 1, 0),
- PLL_RATE(984000000, 41, 1, 0),
- PLL_RATE(1008000000, 42, 1, 0),
- PLL_RATE(1032000000, 43, 1, 0),
- PLL_RATE(1056000000, 44, 1, 0),
- PLL_RATE(1080000000, 45, 1, 0),
- PLL_RATE(1104000000, 46, 1, 0),
- PLL_RATE(1128000000, 47, 1, 0),
- PLL_RATE(1152000000, 48, 1, 0),
- PLL_RATE(1176000000, 49, 1, 0),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
+static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
+ PLL_PARAMS(32, 1),
+ PLL_PARAMS(33, 1),
+ PLL_PARAMS(34, 1),
+ PLL_PARAMS(35, 1),
+ PLL_PARAMS(36, 1),
+ PLL_PARAMS(37, 1),
+ PLL_PARAMS(38, 1),
+ PLL_PARAMS(39, 1),
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(41, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
{ /* sentinel */ },
};
-static const struct pll_rate_table gxl_gp0_pll_rate_table[] = {
- PLL_RATE(504000000, 42, 1, 1),
- PLL_RATE(516000000, 43, 1, 1),
- PLL_RATE(528000000, 44, 1, 1),
- PLL_RATE(540000000, 45, 1, 1),
- PLL_RATE(552000000, 46, 1, 1),
- PLL_RATE(564000000, 47, 1, 1),
- PLL_RATE(576000000, 48, 1, 1),
- PLL_RATE(588000000, 49, 1, 1),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(612000000, 51, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(636000000, 53, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(660000000, 55, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(684000000, 57, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(708000000, 59, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(732000000, 61, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(756000000, 63, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(780000000, 65, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
+static const struct pll_params_table gxl_gp0_pll_params_table[] = {
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
{ /* sentinel */ },
};
-static struct clk_regmap gxbb_fixed_pll = {
+static struct clk_regmap gxbb_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_MPLL_CNTL,
.shift = 0,
@@ -187,11 +99,6 @@ static struct clk_regmap gxbb_fixed_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.frac = {
.reg_off = HHI_MPLL_CNTL2,
.shift = 0,
@@ -209,11 +116,29 @@ static struct clk_regmap gxbb_fixed_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "fixed_pll",
+ .name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_fixed_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ /*
+ * This clock won't ever change at runtime so
+ * CLK_SET_RATE_PARENT is not required
+ */
},
};
@@ -228,8 +153,13 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
},
};
-static struct clk_regmap gxbb_hdmi_pll = {
+static struct clk_regmap gxbb_hdmi_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_HDMI_PLL_CNTL,
.shift = 0,
@@ -245,21 +175,6 @@ static struct clk_regmap gxbb_hdmi_pll = {
.shift = 0,
.width = 12,
},
- .od = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 16,
- .width = 2,
- },
- .od2 = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 22,
- .width = 2,
- },
- .od3 = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 18,
- .width = 2,
- },
.l = {
.reg_off = HHI_HDMI_PLL_CNTL,
.shift = 31,
@@ -272,74 +187,121 @@ static struct clk_regmap gxbb_hdmi_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "hdmi_pll",
+ .name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "hdmi_pll_pre_mult" },
.num_parents = 1,
+ /*
+ * The display driver directly handles the hdmi pll registers for now,
+ * so we need NOCACHE to keep our view of the clock as accurate as possible
+ */
.flags = CLK_GET_RATE_NOCACHE,
},
};
+static struct clk_regmap gxbb_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL2,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL2,
+ .shift = 22,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxbb_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL2,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od2" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxl_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 21,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap gxl_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 23,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap gxl_hdmi_pll = {
- .data = &(struct meson_clk_pll_data){
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .frac = {
- /*
- * On gxl, there is a register shift due to
- * HHI_HDMI_PLL_CNTL1 which does not exist on gxbb,
- * so we compute the register offset based on the PLL
- * base to get it right
- */
- .reg_off = HHI_HDMI_PLL_CNTL + 4,
- .shift = 0,
- .width = 12,
- },
- .od = {
- .reg_off = HHI_HDMI_PLL_CNTL + 8,
- .shift = 21,
- .width = 2,
- },
- .od2 = {
- .reg_off = HHI_HDMI_PLL_CNTL + 8,
- .shift = 23,
- .width = 2,
- },
- .od3 = {
- .reg_off = HHI_HDMI_PLL_CNTL + 8,
- .shift = 19,
- .width = 2,
- },
- .l = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 29,
- .width = 1,
- },
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 19,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
- .ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_od2" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gxbb_sys_pll = {
+static struct clk_regmap gxbb_sys_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 0,
@@ -350,11 +312,6 @@ static struct clk_regmap gxbb_sys_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 10,
- .width = 2,
- },
.l = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 31,
@@ -367,11 +324,26 @@ static struct clk_regmap gxbb_sys_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "sys_pll",
+ .name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_sys_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_PLL_CNTL,
+ .shift = 10,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -379,11 +351,15 @@ static const struct reg_sequence gxbb_gp0_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
- { .reg = HHI_GP0_PLL_CNTL, .def = 0x4a000228 },
};
-static struct clk_regmap gxbb_gp0_pll = {
+static struct clk_regmap gxbb_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 0,
@@ -394,11 +370,6 @@ static struct clk_regmap gxbb_gp0_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.l = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 31,
@@ -409,16 +380,15 @@ static struct clk_regmap gxbb_gp0_pll = {
.shift = 29,
.width = 1,
},
- .table = gxbb_gp0_pll_rate_table,
+ .table = gxbb_gp0_pll_params_table,
.init_regs = gxbb_gp0_init_regs,
.init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
},
.hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
+ .name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
},
};
@@ -428,11 +398,15 @@ static const struct reg_sequence gxl_gp0_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d },
{ .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 },
- { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 },
};
-static struct clk_regmap gxl_gp0_pll = {
+static struct clk_regmap gxl_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 0,
@@ -443,11 +417,6 @@ static struct clk_regmap gxl_gp0_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.frac = {
.reg_off = HHI_GP0_PLL_CNTL1,
.shift = 0,
@@ -463,16 +432,31 @@ static struct clk_regmap gxl_gp0_pll = {
.shift = 29,
.width = 1,
},
- .table = gxl_gp0_pll_rate_table,
+ .table = gxl_gp0_pll_params_table,
.init_regs = gxl_gp0_init_regs,
.init_count = ARRAY_SIZE(gxl_gp0_init_regs),
},
.hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
+ .name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1933,6 +1917,12 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
[CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
[CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
+ [CLKID_FIXED_PLL_DCO] = &gxbb_fixed_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &gxbb_hdmi_pll_dco.hw,
+ [CLKID_HDMI_PLL_OD] = &gxbb_hdmi_pll_od.hw,
+ [CLKID_HDMI_PLL_OD2] = &gxbb_hdmi_pll_od2.hw,
+ [CLKID_SYS_PLL_DCO] = &gxbb_sys_pll_dco.hw,
+ [CLKID_GP0_PLL_DCO] = &gxbb_gp0_pll_dco.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1948,7 +1938,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_FCLK_DIV4] = &gxbb_fclk_div4.hw,
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
- [CLKID_GP0_PLL] = &gxl_gp0_pll.hw,
+ [CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
[CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
[CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
@@ -2098,19 +2088,29 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw,
[CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
[CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
+ [CLKID_FIXED_PLL_DCO] = &gxbb_fixed_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &gxbb_hdmi_pll_dco.hw,
+ [CLKID_HDMI_PLL_OD] = &gxl_hdmi_pll_od.hw,
+ [CLKID_HDMI_PLL_OD2] = &gxl_hdmi_pll_od2.hw,
+ [CLKID_SYS_PLL_DCO] = &gxbb_sys_pll_dco.hw,
+ [CLKID_GP0_PLL_DCO] = &gxl_gp0_pll_dco.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
static struct clk_regmap *const gxbb_clk_regmaps[] = {
- &gxbb_gp0_pll,
+ &gxbb_gp0_pll_dco,
&gxbb_hdmi_pll,
+ &gxbb_hdmi_pll_od,
+ &gxbb_hdmi_pll_od2,
};
static struct clk_regmap *const gxl_clk_regmaps[] = {
- &gxl_gp0_pll,
+ &gxl_gp0_pll_dco,
&gxl_hdmi_pll,
+ &gxl_hdmi_pll_od,
+ &gxl_hdmi_pll_od2,
};
static struct clk_regmap *const gx_clk_regmaps[] = {
@@ -2265,6 +2265,10 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_gen_clk_sel,
&gxbb_gen_clk_div,
&gxbb_gen_clk,
+ &gxbb_fixed_pll_dco,
+ &gxbb_hdmi_pll_dco,
+ &gxbb_sys_pll_dco,
+ &gxbb_gp0_pll,
};
struct clkc_data {
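To see what the split buys: the old monolithic PLL computed out = (parent * M / N) >> (OD + OD2 + OD3) in a single clock, while the new model exposes the same arithmetic as a DCO clock followed by chained CLK_DIVIDER_POWER_OF_TWO regmap dividers. A throwaway sketch of that composition, with values picked purely for illustration:

/* Standalone sketch: the new hdmi_pll rate is the DCO rate pushed through
 * the power-of-two OD/OD2/OD3 dividers, exactly like the old single clock.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long xtal = 24000000, m = 123, n = 1;
	unsigned int od = 1, od2 = 1, od3 = 0;

	unsigned long long dco = xtal * m / n;			/* hdmi_pll_dco */
	unsigned long long out = ((dco >> od) >> od2) >> od3;	/* hdmi_pll     */

	printf("dco=%llu out=%llu\n", dco, out);
	return 0;
}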
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 20dfb1daf5b8..72bc077d9663 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -159,8 +159,14 @@
#define CLKID_VDEC_HEVC_DIV 155
#define CLKID_GEN_CLK_SEL 157
#define CLKID_GEN_CLK_DIV 158
-
-#define NR_CLKS 160
+#define CLKID_FIXED_PLL_DCO 160
+#define CLKID_HDMI_PLL_DCO 161
+#define CLKID_HDMI_PLL_OD 162
+#define CLKID_HDMI_PLL_OD2 163
+#define CLKID_SYS_PLL_DCO 164
+#define CLKID_GP0_PLL_DCO 165
+
+#define NR_CLKS 166
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 7447d96a265f..346b9e165b7a 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -11,7 +11,6 @@
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of_address.h>
-#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -22,66 +21,27 @@
static DEFINE_SPINLOCK(meson_clk_lock);
-static void __iomem *clk_base;
-
struct meson8b_clk_reset {
struct reset_controller_dev reset;
- void __iomem *base;
+ struct regmap *regmap;
};
-static const struct pll_rate_table sys_pll_rate_table[] = {
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(384000000, 64, 1, 2),
- PLL_RATE(408000000, 68, 1, 2),
- PLL_RATE(432000000, 72, 1, 2),
- PLL_RATE(456000000, 76, 1, 2),
- PLL_RATE(480000000, 80, 1, 2),
- PLL_RATE(504000000, 84, 1, 2),
- PLL_RATE(528000000, 88, 1, 2),
- PLL_RATE(552000000, 92, 1, 2),
- PLL_RATE(576000000, 96, 1, 2),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
- PLL_RATE(816000000, 68, 1, 1),
- PLL_RATE(840000000, 70, 1, 1),
- PLL_RATE(864000000, 72, 1, 1),
- PLL_RATE(888000000, 74, 1, 1),
- PLL_RATE(912000000, 76, 1, 1),
- PLL_RATE(936000000, 78, 1, 1),
- PLL_RATE(960000000, 80, 1, 1),
- PLL_RATE(984000000, 82, 1, 1),
- PLL_RATE(1008000000, 84, 1, 1),
- PLL_RATE(1032000000, 86, 1, 1),
- PLL_RATE(1056000000, 88, 1, 1),
- PLL_RATE(1080000000, 90, 1, 1),
- PLL_RATE(1104000000, 92, 1, 1),
- PLL_RATE(1128000000, 94, 1, 1),
- PLL_RATE(1152000000, 96, 1, 1),
- PLL_RATE(1176000000, 98, 1, 1),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
- PLL_RATE(1512000000, 63, 1, 0),
- PLL_RATE(1536000000, 64, 1, 0),
+static const struct pll_params_table sys_pll_params_table[] = {
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
{ /* sentinel */ },
};
@@ -94,8 +54,13 @@ static struct clk_fixed_rate meson8b_xtal = {
},
};
-static struct clk_regmap meson8b_fixed_pll = {
+static struct clk_regmap meson8b_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_MPLL_CNTL,
.shift = 0,
@@ -106,11 +71,6 @@ static struct clk_regmap meson8b_fixed_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.frac = {
.reg_off = HHI_MPLL_CNTL2,
.shift = 0,
@@ -128,16 +88,39 @@ static struct clk_regmap meson8b_fixed_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "fixed_pll",
+ .name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
},
};
-static struct clk_regmap meson8b_vid_pll = {
+static struct clk_regmap meson8b_fixed_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .num_parents = 1,
+ /*
+ * This clock won't ever change at runtime so
+ * CLK_SET_RATE_PARENT is not required
+ */
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_VID_PLL_CNTL,
.shift = 0,
@@ -148,11 +131,6 @@ static struct clk_regmap meson8b_vid_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.l = {
.reg_off = HHI_VID_PLL_CNTL,
.shift = 31,
@@ -165,16 +143,36 @@ static struct clk_regmap meson8b_vid_pll = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "vid_pll",
+ .name = "vid_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
},
};
-static struct clk_regmap meson8b_sys_pll = {
+static struct clk_regmap meson8b_vid_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "vid_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_sys_pll_dco = {
.data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
.m = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 0,
@@ -185,11 +183,6 @@ static struct clk_regmap meson8b_sys_pll = {
.shift = 9,
.width = 5,
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
.l = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 31,
@@ -200,14 +193,29 @@ static struct clk_regmap meson8b_sys_pll = {
.shift = 29,
.width = 1,
},
- .table = sys_pll_rate_table,
+ .table = sys_pll_params_table,
},
.hw.init = &(struct clk_init_data){
- .name = "sys_pll",
+ .name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap meson8b_sys_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -879,6 +887,9 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
[CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
+ [CLKID_PLL_VID_DCO] = &meson8b_vid_pll_dco.hw,
+ [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -987,6 +998,9 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_nand_clk_sel,
&meson8b_nand_clk_div,
&meson8b_nand_clk_gate,
+ &meson8b_fixed_pll_dco,
+ &meson8b_vid_pll_dco,
+ &meson8b_sys_pll_dco,
};
static const struct meson8b_clk_reset_line {
@@ -1050,7 +1064,6 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
container_of(rcdev, struct meson8b_clk_reset, reset);
unsigned long flags;
const struct meson8b_clk_reset_line *reset;
- u32 val;
if (id >= ARRAY_SIZE(meson8b_clk_reset_bits))
return -EINVAL;
@@ -1059,12 +1072,12 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&meson_clk_lock, flags);
- val = readl(meson8b_clk_reset->base + reset->reg);
if (assert)
- val |= BIT(reset->bit_idx);
+ regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
+ BIT(reset->bit_idx), BIT(reset->bit_idx));
else
- val &= ~BIT(reset->bit_idx);
- writel(val, meson8b_clk_reset->base + reset->reg);
+ regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
+ BIT(reset->bit_idx), 0);
spin_unlock_irqrestore(&meson_clk_lock, flags);
@@ -1094,62 +1107,12 @@ static const struct regmap_config clkc_regmap_config = {
.reg_stride = 4,
};
-static int meson8b_clkc_probe(struct platform_device *pdev)
-{
- int ret, i;
- struct device *dev = &pdev->dev;
- struct regmap *map;
-
- if (!clk_base)
- return -ENXIO;
-
- map = devm_regmap_init_mmio(dev, clk_base, &clkc_regmap_config);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
- meson8b_clk_regmaps[i]->map = map;
-
- /*
- * register all clks
- * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
- */
- for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
- /* array might be sparse */
- if (!meson8b_hw_onecell_data.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[i]);
- if (ret)
- return ret;
- }
-
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &meson8b_hw_onecell_data);
-}
-
-static const struct of_device_id meson8b_clkc_match_table[] = {
- { .compatible = "amlogic,meson8-clkc" },
- { .compatible = "amlogic,meson8b-clkc" },
- { .compatible = "amlogic,meson8m2-clkc" },
- { }
-};
-
-static struct platform_driver meson8b_driver = {
- .probe = meson8b_clkc_probe,
- .driver = {
- .name = "meson8b-clkc",
- .of_match_table = meson8b_clkc_match_table,
- },
-};
-
-builtin_platform_driver(meson8b_driver);
-
-static void __init meson8b_clkc_reset_init(struct device_node *np)
+static void __init meson8b_clkc_init(struct device_node *np)
{
struct meson8b_clk_reset *rstc;
- int ret;
+ void __iomem *clk_base;
+ struct regmap *map;
+ int i, ret;
/* Generic clocks, PLLs and some of the reset-bits */
clk_base = of_iomap(np, 1);
@@ -1158,12 +1121,16 @@ static void __init meson8b_clkc_reset_init(struct device_node *np)
return;
}
+ map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config);
+ if (IS_ERR(map))
+ return;
+
rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
if (!rstc)
return;
/* Reset Controller */
- rstc->base = clk_base;
+ rstc->regmap = map;
rstc->reset.ops = &meson8b_clk_reset_ops;
rstc->reset.nr_resets = ARRAY_SIZE(meson8b_clk_reset_bits);
rstc->reset.of_node = np;
@@ -1173,11 +1140,34 @@ static void __init meson8b_clkc_reset_init(struct device_node *np)
__func__, ret);
return;
}
+
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
+ meson8b_clk_regmaps[i]->map = map;
+
+ /*
+ * register all clks
+ * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
+ */
+ for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
+ /* array might be sparse */
+ if (!meson8b_hw_onecell_data.hws[i])
+ continue;
+
+ ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]);
+ if (ret)
+ return;
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+ &meson8b_hw_onecell_data);
+ if (ret)
+ pr_err("%s: failed to register clock provider\n", __func__);
}
CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
- meson8b_clkc_reset_init);
+ meson8b_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
- meson8b_clkc_reset_init);
+ meson8b_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
- meson8b_clkc_reset_init);
+ meson8b_clkc_init);
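The reset handler above now goes through regmap_update_bits() instead of an open-coded readl()/writel() pair; functionally this is the same read-modify-write. A tiny userspace sketch of the equivalence (fake_reg stands in for the MMIO register and is purely illustrative):

/* Standalone sketch: read-modify-write of a single reset bit. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stand-in for an MMIO register */

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int bit_idx = 7;

	update_bits(&fake_reg, 1u << bit_idx, 1u << bit_idx);	/* assert reset   */
	printf("after assert:   0x%08x\n", fake_reg);
	update_bits(&fake_reg, 1u << bit_idx, 0);		/* deassert reset */
	printf("after deassert: 0x%08x\n", fake_reg);
	return 0;
}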
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 5d09412b5084..1c6fb180e6a2 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -75,8 +75,11 @@
#define CLKID_FCLK_DIV7_DIV 109
#define CLKID_NAND_SEL 110
#define CLKID_NAND_DIV 111
+#define CLKID_PLL_FIXED_DCO 113
+#define CLKID_PLL_VID_DCO 114
+#define CLKID_PLL_SYS_DCO 115
-#define CLK_NR_CLKS 113
+#define CLK_NR_CLKS 116
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 0fc75c395957..d083b860f083 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
/* The gate clocks has mux parent. */
{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index fa2fbd2cef4a..ea54a874bbda 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada AP806 System Controller
*
@@ -5,9 +6,6 @@
*
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) "ap806-system-controller: " fmt
@@ -155,7 +153,6 @@ static int ap806_syscon_common_probe(struct platform_device *pdev,
goto fail4;
}
- of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data);
ret = of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data);
if (ret)
goto fail_clk_add;
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 2c7c1085f883..7dedfaa6e152 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 370 SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/armada-375.c b/drivers/clk/mvebu/armada-375.c
index c7af2242b796..a7157c690238 100644
--- a/drivers/clk/mvebu/armada-375.c
+++ b/drivers/clk/mvebu/armada-375.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 375 SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 499f5962c8b0..1f1cff428d78 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -56,6 +56,15 @@
struct clk_periph_driver_data {
struct clk_hw_onecell_data *hw_data;
spinlock_t lock;
+ void __iomem *reg;
+
+ /* Storage registers for suspend/resume operations */
+ u32 tbg_sel;
+ u32 div_sel0;
+ u32 div_sel1;
+ u32 div_sel2;
+ u32 clk_sel;
+ u32 clk_dis;
};
struct clk_double_div {
@@ -672,6 +681,40 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
return PTR_ERR_OR_ZERO(*hw);
}
+static int __maybe_unused armada_3700_periph_clock_suspend(struct device *dev)
+{
+ struct clk_periph_driver_data *data = dev_get_drvdata(dev);
+
+ data->tbg_sel = readl(data->reg + TBG_SEL);
+ data->div_sel0 = readl(data->reg + DIV_SEL0);
+ data->div_sel1 = readl(data->reg + DIV_SEL1);
+ data->div_sel2 = readl(data->reg + DIV_SEL2);
+ data->clk_sel = readl(data->reg + CLK_SEL);
+ data->clk_dis = readl(data->reg + CLK_DIS);
+
+ return 0;
+}
+
+static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev)
+{
+ struct clk_periph_driver_data *data = dev_get_drvdata(dev);
+
+ /* Follow the same order as the Cortex-M3 (ATF code) does */
+ writel(data->clk_dis, data->reg + CLK_DIS);
+ writel(data->div_sel0, data->reg + DIV_SEL0);
+ writel(data->div_sel1, data->reg + DIV_SEL1);
+ writel(data->div_sel2, data->reg + DIV_SEL2);
+ writel(data->tbg_sel, data->reg + TBG_SEL);
+ writel(data->clk_sel, data->reg + CLK_SEL);
+
+ return 0;
+}
+
+static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
+ armada_3700_periph_clock_resume)
+};
+
static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
struct clk_periph_driver_data *driver_data;
@@ -680,7 +723,6 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int num_periph = 0, i, ret;
struct resource *res;
- void __iomem *reg;
data = of_device_get_match_data(dev);
if (!data)
@@ -689,11 +731,6 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
while (data[num_periph].name)
num_periph++;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
-
driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
if (!driver_data)
return -ENOMEM;
@@ -706,12 +743,16 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
return -ENOMEM;
driver_data->hw_data->num = num_periph;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ driver_data->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(driver_data->reg))
+ return PTR_ERR(driver_data->reg);
+
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) {
struct clk_hw **hw = &driver_data->hw_data->hws[i];
-
- if (armada_3700_add_composite_clk(&data[i], reg,
+ if (armada_3700_add_composite_clk(&data[i], driver_data->reg,
&driver_data->lock, dev, hw))
dev_err(dev, "Can't register periph clock %s\n",
data[i].name);
@@ -749,6 +790,7 @@ static struct platform_driver armada_3700_periph_clock_driver = {
.driver = {
.name = "marvell-armada-3700-periph-clock",
.of_match_table = armada_3700_periph_clock_of_match,
+ .pm = &armada_3700_periph_clock_pm_ops,
},
};
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index 7ff041f73b55..ee272d4d8c24 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Time Base Generator clocks
*
* Copyright (C) 2016 Marvell
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2 or later. This program is licensed "as is"
- * without any warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
@@ -99,12 +96,13 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
hw_tbg_data->num = NUM_TBG;
platform_set_drvdata(pdev, hw_tbg_data);
- parent = devm_clk_get(dev, NULL);
+ parent = clk_get(dev, NULL);
if (IS_ERR(parent)) {
dev_err(dev, "Could get the clock parent\n");
return -EINVAL;
}
parent_name = __clk_get_name(parent);
+ clk_put(parent);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index 612d65ede10a..e9e306d4e9af 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 37xx SoC xtal clocks
*
@@ -5,9 +6,6 @@
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 9ff4ea63932d..ef2ab81f087d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 380/385 SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/armada-39x.c b/drivers/clk/mvebu/armada-39x.c
index 4fdfd32247a9..674ccfd6236e 100644
--- a/drivers/clk/mvebu/armada-39x.c
+++ b/drivers/clk/mvebu/armada-39x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada 39x SoC clocks
*
@@ -8,9 +9,6 @@
* Andrew Lunn <andrew@lunn.ch>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 0ec44ae9a2a2..e8f03293ec83 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada XP SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 68f05c53d40e..1fc84b0e72ee 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MVEBU Core divider clock
*
@@ -5,9 +6,6 @@
*
* Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 3045067448fb..c2af3395cf13 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell MVEBU CPU clock handling.
*
@@ -5,9 +6,6 @@
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 472c88b90256..6ab3c2e627c7 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell EBU SoC common clock handling
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/common.h b/drivers/clk/mvebu/common.h
index f0de6c8a494a..d1ab79b43105 100644
--- a/drivers/clk/mvebu/common.h
+++ b/drivers/clk/mvebu/common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Marvell EBU SoC common clock handling
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __CLK_MVEBU_COMMON_H_
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 75bf7b8f282f..9781b1bf5998 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Armada CP110 System Controller
*
@@ -5,9 +6,6 @@
*
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 59fad9546c84..e0dd99f36bf4 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Dove SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index a2a8d614039d..6f784167bda4 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Kirkwood SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c
index 6e203af73cac..0a74cf7a7725 100644
--- a/drivers/clk/mvebu/mv98dx3236.c
+++ b/drivers/clk/mvebu/mv98dx3236.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell MV98DX3236 SoC clocks
*
@@ -7,9 +8,6 @@
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c
index a6e5bee23385..f681a65be20a 100644
--- a/drivers/clk/mvebu/orion.c
+++ b/drivers/clk/mvebu/orion.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Orion SoC clocks
*
@@ -5,9 +6,6 @@
*
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 064768699fe7..a611531df115 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,3 +1,7 @@
+config KRAIT_CLOCKS
+ bool
+ select KRAIT_L2_ACCESSORS
+
config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
@@ -235,6 +239,31 @@ config MSM_GCC_8998
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, UFS, SD/eMMC, PCIe, etc.
+config QCS_GCC_404
+ tristate "QCS404 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on QCS404 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe, etc.
+
+config SDM_CAMCC_845
+ tristate "SDM845 Camera Clock Controller"
+ depends on COMMON_CLK_QCOM
+ select SDM_GCC_845
+ help
+ Support for the camera clock controller on SDM845 devices.
+ Say Y if you want to support camera devices and camera functionality.
+
+config SDM_GCC_660
+ tristate "SDM660 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SDM660 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
config SDM_GCC_845
tristate "SDM845 Global Clock Controller"
select QCOM_GDSC
@@ -272,3 +301,27 @@ config SPMI_PMIC_CLKDIV
Technologies, Inc. SPMI PMIC. It configures the frequency of
clkdiv outputs of the PMIC. These clocks are typically wired
through alternate functions on GPIO pins.
+
+config QCOM_HFPLL
+ tristate "High-Frequency PLL (HFPLL) Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the high-frequency PLLs present on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8974, APQ8084, etc.
+
+config KPSS_XCC
+ tristate "KPSS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the Krait ACC and GCC clock controllers. Say Y
+ if you want to support CPU frequency scaling on devices such
+ as MSM8960, APQ8064, etc.
+
+config KRAITCC
+ tristate "Krait Clock Controller"
+ depends on COMMON_CLK_QCOM && ARM
+ select KRAIT_CLOCKS
+ help
+ Support for the Krait CPU clocks on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 21a45035930d..981882e16189 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -11,6 +11,8 @@ clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
+clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
@@ -39,7 +41,13 @@ obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
+obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
+obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
+obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
+obj-$(CONFIG_KRAITCC) += krait-cc.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
new file mode 100644
index 000000000000..1b2cefef7431
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 1 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "cam_cc_pll2_out_even",
+ "cam_cc_pll1_out_even",
+ "cam_cc_pll3_out_even",
+ "cam_cc_pll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0_out_even",
+ .parent_names = (const char *[]){ "cam_cc_pll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1_out_even",
+ .parent_names = (const char *[]){ "cam_cc_pll1" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_even",
+ .parent_names = (const char *[]){ "cam_cc_pll2" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3_out_even",
+ .parent_names = (const char *[]){ "cam_cc_pll3" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+/*
+ * As per the HW design, some of the CAMCC RCGs need to
+ * switch to the XO clock while they are disabled, so
+ * clk_rcg2_shared_ops is used for such RCGs; this is required
+ * to power down the camera memories gracefully.
+ * Also, the CLK_SET_RATE_PARENT flag is used for the RCGs that
+ * have CAM_CC_PLL2_OUT_EVEN as a parent in their frequency
+ * table and require reconfiguration of the PLL frequency.
+ */
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x600c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+ .cmd_rcgr = 0xb0d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_CAM_CC_PLL2_OUT_EVEN, 2, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x5070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ .cmd_rcgr = 0xb0b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fd_core_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xb088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x900c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x9038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+ F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x700c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ipe_1_clk_src = {
+ .cmd_rcgr = 0x800c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_1_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xb04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xb0f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 2),
+ F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
+ F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_CAM_CC_PLL0_OUT_EVEN, 10, 0, 0),
+ F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0),
+ F(73846154, P_CAM_CC_PLL2_OUT_EVEN, 6.5, 0, 0),
+ F(80000000, P_CAM_CC_PLL2_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_names = cam_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x606c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x606c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_slow_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_areg_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_fast_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x6024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_bps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+ .halt_reg = 0xb12c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb12c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xb124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+ .halt_reg = 0xb0f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cci_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xb11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_slow_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_csi0phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_csi1phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_csi2phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x5088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_csi3phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy0_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy1_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy2_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x508c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy3_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+ .halt_reg = 0xb0c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fd_core_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_fd_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+ .halt_reg = 0xb0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fd_core_uar_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_fd_core_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_apb_clk = {
+ .halt_reg = 0xb084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+ .halt_reg = 0xb078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xb0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_icp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+ .halt_reg = 0xb07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_cti_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+ .halt_reg = 0xb080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x9078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0x9050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_0_csid_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x9034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_0_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xa048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_1_csid_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xa02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_lite_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ife_lite_csid_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_slow_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_fast_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x7034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x7024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ipe_0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_1_ahb_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_slow_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_1_areg_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_fast_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_1_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_ipe_1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_jpeg_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xb110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_lrme_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_mclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_mclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_mclk2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk",
+ .parent_names = (const char *[]){
+ "cam_cc_mclk3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0xb13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xb0a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x8004,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .flags = POLL_CFG_GDSCR,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *cam_cc_sdm845_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+ [CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+ [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+ [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+ [CAM_CC_ICP_APB_CLK] = &cam_cc_icp_apb_clk.clkr,
+ [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+ [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+ [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+ [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+ [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+ [CAM_CC_IPE_1_CLK_SRC] = &cam_cc_ipe_1_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *cam_cc_sdm845_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct regmap_config cam_cc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd004,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sdm845_desc = {
+ .config = &cam_cc_sdm845_regmap_config,
+ .clks = cam_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sdm845_clocks),
+ .gdscs = cam_cc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sdm845_gdscs),
+};
+
+static const struct of_device_id cam_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sdm845_match_table);
+
+static int cam_cc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct alpha_pll_config cam_cc_pll_config = { };
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ cam_cc_pll_config.l = 0x1f;
+ cam_cc_pll_config.alpha = 0x4000;
+ clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll_config);
+
+ cam_cc_pll_config.l = 0x2a;
+ cam_cc_pll_config.alpha = 0x1556;
+ clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll_config);
+
+ cam_cc_pll_config.l = 0x32;
+ cam_cc_pll_config.alpha = 0x0;
+ clk_fabia_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll_config);
+
+ cam_cc_pll_config.l = 0x14;
+ clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll_config);
+
+ return qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sdm845_driver = {
+ .probe = cam_cc_sdm845_probe,
+ .driver = {
+ .name = "sdm845-camcc",
+ .of_match_table = cam_cc_sdm845_match_table,
+ },
+};
+
+static int __init cam_cc_sdm845_init(void)
+{
+ return platform_driver_register(&cam_cc_sdm845_driver);
+}
+subsys_initcall(cam_cc_sdm845_init);
+
+static void __exit cam_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&cam_cc_sdm845_driver);
+}
+module_exit(cam_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index a91d97cecbad..0ced4a5a9a17 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -220,6 +220,7 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
if (pll->flags & SUPPORTS_FSM_MODE)
qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
}
+EXPORT_SYMBOL_GPL(clk_alpha_pll_configure);
static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
{
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index bc2205c450b6..99446bf630aa 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -18,7 +18,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
u32 val;
if (!br->hwcg_reg)
- return 0;
+ return false;
regmap_read(br->clkr.regmap, br->hwcg_reg, &val);
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
new file mode 100644
index 000000000000..3c04805f2a55
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+
+/* Perform one-time initialization of an HFPLL: configure it for integer mode. */
+static void __clk_hfpll_init_once(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ if (likely(h->init_done))
+ return;
+
+ /* Configure PLL parameters for integer mode. */
+ if (hd->config_val)
+ regmap_write(regmap, hd->config_reg, hd->config_val);
+ regmap_write(regmap, hd->m_reg, 0);
+ regmap_write(regmap, hd->n_reg, 1);
+
+ if (hd->user_reg) {
+ u32 regval = hd->user_val;
+ unsigned long rate;
+
+ rate = clk_hw_get_rate(hw);
+
+ /* Pick the right VCO. */
+ if (hd->user_vco_mask && rate > hd->low_vco_max_rate)
+ regval |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, regval);
+ }
+
+ if (hd->droop_reg)
+ regmap_write(regmap, hd->droop_reg, hd->droop_val);
+
+ h->init_done = true;
+}
+
+static void __clk_hfpll_enable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 val;
+
+ __clk_hfpll_init_once(hw);
+
+ /* Disable PLL bypass mode. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
+
+ /* Wait for PLL to lock. */
+ if (hd->status_reg) {
+ do {
+ regmap_read(regmap, hd->status_reg, &val);
+ } while (!(val & BIT(hd->lock_bit)));
+ } else {
+ udelay(60);
+ }
+
+ /* Enable PLL output. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
+}
+
+/* Enable an already-configured HFPLL. */
+static int clk_hfpll_enable(struct clk_hw *hw)
+{
+ unsigned long flags;
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ spin_lock_irqsave(&h->lock, flags);
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)))
+ __clk_hfpll_enable(hw);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static void __clk_hfpll_disable(struct clk_hfpll *h)
+{
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ /*
+ * Disable the PLL output, disable test mode, enable the bypass mode,
+ * and assert the reset.
+ */
+ regmap_update_bits(regmap, hd->mode_reg,
+ PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0);
+}
+
+static void clk_hfpll_disable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ __clk_hfpll_disable(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ unsigned long rrate;
+
+ rate = clamp(rate, hd->min_rate, hd->max_rate);
+
+ rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate;
+ if (rrate > hd->max_rate)
+ rrate -= *parent_rate;
+
+ return rrate;
+}
+
+/*
+ * For performance, this assumes that no downstream clocks are actively
+ * using the PLL while its rate is being changed.
+ */
+static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ unsigned long flags;
+ u32 l_val, val;
+ bool enabled;
+
+ l_val = rate / parent_rate;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ enabled = __clk_is_enabled(hw->clk);
+ if (enabled)
+ __clk_hfpll_disable(h);
+
+ /* Pick the right VCO. */
+ if (hd->user_reg && hd->user_vco_mask) {
+ regmap_read(regmap, hd->user_reg, &val);
+ if (rate <= hd->low_vco_max_rate)
+ val &= ~hd->user_vco_mask;
+ else
+ val |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, val);
+ }
+
+ regmap_write(regmap, hd->l_reg, l_val);
+
+ if (enabled)
+ __clk_hfpll_enable(hw);
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 l_val;
+
+ regmap_read(regmap, hd->l_reg, &l_val);
+
+ return l_val * parent_rate;
+}
+
+static void clk_hfpll_init(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode, status;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
+ __clk_hfpll_init_once(hw);
+ return;
+ }
+
+ if (hd->status_reg) {
+ regmap_read(regmap, hd->status_reg, &status);
+ if (!(status & BIT(hd->lock_bit))) {
+ WARN(1, "HFPLL %s is ON, but not locked!\n",
+ __clk_get_name(hw->clk));
+ clk_hfpll_disable(hw);
+ __clk_hfpll_init_once(hw);
+ }
+ }
+}
+
+static int hfpll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ mode &= 0x7;
+ return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL);
+}
+
+const struct clk_ops clk_ops_hfpll = {
+ .enable = clk_hfpll_enable,
+ .disable = clk_hfpll_disable,
+ .is_enabled = hfpll_is_enabled,
+ .round_rate = clk_hfpll_round_rate,
+ .set_rate = clk_hfpll_set_rate,
+ .recalc_rate = clk_hfpll_recalc_rate,
+ .init = clk_hfpll_init,
+};
+EXPORT_SYMBOL_GPL(clk_ops_hfpll);
diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h
new file mode 100644
index 000000000000..2a57b2fb2f2f
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_CLK_HFPLL_H__
+#define __QCOM_CLK_HFPLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include "clk-regmap.h"
+
+struct hfpll_data {
+ u32 mode_reg;
+ u32 l_reg;
+ u32 m_reg;
+ u32 n_reg;
+ u32 user_reg;
+ u32 droop_reg;
+ u32 config_reg;
+ u32 status_reg;
+ u8 lock_bit;
+
+ u32 droop_val;
+ u32 config_val;
+ u32 user_val;
+ u32 user_vco_mask;
+ unsigned long low_vco_max_rate;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct clk_hfpll {
+ struct hfpll_data const *d;
+ int init_done;
+
+ struct clk_regmap clkr;
+ spinlock_t lock;
+};
+
+#define to_clk_hfpll(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr)
+
+extern const struct clk_ops clk_ops_hfpll;
+
+#endif
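
As a hedged illustration only (not part of the patch): a platform driver using this new interface would typically describe each PLL with an hfpll_data table and register the embedded clk_regmap. All register offsets, rates, and names below are placeholders; devm_clk_register_regmap() is assumed to pick up the device's MMIO regmap.

static const struct hfpll_data example_hfpll_data = {
	.mode_reg = 0x00,
	.l_reg = 0x04,
	.m_reg = 0x08,
	.n_reg = 0x0c,
	.user_reg = 0x10,
	.status_reg = 0x1c,
	.lock_bit = 16,
	.user_val = 0x8,
	.min_rate = 600000000UL,
	.max_rate = 3000000000UL,
};

static struct clk_hfpll example_hfpll = {
	.d = &example_hfpll_data,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_hfpll",
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.ops = &clk_ops_hfpll,
	},
};

static int example_hfpll_register(struct device *dev)
{
	struct clk *clk;

	/* The device is assumed to already own an MMIO regmap. */
	spin_lock_init(&example_hfpll.lock);
	clk = devm_clk_register_regmap(dev, &example_hfpll.clkr);

	return PTR_ERR_OR_ZERO(clk);
}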
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
new file mode 100644
index 000000000000..59f1af415b58
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include <asm/krait-l2-accessors.h>
+
+#include "clk-krait.h"
+
+/* Secondary and primary muxes share the same cp15 register */
+static DEFINE_SPINLOCK(krait_clock_reg_lock);
+
+#define LPL_SHIFT 8
+static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
+{
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ regval = krait_get_l2_indirect_reg(mux->offset);
+ regval &= ~(mux->mask << mux->shift);
+ regval |= (sel & mux->mask) << mux->shift;
+ if (mux->lpl) {
+ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT));
+ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
+ }
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* Wait for switch to complete. */
+ mb();
+ udelay(1);
+}
+
+static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = clk_mux_index_to_val(mux->parent_map, 0, index);
+ mux->en_mask = sel;
+ /* Don't touch mux if CPU is off as it won't work */
+ if (__clk_is_enabled(hw->clk))
+ __krait_mux_set_sel(mux, sel);
+
+ mux->reparent = true;
+
+ return 0;
+}
+
+static u8 krait_mux_get_parent(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = krait_get_l2_indirect_reg(mux->offset);
+ sel >>= mux->shift;
+ sel &= mux->mask;
+ mux->en_mask = sel;
+
+ return clk_mux_val_to_index(hw, mux->parent_map, 0, sel);
+}
+
+const struct clk_ops krait_mux_clk_ops = {
+ .set_parent = krait_mux_set_parent,
+ .get_parent = krait_mux_get_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+};
+EXPORT_SYMBOL_GPL(krait_mux_clk_ops);
+
+/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
+static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);
+ return DIV_ROUND_UP(*parent_rate, 2);
+}
+
+static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ unsigned long flags;
+ u32 val;
+ u32 mask = BIT(d->width) - 1;
+
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+ val &= ~mask;
+ krait_set_l2_indirect_reg(d->offset, val);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ return 0;
+}
+
+static unsigned long
+krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ u32 mask = BIT(d->width) - 1;
+ u32 div;
+
+ div = krait_get_l2_indirect_reg(d->offset);
+ div >>= d->shift;
+ div &= mask;
+ div = (div + 1) * 2;
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+const struct clk_ops krait_div2_clk_ops = {
+ .round_rate = krait_div2_round_rate,
+ .set_rate = krait_div2_set_rate,
+ .recalc_rate = krait_div2_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(krait_div2_clk_ops);
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
new file mode 100644
index 000000000000..9120bd2f5297
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_CLK_KRAIT_H
+#define __QCOM_CLK_KRAIT_H
+
+#include <linux/clk-provider.h>
+
+struct krait_mux_clk {
+ unsigned int *parent_map;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 en_mask;
+ bool lpl;
+ u8 safe_sel;
+ u8 old_index;
+ bool reparent;
+
+ struct clk_hw hw;
+ struct notifier_block clk_nb;
+};
+
+#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw)
+
+extern const struct clk_ops krait_mux_clk_ops;
+
+struct krait_div2_clk {
+ u32 offset;
+ u8 width;
+ u32 shift;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw)
+
+extern const struct clk_ops krait_div2_clk_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index dbd5a9e83554..e5eca8a1abe4 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -163,4 +163,15 @@ extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+struct clk_rcg_dfs_data {
+ struct clk_rcg2 *rcg;
+ struct clk_init_data *init;
+};
+
+#define DEFINE_RCG_DFS(r) \
+ { .rcg = &r##_src, .init = &r##_init }
+
+extern int qcom_cc_register_rcg_dfs(struct regmap *regmap,
+ const struct clk_rcg_dfs_data *rcgs,
+ size_t len);
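+/*
+ * A minimal usage sketch (the names below are hypothetical, not taken from
+ * this patch): a driver pairs each DFS-capable RCG with its clk_init_data
+ * and hands the array to qcom_cc_register_rcg_dfs() after registering the
+ * clock controller:
+ *
+ *	static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ *		DEFINE_RCG_DFS(example_qup_clk),
+ *	};
+ *
+ *	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ *				       ARRAY_SIZE(gcc_dfs_clocks));
+ *
+ * DEFINE_RCG_DFS(example_qup_clk) expects example_qup_clk_src and
+ * example_qup_clk_init to exist.
+ */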
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 52208d4165f4..6e3bd195d012 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/slab.h>
#include <asm/div64.h>
@@ -40,6 +41,14 @@
#define N_REG 0xc
#define D_REG 0x10
+/* Dynamic Frequency Scaling */
+#define MAX_PERF_LEVEL 8
+#define SE_CMD_DFSR_OFFSET 0x14
+#define SE_CMD_DFS_EN BIT(0)
+#define SE_PERF_DFSR(level) (0x1c + 0x4 * (level))
+#define SE_PERF_M_DFSR(level) (0x5c + 0x4 * (level))
+#define SE_PERF_N_DFSR(level) (0x9c + 0x4 * (level))
+
enum freq_policy {
FLOOR,
CEIL,
@@ -929,3 +938,189 @@ const struct clk_ops clk_rcg2_shared_ops = {
.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+
+/* Common APIs used for DFS-based RCGs */
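+/*
+ * Rebuild one frequency table entry from the registers for perf level 'l':
+ * PERF_DFSR holds the source select and pre-divider, and, when MND mode is
+ * enabled, the M value and the inverted (N - M) value come from the M/N
+ * DFSR registers.
+ */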
+static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
+ struct freq_tbl *f)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct clk_hw *p;
+ unsigned long prate = 0;
+ u32 val, mask, cfg, mode;
+ int i, num_parents;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
+
+ mask = BIT(rcg->hid_width) - 1;
+ f->pre_div = 1;
+ if (cfg & mask)
+ f->pre_div = cfg & mask;
+
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ num_parents = clk_hw_get_num_parents(hw);
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f->src = rcg->parent_map[i].src;
+ p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
+ prate = clk_hw_get_rate(p);
+ }
+ }
+
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ if (mode) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
+ &val);
+ val &= mask;
+ f->m = val;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
+ &val);
+ val = ~val;
+ val &= mask;
+ val += f->m;
+ f->n = val;
+ }
+
+ f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
+}
+
+static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
+{
+ struct freq_tbl *freq_tbl;
+ int i;
+
+ /* Allocate space for 1 extra entry since the table is zero-terminated */
+ freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
+ if (!freq_tbl)
+ return -ENOMEM;
+ rcg->freq_tbl = freq_tbl;
+
+ for (i = 0; i < MAX_PERF_LEVEL; i++)
+ clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);
+
+ return 0;
+}
+
+static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int ret;
+
+ if (!rcg->freq_tbl) {
+ ret = clk_rcg2_dfs_populate_freq_table(rcg);
+ if (ret) {
+ pr_err("Failed to update DFS tables for %s\n",
+ clk_hw_get_name(hw));
+ return ret;
+ }
+ }
+
+ return clk_rcg2_determine_rate(hw, req);
+}
+
+static unsigned long
+clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;
+
+ regmap_read(rcg->clkr.regmap,
+ rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
+ level &= GENMASK(4, 1);
+ level >>= 1;
+
+ if (rcg->freq_tbl)
+ return rcg->freq_tbl[level].freq;
+
+ /*
+ * Assume that parent_rate is the rate of the actual parent, because
+ * we can't do any better at figuring it out when the table hasn't
+ * been populated yet. We only populate the table in determine_rate()
+ * because we can't guarantee the parents will be registered with the
+ * framework until then.
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
+ &cfg);
+
+ mask = BIT(rcg->hid_width) - 1;
+ pre_div = 1;
+ if (cfg & mask)
+ pre_div = cfg & mask;
+
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ if (mode) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap,
+ rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
+ m &= mask;
+
+ regmap_read(rcg->clkr.regmap,
+ rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
+ n = ~n;
+ n &= mask;
+ n += m;
+ }
+
+ return calc_rate(parent_rate, m, n, mode, pre_div);
+}
+
+static const struct clk_ops clk_rcg2_dfs_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .determine_rate = clk_rcg2_dfs_determine_rate,
+ .recalc_rate = clk_rcg2_dfs_recalc_rate,
+};
+
+static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
+ struct regmap *regmap)
+{
+ struct clk_rcg2 *rcg = data->rcg;
+ struct clk_init_data *init = data->init;
+ u32 val;
+ int ret;
+
+ ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
+ if (ret)
+ return -EINVAL;
+
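+ /* Leave the RCG on its normal ops if the hardware has not enabled DFS */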
+ if (!(val & SE_CMD_DFS_EN))
+ return 0;
+
+ /*
+ * The rate changes when the consumer writes a register in its own
+ * I/O region, so the cached rate can go stale.
+ */
+ init->flags |= CLK_GET_RATE_NOCACHE;
+ init->ops = &clk_rcg2_dfs_ops;
+
+ rcg->freq_tbl = NULL;
+
+ pr_debug("DFS registered for clk %s\n", init->name);
+
+ return 0;
+}
+
+int qcom_cc_register_rcg_dfs(struct regmap *regmap,
+ const struct clk_rcg_dfs_data *rcgs, size_t len)
+{
+ int i, ret;
+
+ for (i = 0; i < len; i++) {
+ ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
+ if (ret) {
+ const char *name = rcgs[i].init->name;
+
+ pr_err("DFS register failed for clk %s\n", name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 5f61225657ab..33d1bc5c6a46 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll0 = {
@@ -113,6 +114,84 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -2797,6 +2876,9 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
[NSSTCM_CLK] = &nss_tcm_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_ipq806x_resets[] = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index fd495e0471bb..399474755654 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll3 = {
@@ -86,6 +87,164 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_8064_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3254,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll2_data = {
+ .mode_reg = 0x3280,
+ .l_reg = 0x3288,
+ .m_reg = 0x328c,
+ .n_reg = 0x3290,
+ .config_reg = 0x3284,
+ .status_reg = 0x329c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3294,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll2 = {
+ .d = &hfpll2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll2.lock),
+};
+
+static struct hfpll_data hfpll3_data = {
+ .mode_reg = 0x32c0,
+ .l_reg = 0x32c8,
+ .m_reg = 0x32cc,
+ .n_reg = 0x32d0,
+ .config_reg = 0x32c4,
+ .status_reg = 0x32dc,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x32d4,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll3 = {
+ .d = &hfpll3_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll3",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll3.lock),
+};
+
+static struct hfpll_data hfpll_l2_8064_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3400,
+ .l_reg = 0x3408,
+ .m_reg = 0x340c,
+ .n_reg = 0x3410,
+ .config_reg = 0x3404,
+ .status_reg = 0x341c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3414,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -3107,6 +3266,9 @@ static struct clk_regmap *gcc_msm8960_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_msm8960_resets[] = {
@@ -3318,6 +3480,11 @@ static struct clk_regmap *gcc_apq8064_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
+ [PLL16] = &hfpll2.clkr,
+ [PLL17] = &hfpll3.clkr,
};
static const struct qcom_reset_map gcc_apq8064_resets[] = {
@@ -3477,6 +3644,11 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
if (ret)
return ret;
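+ /* APQ8064 uses different register offsets for HFPLL1 and the L2 HFPLL */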
+ if (match->data == &gcc_apq8064_desc) {
+ hfpll1.d = &hfpll1_8064_data;
+ hfpll_l2.d = &hfpll_l2_8064_data;
+ }
+
tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
NULL, 0);
if (IS_ERR(tsens))
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9a3290fdd01b..9d136172c27c 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -260,6 +260,36 @@ static struct clk_alpha_pll_postdiv gpll0 = {
},
};
+static struct clk_branch gcc_mmss_gpll0_div_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_div_clk",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops
+ },
+ },
+};
+
static struct clk_alpha_pll gpll4_early = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
@@ -2951,6 +2981,20 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
},
};
+static struct clk_branch gcc_aggre1_pnoc_ahb_clk = {
+ .halt_reg = 0x82014,
+ .clkr = {
+ .enable_reg = 0x82014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_pnoc_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_aggre2_ufs_axi_clk = {
.halt_reg = 0x83014,
.clkr = {
@@ -2981,6 +3025,34 @@ static struct clk_branch gcc_aggre2_usb3_axi_clk = {
},
};
+static struct clk_branch gcc_dcc_ahb_clk = {
+ .halt_reg = 0x84004,
+ .clkr = {
+ .enable_reg = 0x84004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ .halt_reg = 0x85000,
+ .clkr = {
+ .enable_reg = 0x85000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_qspi_ahb_clk = {
.halt_reg = 0x8b004,
.clkr = {
@@ -3039,6 +3111,20 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
},
};
+static struct clk_branch gcc_edp_clkref_clk = {
+ .halt_reg = 0x88004,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_edp_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_clkref_clk = {
.halt_reg = 0x88008,
.clkr = {
@@ -3095,6 +3181,62 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
},
};
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a024,
+ .clkr = {
+ .enable_reg = 0x8a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x8a028,
+ .clkr = {
+ .enable_reg = 0x8a028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_hw *gcc_msm8996_hws[] = {
&xo.hw,
&gpll0_early_div.hw,
@@ -3355,6 +3497,7 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr,
[GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr,
[GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr,
+ [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr,
[GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
[GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
[GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
@@ -3365,6 +3508,15 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr,
[GCC_RX2_USB2_CLKREF_CLK] = &gcc_rx2_usb2_clkref_clk.clkr,
[GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_EDP_CLKREF_CLK] = &gcc_edp_clkref_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
+ [GCC_DCC_AHB_CLK] = &gcc_dcc_ahb_clk.clkr,
+ [GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK] = &gcc_aggre0_noc_mpu_cfg_ahb_clk.clkr,
+ [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK] = &gcc_mss_gpll0_div_clk.clkr,
};
static struct gdsc *gcc_msm8996_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
new file mode 100644
index 000000000000..e4ca6a45f313
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -0,0 +1,2744 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-qcs404.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+
+enum {
+ P_CORE_BI_PLL_TEST_SE,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_AUX,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL4_OUT_AUX,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_AUX,
+ P_HDMI_PHY_PLL_CLK,
+ P_PCIE_0_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_XO,
+};
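+/*
+ * Each gcc_parent_map_N[] pairs a parent identifier with the value the
+ * hardware mux uses for that source; the matching gcc_parent_names_N[]
+ * lists the parent clock names in the same order.
+ */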
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "cxo",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_ao_0[] = {
+ "cxo",
+ "gpll0_ao_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "cxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll6_out_aux",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll6_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL1_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "cxo",
+ "gpll1_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "cxo",
+ "dsi0pll_byteclk_src",
+ "gpll0_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_GPLL0_OUT_AUX, 3 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "cxo",
+ "dsi0_phy_pll_out_byteclk",
+ "gpll0_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN, 2 },
+ { P_GPLL6_OUT_AUX, 3 },
+ { P_GPLL4_OUT_AUX, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll3_out_main",
+ "gpll6_out_aux",
+ "gpll4_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_HDMI_PHY_PLL_CLK, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "cxo",
+ "hdmi_phy_pll_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 2 },
+ { P_GPLL6_OUT_AUX, 3 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_9[] = {
+ "cxo",
+ "gpll0_out_main",
+ "dsi0_phy_pll_out_dsiclk",
+ "gpll6_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "cxo",
+ "sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_XO, 0 },
+ { P_PCIE_0_PIPE_CLK, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_11[] = {
+ "cxo",
+ "pcie_0_pipe_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_XO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_12[] = {
+ "cxo",
+ "dsi0pll_pclk_src",
+ "gpll0_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+ { P_GPLL6_OUT_AUX, 3 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_13[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll4_out_main",
+ "gpll6_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_14[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll4_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_15[] = {
+ "cxo",
+ "gpll0_out_aux",
+ "core_bi_pll_test_se",
+};
+
+static struct clk_fixed_factor cxo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "cxo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll0_sleep_clk_src = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45008,
+ .enable_mask = BIT(23),
+ .enable_is_inverted = true,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_sleep_clk_src",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll0_out_main = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_FSM_MODE,
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[])
+ { "gpll0_sleep_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll0_ao_out_main = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_FSM_MODE,
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_ao_out_main",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll1_out_main = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_main",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 930 MHz configuration */
+static const struct alpha_pll_config gpll3_config = {
+ .l = 48,
+ .alpha = 0x0,
+ .alpha_en_mask = BIT(24),
+ .post_div_mask = 0xf << 8,
+ .post_div_val = 0x1 << 8,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+ .config_ctl_val = 0x4001055b,
+};
+
+static const struct pll_vco gpll3_vco[] = {
+ { 700000000, 1400000000, 0 },
+};
+
+static struct clk_alpha_pll gpll3_out_main = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .vco_table = gpll3_vco,
+ .num_vco = ARRAY_SIZE(gpll3_vco),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4_out_main = {
+ .offset = 0x24000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_pll gpll6 = {
+ .l_reg = 0x37004,
+ .m_reg = 0x37008,
+ .n_reg = 0x3700C,
+ .config_reg = 0x37014,
+ .mode_reg = 0x37000,
+ .status_reg = 0x3701C,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_names = (const char *[]){ "cxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll6_out_aux = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_aux",
+ .parent_names = (const char *[]){ "gpll6" },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
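+/*
+ * Each F() entry is (rate, source, pre-divider, M, N); M = N = 0 leaves
+ * the MND counter unused.
+ */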
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_names = gcc_parent_names_ao_0,
+ .num_parents = 3,
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup0_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup0_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x602c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup0_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup0_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup0_spi_apps_clk_src = {
+ .cmd_rcgr = 0x6034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup0_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x200c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(10480000, P_GPLL0_OUT_MAIN, 1, 3, 229),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(20961000, P_GPLL0_OUT_MAIN, 1, 6, 229),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_MAIN, 1, 3, 160),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(30000000, P_GPLL0_OUT_MAIN, 1, 3, 80),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x4000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart0_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_MAIN, 1, 72, 15625),
+ F(7372800, P_GPLL0_OUT_MAIN, 1, 144, 15625),
+ F(14745600, P_GPLL0_OUT_MAIN, 1, 288, 15625),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 1, 3, 100),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
+ F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
+ F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
+ F(64000000, P_GPLL0_OUT_MAIN, 1, 2, 25),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart0_apps_clk_src = {
+ .cmd_rcgr = 0x600c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart0_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x3034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x4014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup0_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xc00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup0_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup0_spi_apps_clk_src = {
+ .cmd_rcgr = 0xc024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup0_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart0_apps_clk_src = {
+ .cmd_rcgr = 0xc044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_blsp1_uart0_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart0_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
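+/*
+ * The DSI byte and pixel RCGs carry no local frequency table; with
+ * CLK_SET_RATE_PARENT and the byte2/pixel ops their rate follows the
+ * DSI PHY PLL outputs.
+ */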
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_emac_clk_src[] = {
+ F(5000000, P_GPLL1_OUT_MAIN, 2, 1, 50),
+ F(50000000, P_GPLL1_OUT_MAIN, 10, 0, 0),
+ F(125000000, P_GPLL1_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 emac_clk_src = {
+ .cmd_rcgr = 0x4e01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "emac_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_emac_ptp_clk_src[] = {
+ F(50000000, P_GPLL1_OUT_MAIN, 10, 0, 0),
+ F(125000000, P_GPLL1_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 emac_ptp_clk_src = {
+ .cmd_rcgr = 0x4e014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_emac_ptp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "emac_ptp_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_esc0_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(228571429, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(240000000, P_GPLL6_OUT_AUX, 4.5, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(270000000, P_GPLL6_OUT_AUX, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(484800000, P_GPLL3_OUT_MAIN, 1, 0, 0),
+ F(523200000, P_GPLL3_OUT_MAIN, 1, 0, 0),
+ F(550000000, P_GPLL3_OUT_MAIN, 1, 0, 0),
+ F(598000000, P_GPLL3_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 hdmi_app_clk_src = {
+ .cmd_rcgr = 0x4d0e4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_app_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 hdmi_pclk_clk_src = {
+ .cmd_rcgr = 0x4d0dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_pclk_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(145454545, P_GPLL0_OUT_MAIN, 5.5, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_names = gcc_parent_names_9,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_0_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x3e024,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie_0_aux_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pcie_0_pipe_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(125000000, P_PCIE_0_PIPE_CLK, 2, 0, 0),
+ F(250000000, P_PCIE_0_PIPE_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pcie_0_pipe_clk_src = {
+ .cmd_rcgr = 0x3e01c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_pcie_0_pipe_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie_0_pipe_clk_src",
+ .parent_names = gcc_parent_names_11,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_names = gcc_parent_names_12,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(192000000, P_GPLL4_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(384000000, P_GPLL4_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_parent_names_13,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x5d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_14,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x41048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x39028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3901c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x3903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_usb_hs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_15,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_names = gcc_parent_names_15,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x4601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_ahb_clk",
+ .parent_names = (const char *[]){
+ "apss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x5b004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ .parent_names = (const char *[]){
+ "gcc_apss_tcu_clk",
+ },
+
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x59030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_mdss_clk = {
+ .halt_reg = 0x31038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_mdss_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_xo_clk = {
+ .halt_reg = 0x77008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x77008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup0_i2c_apps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup0_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup0_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup0_spi_apps_clk = {
+ .halt_reg = 0x6024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup0_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup0_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x4020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart0_apps_clk = {
+ .halt_reg = 0x6004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart0_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart0_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart3_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
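+/*
+ * Branches with a *_VOTED halt_check are enabled by setting a per-clock
+ * bit in a shared vote register rather than through their own dedicated
+ * branch enable register.
+ */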
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup0_i2c_apps_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup0_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup0_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup0_spi_apps_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup0_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup0_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart0_apps_clk = {
+ .halt_reg = 0xc03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart0_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart0_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_axi_clk = {
+ .halt_reg = 0x4e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_ptp_clk = {
+ .halt_reg = 0x4e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_ptp_clk",
+ .parent_names = (const char *[]){
+ "emac_ptp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_rgmii_clk = {
+ .halt_reg = 0x4e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_rgmii_clk",
+ .parent_names = (const char *[]){
+ "emac_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_slave_ahb_clk = {
+ .halt_reg = 0x4e00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_slave_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_geni_ir_s_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_geni_ir_s_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_geni_ir_h_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_geni_ir_h_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x8000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0xa000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_hdmi_app_clk = {
+ .halt_reg = 0x4d0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_hdmi_app_clk",
+ .parent_names = (const char *[]){
+ "hdmi_app_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_hdmi_pclk_clk = {
+ .halt_reg = 0x4d0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_hdmi_pclk_clk",
+ .parent_names = (const char *[]){
+ "hdmi_pclk_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x3e014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){
+ "pcie_0_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x3e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x3e018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x3e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_names = (const char *[]){
+ "pcie_0_pipe_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x3e010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
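+/*
+ * CLK_IS_CRITICAL keeps these PCNOC branches permanently enabled: the clock
+ * framework turns them on at registration and never gates them.
+ */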
+static struct clk_branch gcc_pcnoc_usb2_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcnoc_usb2_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcnoc_usb3_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcnoc_usb3_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* PWM clocks do not have XO as their parent, since the source clock is a balance root */
+static struct clk_branch gcc_pwm0_xo512_clk = {
+ .halt_reg = 0x44018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pwm0_xo512_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pwm1_xo512_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pwm1_xo512_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pwm2_xo512_clk = {
+ .halt_reg = 0x4a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pwm2_xo512_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x29084,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_dap_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x5d014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_inactivity_timers_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x41044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb20_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x3900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x39014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x39010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x39044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
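+/*
+ * The pipe clock is sourced from the USB3 PHY, so halt checking is skipped
+ * (BRANCH_HALT_SKIP) and no halt_reg is specified.
+ */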
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = {
+ .halt_reg = 0x41030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_phy_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_names = (const char *[]){
+ "usb_hs_system_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
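+/* Non-regmap clk_hw clocks, registered individually in the probe routine */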
+static struct clk_hw *gcc_qcs404_hws[] = {
+ &cxo.hw,
+};
+
+static struct clk_regmap *gcc_qcs404_clocks[] = {
+ [GCC_APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [GCC_BLSP1_QUP0_I2C_APPS_CLK_SRC] = &blsp1_qup0_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP0_SPI_APPS_CLK_SRC] = &blsp1_qup0_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_UART0_APPS_CLK_SRC] = &blsp1_uart0_apps_clk_src.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [GCC_BLSP2_QUP0_I2C_APPS_CLK_SRC] = &blsp2_qup0_i2c_apps_clk_src.clkr,
+ [GCC_BLSP2_QUP0_SPI_APPS_CLK_SRC] = &blsp2_qup0_spi_apps_clk_src.clkr,
+ [GCC_BLSP2_UART0_APPS_CLK_SRC] = &blsp2_uart0_apps_clk_src.clkr,
+ [GCC_BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [GCC_EMAC_CLK_SRC] = &emac_clk_src.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &emac_ptp_clk_src.clkr,
+ [GCC_ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_MDSS_CLK] = &gcc_bimc_mdss_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP0_I2C_APPS_CLK] = &gcc_blsp1_qup0_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP0_SPI_APPS_CLK] = &gcc_blsp1_qup0_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART0_APPS_CLK] = &gcc_blsp1_uart0_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP0_I2C_APPS_CLK] = &gcc_blsp2_qup0_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP0_SPI_APPS_CLK] = &gcc_blsp2_qup0_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART0_APPS_CLK] = &gcc_blsp2_uart0_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr,
+ [GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr,
+ [GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr,
+ [GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr,
+ [GCC_GENI_IR_S_CLK] = &gcc_geni_ir_s_clk.clkr,
+ [GCC_GENI_IR_H_CLK] = &gcc_geni_ir_h_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_HDMI_APP_CLK] = &gcc_mdss_hdmi_app_clk.clkr,
+ [GCC_MDSS_HDMI_PCLK_CLK] = &gcc_mdss_hdmi_pclk_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCNOC_USB2_CLK] = &gcc_pcnoc_usb2_clk.clkr,
+ [GCC_PCNOC_USB3_CLK] = &gcc_pcnoc_usb3_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_PWM0_XO512_CLK] = &gcc_pwm0_xo512_clk.clkr,
+ [GCC_PWM1_XO512_CLK] = &gcc_pwm1_xo512_clk.clkr,
+ [GCC_PWM2_XO512_CLK] = &gcc_pwm2_xo512_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GCC_GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GCC_GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GCC_GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [GCC_GPLL0_OUT_MAIN] = &gpll0_out_main.clkr,
+ [GCC_GPLL0_AO_OUT_MAIN] = &gpll0_ao_out_main.clkr,
+ [GCC_GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr,
+ [GCC_GPLL1_OUT_MAIN] = &gpll1_out_main.clkr,
+ [GCC_GPLL3_OUT_MAIN] = &gpll3_out_main.clkr,
+ [GCC_GPLL4_OUT_MAIN] = &gpll4_out_main.clkr,
+ [GCC_GPLL6] = &gpll6.clkr,
+ [GCC_GPLL6_OUT_AUX] = &gpll6_out_aux,
+ [GCC_HDMI_APP_CLK_SRC] = &hdmi_app_clk_src.clkr,
+ [GCC_HDMI_PCLK_CLK_SRC] = &hdmi_pclk_clk_src.clkr,
+ [GCC_MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr,
+ [GCC_PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [GCC_PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GCC_USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [GCC_VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_USB_HS_INACTIVITY_TIMERS_CLK] =
+ &gcc_usb_hs_inactivity_timers_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr,
+};
+
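+/*
+ * Each entry gives the BCR register offset and, where needed, the bit that
+ * asserts the reset (bit 0 when not specified).
+ */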
+static const struct qcom_reset_map gcc_qcs404_resets[] = {
+ [GCC_GENI_IR_BCR] = { 0x0f000 },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
+ [GCC_QUSB2_PHY_BCR] = { 0x4103c },
+ [GCC_USB_HS_PHY_CFG_AHB_BCR] = { 0x0000c, 1 },
+ [GCC_USB2A_PHY_BCR] = { 0x0000c, 0 },
+ [GCC_USB3_PHY_BCR] = { 0x39004 },
+ [GCC_USB_30_BCR] = { 0x39000 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x39008 },
+ [GCC_PCIE_0_BCR] = { 0x3e000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x3e004 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x3e038 },
+ [GCC_PCIEPHY_0_PHY_BCR] = { 0x3e03c },
+ [GCC_EMAC_BCR] = { 0x4e000 },
+};
+
+static const struct regmap_config gcc_qcs404_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7f000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs404_desc = {
+ .config = &gcc_qcs404_regmap_config,
+ .clks = gcc_qcs404_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs404_clocks),
+ .resets = gcc_qcs404_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs404_resets),
+};
+
+static const struct of_device_id gcc_qcs404_match_table[] = {
+ { .compatible = "qcom,gcc-qcs404" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs404_match_table);
+
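+/*
+ * gpll3 is programmed from gpll3_config before the clock tree is
+ * registered, and the clk_hw clocks are registered by hand since the
+ * descriptor above only covers the regmap-backed clocks and resets.
+ */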
+static int gcc_qcs404_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret, i;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs404_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
+
+ for (i = 0; i < ARRAY_SIZE(gcc_qcs404_hws); i++) {
+ ret = devm_clk_hw_register(&pdev->dev, gcc_qcs404_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs404_driver = {
+ .probe = gcc_qcs404_probe,
+ .driver = {
+ .name = "gcc-qcs404",
+ .of_match_table = gcc_qcs404_match_table,
+ },
+};
+
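+/*
+ * Registered at subsys_initcall time so that the GCC clocks are available
+ * before the consumer drivers that depend on them begin probing.
+ */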
+static int __init gcc_qcs404_init(void)
+{
+ return platform_driver_register(&gcc_qcs404_driver);
+}
+subsys_initcall(gcc_qcs404_init);
+
+static void __exit gcc_qcs404_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs404_driver);
+}
+module_exit(gcc_qcs404_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC QCS404 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
new file mode 100644
index 000000000000..ba239ea4c842
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -0,0 +1,2480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Craig Tatlor.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdm660.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
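+/*
+ * Frequency table entry: rate, parent index, pre-divider, M, N.  The
+ * pre-divider is encoded as (2 * divider - 1) so that half-integer
+ * dividers can be expressed, e.g. a divider of 4.5 is stored as 8.
+ */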
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+ P_XO,
+ P_SLEEP_CLK,
+ P_GPLL0,
+ P_GPLL1,
+ P_GPLL4,
+ P_GPLL0_EARLY_DIV,
+ P_GPLL1_EARLY_DIV,
+};
+
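+/*
+ * Each parent_map entry pairs a P_* source with the mux select value
+ * programmed into the RCG; the corresponding gcc_parent_names_* array
+ * must list the parent clock names in the same order.
+ */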
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 6 },
+};
+
+static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll0_early_div",
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const char * const gcc_parent_names_xo_gpll0[] = {
+ "xo",
+ "gpll0",
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_EARLY_DIV, 6 },
+};
+
+static const char * const gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "sleep_clk",
+ "gpll0_early_div",
+};
+
+static const struct parent_map gcc_parent_map_xo_sleep_clk[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const char * const gcc_parent_names_xo_sleep_clk[] = {
+ "xo",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll4[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 5 },
+};
+
+static const char * const gcc_parent_names_xo_gpll4[] = {
+ "xo",
+ "gpll4",
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 3 },
+ { P_GPLL1, 4 },
+ { P_GPLL4, 5 },
+ { P_GPLL1_EARLY_DIV, 6 },
+};
+
+static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll0_early_div",
+ "gpll1",
+ "gpll4",
+ "gpll1_early_div",
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+ { P_GPLL0_EARLY_DIV, 6 },
+};
+
+static const char * const gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll4",
+ "gpll0_early_div",
+};
+
+static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 2 },
+ { P_GPLL4, 5 },
+};
+
+static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4[] = {
+ "xo",
+ "gpll0",
+ "gpll0_early_div",
+ "gpll4",
+};
+
+static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "xo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
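+/*
+ * Each alpha PLL is split into an "early" output and a clk_alpha_pll_postdiv
+ * at the same register offset; gpll0 and gpll1 additionally expose a fixed
+ * divide-by-two "early_div" output.
+ */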
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_early_div",
+ .parent_names = (const char *[]){ "gpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x00000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "gpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1_early = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll1_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_early_div",
+ .parent_names = (const char *[]){ "gpll1_early" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "gpll1_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4_early = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x77000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "gpll4_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x19020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1b020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1b00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1d020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1f020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 96, 15625),
+ F(7372800, P_GPLL0, 1, 192, 15625),
+ F(14745600, P_GPLL0, 1, 384, 15625),
+ F(16000000, P_GPLL0, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_GPLL0, 1, 4, 75),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(46400000, P_GPLL0, 1, 29, 375),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(51200000, P_GPLL0, 1, 32, 375),
+ F(56000000, P_GPLL0, 1, 7, 75),
+ F(58982400, P_GPLL0, 1, 1536, 15625),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(63157895, P_GPLL0, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1a00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x26020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2800c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2a00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2c020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_gpll0_clk_src = {
+ .cmd_rcgr = 0x4805c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_hmss_gpll0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_gpll0_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_gpll4_clk_src[] = {
+ F(384000000, P_GPLL4, 4, 0, 0),
+ F(768000000, P_GPLL4, 2, 0, 0),
+ F(1536000000, P_GPLL4, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_gpll4_clk_src = {
+ .cmd_rcgr = 0x48074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll4,
+ .freq_tbl = ftbl_hmss_gpll4_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_gpll4_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll4,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x48044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(80200000, P_GPLL1_EARLY_DIV, 5, 0, 0),
+ F(160400000, P_GPLL1, 5, 0, 0),
+ F(267333333, P_GPLL1, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 qspi_ser_clk_src = {
+ .cmd_rcgr = 0x4d00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
+ .freq_tbl = ftbl_qspi_ser_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qspi_ser_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_EARLY_DIV, 5, 1, 3),
+ F(25000000, P_GPLL0_EARLY_DIV, 6, 1, 2),
+ F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(192000000, P_GPLL4, 8, 0, 0),
+ F(384000000, P_GPLL4, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x1602c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_EARLY_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x16010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_EARLY_DIV, 5, 1, 3),
+ F(25000000, P_GPLL0_EARLY_DIV, 6, 1, 2),
+ F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(192000000, P_GPLL4, 8, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x75018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_EARLY_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_ice_core_clk_src = {
+ .cmd_rcgr = 0x76010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_ice_core_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 ufs_phy_aux_clk_src = {
+ .cmd_rcgr = 0x76044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_sleep_clk,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_xo_sleep_clk,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ufs_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_EARLY_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_unipro_core_clk_src = {
+ .cmd_rcgr = 0x76028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_ufs_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb20_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb20_master_clk_src = {
+ .cmd_rcgr = 0x2f010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_usb20_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_master_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb20_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2f024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_usb20_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(66666667, P_GPLL0_EARLY_DIV, 4.5, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(133333333, P_GPLL0, 4.5, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0xf014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0_EARLY_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x5000c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo_sleep_clk,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_xo_sleep_clk,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre2_ufs_axi_clk = {
+ .halt_reg = 0x75034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre2_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre2_usb3_axi_clk = {
+ .halt_reg = 0xf03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre2_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x7106c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_hmss_axi_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_hmss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_mss_q6_axi_clk = {
+ .halt_reg = 0x4401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_mss_q6_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x25004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x2a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x2a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup3_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x2c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x2c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_qup4_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x29004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp2_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_axi_clk = {
+ .halt_reg = 0x5058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb2_axi_clk",
+ .parent_names = (const char *[]){
+ "usb20_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
+ .halt_reg = 0x5018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_ahb_clk = {
+ .halt_reg = 0x84004,
+ .clkr = {
+ .enable_reg = 0x84004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_bimc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk",
+ .parent_names = (const char *[]){
+ "gpll0_early_div",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_dvm_bus_clk = {
+ .halt_reg = 0x4808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_dvm_bus_clk",
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "hmss_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_gpll0_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_clk",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_gpll0_div_clk = {
+ .halt_reg = 0x5200c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_div_clk",
+ .parent_names = (const char *[]){
+ "gpll0_early_div",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mmss_sys_noc_axi_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_sys_noc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x8a040,
+ .clkr = {
+ .enable_reg = 0x8a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a03c,
+ .clkr = {
+ .enable_reg = 0x8a03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_ser_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ser_clk",
+ .parent_names = (const char *[]){
+ "qspi_ser_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx0_usb2_clkref_clk = {
+ .halt_reg = 0x88018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x88018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx0_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x88014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x88014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x1600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ahb_clk = {
+ .halt_reg = 0x7500c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x75008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]){
+ "ufs_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_clkref_clk = {
+ .halt_reg = 0x88008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ice_core_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ice_core_clk",
+ .parent_names = (const char *[]){
+ "ufs_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_aux_clk = {
+ .halt_reg = 0x76040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "ufs_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+ .halt_reg = 0x75014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+ .halt_reg = 0x7605c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7605c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_unipro_core_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "ufs_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x2f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_master_clk",
+ .parent_names = (const char *[]){
+ "usb20_master_clk_src"
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x2f00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb20_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x2f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){
+ "usb30_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xf00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_clkref_clk = {
+ .halt_reg = 0x8800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x50000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "usb3_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
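+/*
+ * Power domains (GDSCs) for the UFS, USB 3.0 and PCIe 0 subsystems.
+ * All three are marked VOTABLE: other masters may also hold them on, so the
+ * kernel only removes its own vote when powering them down.
+ */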
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x75004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "ufs_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc usb_30_gdsc = {
+ .gdscr = 0xf004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "usb_30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .gds_hw_ctrl = 0x0,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
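+/*
+ * Clocks modelled as plain clk_hw with no regmap backing (the XO feed and the
+ * GPLL0/GPLL1 early-div fixed dividers); probe() registers these with
+ * devm_clk_hw_register() before handing the rest to qcom_cc_really_probe().
+ */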
+static struct clk_hw *gcc_sdm660_hws[] = {
+ &xo.hw,
+ &gpll0_early_div.hw,
+ &gpll1_early_div.hw,
+};
+
+static struct clk_regmap *gcc_sdm660_clocks[] = {
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
+ [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_HMSS_AXI_CLK] = &gcc_bimc_hmss_axi_clk.clkr,
+ [GCC_BIMC_MSS_Q6_AXI_CLK] = &gcc_bimc_mss_q6_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_AXI_CLK] = &gcc_cfg_noc_usb2_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_AXI_CLK] = &gcc_cfg_noc_usb3_axi_clk.clkr,
+ [GCC_DCC_AHB_CLK] = &gcc_dcc_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
+ [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
+ [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
+ [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
+ [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_MMSS_SYS_NOC_AXI_CLK] = &gcc_mmss_sys_noc_axi_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
+ [GCC_QSPI_SER_CLK] = &gcc_qspi_ser_clk.clkr,
+ [GCC_RX0_USB2_CLKREF_CLK] = &gcc_rx0_usb2_clkref_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr,
+ [GCC_UFS_ICE_CORE_CLK] = &gcc_ufs_ice_core_clk.clkr,
+ [GCC_UFS_PHY_AUX_CLK] = &gcc_ufs_phy_aux_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+ [GCC_UFS_UNIPRO_CORE_CLK] = &gcc_ufs_unipro_core_clk.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_EARLY] = &gpll1_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
+ [HMSS_GPLL4_CLK_SRC] = &hmss_gpll4_clk_src.clkr,
+ [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [QSPI_SER_CLK_SRC] = &qspi_ser_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [UFS_ICE_CORE_CLK_SRC] = &ufs_ice_core_clk_src.clkr,
+ [UFS_PHY_AUX_CLK_SRC] = &ufs_phy_aux_clk_src.clkr,
+ [UFS_UNIPRO_CORE_CLK_SRC] = &ufs_unipro_core_clk_src.clkr,
+ [USB20_MASTER_CLK_SRC] = &usb20_master_clk_src.clkr,
+ [USB20_MOCK_UTMI_CLK_SRC] = &usb20_mock_utmi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+};
+
+static struct gdsc *gcc_sdm660_gdscs[] = {
+ [UFS_GDSC] = &ufs_gdsc,
+ [USB_30_GDSC] = &usb_30_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+};
+
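+/* Block reset (BCR) registers exported through the reset controller framework */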
+static const struct qcom_reset_map gcc_sdm660_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_UFS_BCR] = { 0x75000 },
+ [GCC_USB3_DP_PHY_BCR] = { 0x50028 },
+ [GCC_USB3_PHY_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x50024 },
+ [GCC_USB_20_BCR] = { 0x2f000 },
+ [GCC_USB_30_BCR] = { 0xf000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
+static const struct regmap_config gcc_sdm660_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x94000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdm660_desc = {
+ .config = &gcc_sdm660_regmap_config,
+ .clks = gcc_sdm660_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm660_clocks),
+ .resets = gcc_sdm660_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm660_resets),
+ .gdscs = gcc_sdm660_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs),
+};
+
+static const struct of_device_id gcc_sdm660_match_table[] = {
+ { .compatible = "qcom,gcc-sdm630" },
+ { .compatible = "qcom,gcc-sdm660" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdm660_match_table);
+
+static int gcc_sdm660_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdm660_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
+ * turned off by hardware during certain application-processor (apps)
+ * low-power modes.
+ */
+ ret = regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+ if (ret)
+ return ret;
+
+ /* Register the hws */
+ for (i = 0; i < ARRAY_SIZE(gcc_sdm660_hws); i++) {
+ ret = devm_clk_hw_register(&pdev->dev, gcc_sdm660_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap);
+}
+
+static struct platform_driver gcc_sdm660_driver = {
+ .probe = gcc_sdm660_probe,
+ .driver = {
+ .name = "gcc-sdm660",
+ .of_match_table = gcc_sdm660_match_table,
+ },
+};
+
+static int __init gcc_sdm660_init(void)
+{
+ return platform_driver_register(&gcc_sdm660_driver);
+}
+core_initcall_sync(gcc_sdm660_init);
+
+static void __exit gcc_sdm660_exit(void)
+{
+ platform_driver_unregister(&gcc_sdm660_driver);
+}
+module_exit(gcc_sdm660_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM GCC sdm660 Driver");
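
For context, peripheral drivers consume these clocks through the common clk API
rather than touching the GCC registers directly. A minimal sketch of such a
consumer is shown below; the "core" clock name and its device-tree wiring are
hypothetical and not part of this patch:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *core;
		int ret;

		/* Clock wired up in DT, e.g. to <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK> */
		core = devm_clk_get(&pdev->dev, "core");
		if (IS_ERR(core))
			return PTR_ERR(core);

		/*
		 * With CLK_SET_RATE_PARENT on the branch, the rate request
		 * reaches the RCG, which picks a matching freq_tbl entry.
		 */
		ret = clk_set_rate(core, 19200000);
		if (ret)
			return ret;

		return clk_prepare_enable(core);
	}
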
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index fa1a196350f1..f133b7f5652f 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -99,22 +99,6 @@ static const char * const gcc_parent_names_4[] = {
"core_bi_pll_test_se",
};
-static const struct parent_map gcc_parent_map_5[] = {
- { P_BI_TCXO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_MAIN, 5 },
- { P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_5[] = {
- "bi_tcxo",
- "gpll0",
- "gpll4",
- "gpll0_out_even",
- "core_bi_pll_test_se",
-};
-
static const struct parent_map gcc_parent_map_6[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -356,6 +340,28 @@ static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
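+ /* floor ops: requested rates are rounded down to a freq_tbl entry */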
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
@@ -396,18 +402,27 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17034,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -416,12 +431,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -430,12 +447,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -444,12 +463,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -458,12 +479,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -472,12 +495,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -486,12 +511,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s6_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -500,12 +527,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s7_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -514,12 +543,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -528,12 +559,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -542,12 +575,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -556,12 +591,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -570,12 +607,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -584,12 +623,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -598,12 +639,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s6_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -612,12 +655,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s7_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -1933,6 +1971,37 @@ static struct clk_branch gcc_qmip_video_ahb_clk = {
},
};
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_qspi_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
.halt_reg = 0x17030,
.halt_check = BRANCH_HALT_VOTED,
@@ -3381,6 +3450,9 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GPLL4] = &gpll4.clkr,
[GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3458,9 +3530,29 @@ static const struct of_device_id gcc_sdm845_match_table[] = {
};
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
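+/*
+ * QUPv3 RCGs that support hardware-controlled Dynamic Frequency Switching;
+ * qcom_cc_register_rcg_dfs() switches them to DFS operation in probe() when
+ * the hardware reports DFS enabled.
+ */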
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk),
+};
+
static int gcc_sdm845_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
if (IS_ERR(regmap))
@@ -3470,6 +3562,11 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
}
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
new file mode 100644
index 000000000000..a6de7101430c
--- /dev/null
+++ b/drivers/clk/qcom/hfpll.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
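+/*
+ * Register layout and rate limits shared by all HFPLL instances handled by
+ * this driver; per-instance data (base address, output name) comes from DT.
+ */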
+static const struct hfpll_data hdata = {
+ .mode_reg = 0x00,
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .user_reg = 0x10,
+ .config_reg = 0x14,
+ .config_val = 0x430405d,
+ .status_reg = 0x1c,
+ .lock_bit = 16,
+
+ .user_val = 0x8,
+ .user_vco_mask = 0x100000,
+ .low_vco_max_rate = 1248000000,
+ .min_rate = 537600000UL,
+ .max_rate = 2900000000UL,
+};
+
+static const struct of_device_id qcom_hfpll_match_table[] = {
+ { .compatible = "qcom,hfpll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table);
+
+static const struct regmap_config hfpll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30,
+ .fast_io = true,
+};
+
+static int qcom_hfpll_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_hfpll *h;
+ struct clk_init_data init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_ops_hfpll,
+ };
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_string_index(dev->of_node, "clock-output-names",
+ 0, &init.name))
+ return -ENODEV;
+
+ h->d = &hdata;
+ h->clkr.hw.init = &init;
+ spin_lock_init(&h->lock);
+
+ return devm_clk_register_regmap(&pdev->dev, &h->clkr);
+}
+
+static struct platform_driver qcom_hfpll_driver = {
+ .probe = qcom_hfpll_probe,
+ .driver = {
+ .name = "qcom-hfpll",
+ .of_match_table = qcom_hfpll_match_table,
+ },
+};
+module_platform_driver(qcom_hfpll_driver);
+
+MODULE_DESCRIPTION("QCOM HFPLL Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-hfpll");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
new file mode 100644
index 000000000000..8590b5edd19d
--- /dev/null
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+static const char *aux_parents[] = {
+ "pll8_vote",
+ "pxo",
+};
+
+static unsigned int aux_parent_map[] = {
+ 3,
+ 0,
+};
+
+static const struct of_device_id kpss_xcc_match_table[] = {
+ { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL },
+ { .compatible = "qcom,kpss-gcc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
+
+static int kpss_xcc_driver_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct clk *clk;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+
+ id = of_match_device(kpss_xcc_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (id->data) {
+ if (of_property_read_string_index(pdev->dev.of_node,
+ "clock-output-names",
+ 0, &name))
+ return -ENODEV;
+ base += 0x14;
+ } else {
+ name = "acpu_l2_aux";
+ base += 0x28;
+ }
+
+ clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
+ 0, aux_parent_map, NULL);
+
+ platform_set_drvdata(pdev, clk);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int kpss_xcc_driver_remove(struct platform_device *pdev)
+{
+ clk_unregister_mux(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver kpss_xcc_driver = {
+ .probe = kpss_xcc_driver_probe,
+ .remove = kpss_xcc_driver_remove,
+ .driver = {
+ .name = "kpss-xcc",
+ .of_match_table = kpss_xcc_match_table,
+ },
+};
+module_platform_driver(kpss_xcc_driver);
+
+MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kpss-xcc");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
new file mode 100644
index 000000000000..4d4b657d33c3
--- /dev/null
+++ b/drivers/clk/qcom/krait-cc.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+
+#include "clk-krait.h"
+
+static unsigned int sec_mux_map[] = {
+ 2,
+ 0,
+};
+
+static unsigned int pri_mux_map[] = {
+ 1,
+ 2,
+ 0,
+};
+
+/*
+ * Notifier callback that switches the muxes to a safe parent while the
+ * HFPLL is being reprogrammed.
+ */
+static int krait_notifier_cb(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ int ret = 0;
+ struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk,
+ clk_nb);
+ /* Switch to safe parent */
+ if (event == PRE_RATE_CHANGE) {
+ mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw);
+ ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel);
+ mux->reparent = false;
+ /*
+ * By the time the POST_RATE_CHANGE notifier runs, the clk
+ * framework has already switched the mux to the new parent if
+ * the rate change required it; only when it has not do we put
+ * the old parent back.
+ */
+ } else if (event == POST_RATE_CHANGE) {
+ if (!mux->reparent)
+ ret = krait_mux_clk_ops.set_parent(&mux->hw,
+ mux->old_index);
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int krait_notifier_register(struct device *dev, struct clk *clk,
+ struct krait_mux_clk *mux)
+{
+ int ret = 0;
+
+ mux->clk_nb.notifier_call = krait_notifier_cb;
+ ret = clk_notifier_register(clk, &mux->clk_nb);
+ if (ret)
+ dev_err(dev, "failed to register clock notifier: %d\n", ret);
+
+ return ret;
+}
+
+static int
+krait_add_div(struct device *dev, int id, const char *s, unsigned int offset)
+{
+ struct krait_div2_clk *div;
+ struct clk_init_data init = {
+ .num_parents = 1,
+ .ops = &krait_div2_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ const char *p_names[1];
+ struct clk *clk;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ div->width = 2;
+ div->shift = 6;
+ div->lpl = id >= 0;
+ div->offset = offset;
+ div->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ init.parent_names = p_names;
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ kfree(init.name);
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_register(dev, &div->hw);
+ kfree(p_names[0]);
+ kfree(init.name);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int
+krait_add_sec_mux(struct device *dev, int id, const char *s,
+ unsigned int offset, bool unique_aux)
+{
+ int ret;
+ struct krait_mux_clk *mux;
+ static const char *sec_mux_list[] = {
+ "acpu_aux",
+ "qsb",
+ };
+ struct clk_init_data init = {
+ .parent_names = sec_mux_list,
+ .num_parents = ARRAY_SIZE(sec_mux_list),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->mask = 0x3;
+ mux->shift = 2;
+ mux->parent_map = sec_mux_map;
+ mux->hw.init = &init;
+ mux->safe_sel = 0;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ if (unique_aux) {
+ sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
+ if (!sec_mux_list[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_aux;
+ }
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ ret = krait_notifier_register(dev, clk, mux);
+ if (ret)
+ goto unique_aux;
+
+unique_aux:
+ if (unique_aux)
+ kfree(sec_mux_list[0]);
+err_aux:
+ kfree(init.name);
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct clk *
+krait_add_pri_mux(struct device *dev, int id, const char *s,
+ unsigned int offset)
+{
+ int ret;
+ struct krait_mux_clk *mux;
+ const char *p_names[3];
+ struct clk_init_data init = {
+ .parent_names = p_names,
+ .num_parents = ARRAY_SIZE(p_names),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->mask = 0x3;
+ mux->shift = 0;
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->parent_map = pri_mux_map;
+ mux->hw.init = &init;
+ mux->safe_sel = 2;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
+
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p0;
+ }
+
+ p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!p_names[1]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p1;
+ }
+
+ p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!p_names[2]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p2;
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ ret = krait_notifier_register(dev, clk, mux);
+ if (ret)
+ goto err_p3;
+err_p3:
+ kfree(p_names[2]);
+err_p2:
+ kfree(p_names[1]);
+err_p1:
+ kfree(p_names[0]);
+err_p0:
+ kfree(init.name);
+ return clk;
+}
+
+/* id < 0 for L2, otherwise id == physical CPU number */
+static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
+{
+ int ret;
+ unsigned int offset;
+ void *p = NULL;
+ const char *s;
+ struct clk *clk;
+
+ if (id >= 0) {
+ offset = 0x4501 + (0x1000 * id);
+ s = p = kasprintf(GFP_KERNEL, "%d", id);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ offset = 0x500;
+ s = "_l2";
+ }
+
+ ret = krait_add_div(dev, id, s, offset);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ clk = krait_add_pri_mux(dev, id, s, offset);
+err:
+ kfree(p);
+ return clk;
+}
+
+static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct clk **clks = data;
+
+ if (idx >= 5) {
+ pr_err("%s: invalid clock index %d\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clks[idx] ? : ERR_PTR(-ENODEV);
+}
+
+static const struct of_device_id krait_cc_match_table[] = {
+ { .compatible = "qcom,krait-cc-v1", (void *)1UL },
+ { .compatible = "qcom,krait-cc-v2" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, krait_cc_match_table);
+
+static int krait_cc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ unsigned long cur_rate, aux_rate;
+ int cpu;
+ struct clk *clk;
+ struct clk **clks;
+ struct clk *l2_pri_mux_clk;
+
+ id = of_match_device(krait_cc_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
+ clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!id->data) {
+ clk = clk_register_fixed_factor(dev, "acpu_aux",
+ "gpll0_vote", 0, 1, 2);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ /* Krait configurations have at most 4 CPUs and one L2 */
+ clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ clk = krait_add_clks(dev, cpu, id->data);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[cpu] = clk;
+ }
+
+ l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
+ if (IS_ERR(l2_pri_mux_clk))
+ return PTR_ERR(l2_pri_mux_clk);
+ clks[4] = l2_pri_mux_clk;
+
+ /*
+ * We don't want the CPU or L2 clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ for_each_online_cpu(cpu) {
+ clk_prepare_enable(l2_pri_mux_clk);
+ WARN(clk_prepare_enable(clks[cpu]),
+ "Unable to turn on CPU%d clock", cpu);
+ }
+
+ /*
+ * Force reinit of HFPLLs and muxes to overwrite any potential
+ * incorrect configuration of HFPLLs and muxes by the bootloader.
+ * While at it, also make sure the cores are running at known rates
+ * and print the current rate.
+ *
+ * The clocks are set to aux clock rate first to make sure the
+ * secondary mux is not sourcing off of QSB. The rate is then set to
+ * two different rates to force a HFPLL reinit under all
+ * circumstances.
+ */
+ cur_rate = clk_get_rate(l2_pri_mux_clk);
+ aux_rate = 384000000;
+ if (cur_rate == 1) {
+ pr_info("L2 @ QSB rate. Forcing new rate.\n");
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(l2_pri_mux_clk, aux_rate);
+ clk_set_rate(l2_pri_mux_clk, 2);
+ clk_set_rate(l2_pri_mux_clk, cur_rate);
+ pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
+ for_each_possible_cpu(cpu) {
+ clk = clks[cpu];
+ cur_rate = clk_get_rate(clk);
+ if (cur_rate == 1) {
+ pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
+ cur_rate = aux_rate;
+ }
+
+ clk_set_rate(clk, aux_rate);
+ clk_set_rate(clk, 2);
+ clk_set_rate(clk, cur_rate);
+ pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
+ }
+
+ of_clk_add_provider(dev->of_node, krait_of_get, clks);
+
+ return 0;
+}
+
+static struct platform_driver krait_cc_driver = {
+ .probe = krait_cc_probe,
+ .driver = {
+ .name = "krait-cc",
+ .of_match_table = krait_cc_match_table,
+ },
+};
+module_platform_driver(krait_cc_driver);
+
+MODULE_DESCRIPTION("Krait CPU Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:krait-cc");
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 9022bbe1297e..b879e3e3a6b4 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -1,13 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
config CLK_RENESAS
bool "Renesas SoC clock support" if COMPILE_TEST && !ARCH_RENESAS
default y if ARCH_RENESAS
select CLK_EMEV2 if ARCH_EMEV2
select CLK_RZA1 if ARCH_R7S72100
+ select CLK_R7S9210 if ARCH_R7S9210
select CLK_R8A73A4 if ARCH_R8A73A4
select CLK_R8A7740 if ARCH_R8A7740
- select CLK_R8A7743 if ARCH_R8A7743
+ select CLK_R8A7743 if ARCH_R8A7743 || ARCH_R8A7744
select CLK_R8A7745 if ARCH_R8A7745
select CLK_R8A77470 if ARCH_R8A77470
+ select CLK_R8A774A1 if ARCH_R8A774A1
+ select CLK_R8A774C0 if ARCH_R8A774C0
select CLK_R8A7778 if ARCH_R8A7778
select CLK_R8A7779 if ARCH_R8A7779
select CLK_R8A7790 if ARCH_R8A7790
@@ -45,6 +50,10 @@ config CLK_RZA1
bool "RZ/A1H clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
+config CLK_R7S9210
+ bool "RZ/A2 clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_R8A73A4
bool "R-Mobile APE6 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
@@ -67,6 +76,14 @@ config CLK_R8A77470
bool "RZ/G1C clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
+config CLK_R8A774A1
+ bool "RZ/G2M clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
+config CLK_R8A774C0
+ bool "RZ/G2E clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A7778
bool "R-Car M1A clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index e4aa3d6143d2..c793e3cc9452 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -2,11 +2,14 @@
# SoC
obj-$(CONFIG_CLK_EMEV2) += clk-emev2.o
obj-$(CONFIG_CLK_RZA1) += clk-rz.o
+obj-$(CONFIG_CLK_R7S9210) += r7s9210-cpg-mssr.o
obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o
obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o
obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A774A1) += r8a774a1-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A774C0) += r8a774c0-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o
obj-$(CONFIG_CLK_R8A7790) += r8a7790-cpg-mssr.o
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 9febbf42c3df..57c934164306 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7790 Common Clock Framework support
*
* Copyright (C) 2013 Renesas Solutions Corp.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -312,8 +309,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
num_parents = of_clk_get_parent_count(np);
if (num_parents < 1) {
- pr_err("%s: no parent found for %s DIV6 clock\n",
- __func__, np->name);
+ pr_err("%s: no parent found for %pOFn DIV6 clock\n",
+ __func__, np);
return;
}
@@ -324,8 +321,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
reg = of_iomap(np, 0);
if (reg == NULL) {
- pr_err("%s: failed to map %s DIV6 clock register\n",
- __func__, np->name);
+ pr_err("%s: failed to map %pOFn DIV6 clock register\n",
+ __func__, np);
goto error;
}
@@ -337,8 +334,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
clk = cpg_div6_register(clk_name, num_parents, parent_names, reg, NULL);
if (IS_ERR(clk)) {
- pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
- __func__, np->name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn DIV6 clock (%ld)\n",
+ __func__, np, PTR_ERR(clk));
goto error;
}
diff --git a/drivers/clk/renesas/clk-emev2.c b/drivers/clk/renesas/clk-emev2.c
index a91825471c79..7807b30a5bbb 100644
--- a/drivers/clk/renesas/clk-emev2.c
+++ b/drivers/clk/renesas/clk-emev2.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EMMA Mobile EV2 common clock framework support
*
* Copyright (C) 2013 Takashi Yoshii <takashi.yoshii.ze@renesas.com>
* Copyright (C) 2012 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -86,8 +74,8 @@ static void __init emev2_smu_clkdiv_init(struct device_node *np)
clk = clk_register_divider(NULL, np->name, parent_name, 0,
smu_base + reg[0], reg[1], 8, 0, &lock);
of_clk_add_provider(np, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, np->name, NULL);
- pr_debug("## %s %s %p\n", __func__, np->name, clk);
+ clk_register_clkdev(clk, np->full_name, NULL);
+ pr_debug("## %s %pOFn %p\n", __func__, np, clk);
}
CLK_OF_DECLARE(emev2_smu_clkdiv, "renesas,emev2-smu-clkdiv",
emev2_smu_clkdiv_init);
@@ -104,7 +92,7 @@ static void __init emev2_smu_gclk_init(struct device_node *np)
clk = clk_register_gate(NULL, np->name, parent_name, 0,
smu_base + reg[0], reg[1], 0, &lock);
of_clk_add_provider(np, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, np->name, NULL);
- pr_debug("## %s %s %p\n", __func__, np->name, clk);
+ clk_register_clkdev(clk, np->full_name, NULL);
+ pr_debug("## %s %pOFn %p\n", __func__, np, clk);
}
CLK_OF_DECLARE(emev2_smu_gclk, "renesas,emev2-smu-gclk", emev2_smu_gclk_init);
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index e82adcb16a52..1c1768c2cc82 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car MSTP clocks
*
@@ -5,10 +6,6 @@
* Copyright (C) 2015 Glider bvba
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk.h>
@@ -239,8 +236,8 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
break;
if (clkidx >= MSTP_MAX_CLOCKS) {
- pr_err("%s: invalid clock %s %s index %u\n",
- __func__, np->name, name, clkidx);
+ pr_err("%s: invalid clock %pOFn %s index %u\n",
+ __func__, np, name, clkidx);
continue;
}
@@ -259,8 +256,8 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
*/
clk_register_clkdev(clks[clkidx], name, NULL);
} else {
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clks[clkidx]));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clks[clkidx]));
}
}
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 7b903ce4c901..2719c248c67b 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a73a4 Core CPG Clocks
*
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -228,8 +225,8 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
clk = r8a73a4_cpg_register_clock(np, cpg, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index a7a30d2eca41..5967656c13cc 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7740 Core CPG Clocks
*
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -187,8 +184,8 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np)
clk = r8a7740_cpg_register_clock(np, cpg, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 886a8380e912..3ccc53685bdd 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7778 Core CPG Clocks
*
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -130,8 +127,8 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np)
clk = r8a7778_cpg_register_clock(np, cpg, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index 5adcca4656c3..9f3b5522eef5 100644
--- a/drivers/clk/renesas/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7779 Core CPG Clocks
*
* Copyright (C) 2013, 2014 Horms Solutions Ltd.
*
* Contact: Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -164,8 +161,8 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np)
clk = r8a7779_cpg_register_clock(np, cpg, config,
plla_mult, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index bccd62f2cb09..2913b4148157 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* rcar_gen2 Core CPG Clocks
*
* Copyright (C) 2013 Ideas On Board SPRL
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -445,8 +442,8 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
clk = rcar_gen2_cpg_register_clock(np, cpg, config, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index ac2f86d626b6..3cda53a97f4e 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RZ/A1 Core CPG Clocks
*
* Copyright (C) 2013 Ideas On Board SPRL
* Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -113,8 +110,8 @@ static void __init rz_cpg_clocks_init(struct device_node *np)
clk = rz_cpg_register_clock(np, cpg, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index bab33610eb6c..dc8ffc7c727a 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sh73a0 Core CPG Clocks
*
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk-provider.h>
@@ -206,8 +203,8 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
clk = sh73a0_cpg_register_clock(np, cpg, name);
if (IS_ERR(clk))
- pr_err("%s: failed to register %s %s clock (%ld)\n",
- __func__, np->name, name, PTR_ERR(clk));
+ pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
+ __func__, np, name, PTR_ERR(clk));
else
cpg->data.clks[i] = clk;
}
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
new file mode 100644
index 000000000000..5135f13ec628
--- /dev/null
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R7S9210 Clock Pulse Generator / Module Standby
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2018 Chris Brandt
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <dt-bindings/clock/r7s9210-cpg-mssr.h>
+#include "renesas-cpg-mssr.h"
+
+#define CPG_FRQCR 0x00
+
+static u8 cpg_mode;
+
+/* Internal Clock ratio table */
+static const struct {
+ unsigned int i;
+ unsigned int g;
+ unsigned int b;
+ unsigned int p1;
+ /* p0 is always 32 */
+} ratio_tab[5] = { /* I, G, B, P1 */
+ { 2, 4, 8, 16}, /* FRQCR = 0x012 */
+ { 4, 4, 8, 16}, /* FRQCR = 0x112 */
+ { 8, 4, 8, 16}, /* FRQCR = 0x212 */
+ { 16, 8, 16, 16}, /* FRQCR = 0x322 */
+ { 16, 16, 32, 32}, /* FRQCR = 0x333 */
+ };
+
+enum rz_clk_types {
+ CLK_TYPE_RZA_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_RZA_PLL,
+};
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R7S9210_CLK_P0,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static struct cpg_core_clk r7s9210_early_core_clks[] = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_RZA_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll", CLK_PLL, CLK_TYPE_RZA_PLL, CLK_MAIN),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("p1c", R7S9210_CLK_P1C, CLK_PLL, 16, 1),
+};
+
+static const struct mssr_mod_clk r7s9210_early_mod_clks[] __initconst = {
+ DEF_MOD_STB("ostm2", 34, R7S9210_CLK_P1C),
+ DEF_MOD_STB("ostm1", 35, R7S9210_CLK_P1C),
+ DEF_MOD_STB("ostm0", 36, R7S9210_CLK_P1C),
+};
+
+static struct cpg_core_clk r7s9210_core_clks[] = {
+ /* Core Clock Outputs */
+ DEF_FIXED("i", R7S9210_CLK_I, CLK_PLL, 2, 1),
+ DEF_FIXED("g", R7S9210_CLK_G, CLK_PLL, 4, 1),
+ DEF_FIXED("b", R7S9210_CLK_B, CLK_PLL, 8, 1),
+ DEF_FIXED("p1", R7S9210_CLK_P1, CLK_PLL, 16, 1),
+ DEF_FIXED("p0", R7S9210_CLK_P0, CLK_PLL, 32, 1),
+};
+
+static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = {
+ DEF_MOD_STB("scif4", 43, R7S9210_CLK_P1C),
+ DEF_MOD_STB("scif3", 44, R7S9210_CLK_P1C),
+ DEF_MOD_STB("scif2", 45, R7S9210_CLK_P1C),
+ DEF_MOD_STB("scif1", 46, R7S9210_CLK_P1C),
+ DEF_MOD_STB("scif0", 47, R7S9210_CLK_P1C),
+
+ DEF_MOD_STB("ether1", 64, R7S9210_CLK_B),
+ DEF_MOD_STB("ether0", 65, R7S9210_CLK_B),
+
+ DEF_MOD_STB("i2c3", 84, R7S9210_CLK_P1),
+ DEF_MOD_STB("i2c2", 85, R7S9210_CLK_P1),
+ DEF_MOD_STB("i2c1", 86, R7S9210_CLK_P1),
+ DEF_MOD_STB("i2c0", 87, R7S9210_CLK_P1),
+
+ DEF_MOD_STB("spi2", 95, R7S9210_CLK_P1),
+ DEF_MOD_STB("spi1", 96, R7S9210_CLK_P1),
+ DEF_MOD_STB("spi0", 97, R7S9210_CLK_P1),
+};
+
+/* The clock dividers in the table vary based on DT and register settings */
+static void __init r7s9210_update_clk_table(struct clk *extal_clk,
+ void __iomem *base)
+{
+ int i;
+ u16 frqcr;
+ u8 index;
+
+ /* If EXTAL is above 12MHz, then we know it is Mode 1 */
+ if (clk_get_rate(extal_clk) > 12000000)
+ cpg_mode = 1;
+
+ frqcr = clk_readl(base + CPG_FRQCR) & 0xFFF;
+ if (frqcr == 0x012)
+ index = 0;
+ else if (frqcr == 0x112)
+ index = 1;
+ else if (frqcr == 0x212)
+ index = 2;
+ else if (frqcr == 0x322)
+ index = 3;
+ else if (frqcr == 0x333)
+ index = 4;
+ else
+ BUG_ON(1); /* Illegal FRQCR value */
+
+ for (i = 0; i < ARRAY_SIZE(r7s9210_core_clks); i++) {
+ switch (r7s9210_core_clks[i].id) {
+ case R7S9210_CLK_I:
+ r7s9210_core_clks[i].div = ratio_tab[index].i;
+ break;
+ case R7S9210_CLK_G:
+ r7s9210_core_clks[i].div = ratio_tab[index].g;
+ break;
+ case R7S9210_CLK_B:
+ r7s9210_core_clks[i].div = ratio_tab[index].b;
+ break;
+ case R7S9210_CLK_P1:
+ case R7S9210_CLK_P1C:
+ r7s9210_core_clks[i].div = ratio_tab[index].p1;
+ break;
+ case R7S9210_CLK_P0:
+ r7s9210_core_clks[i].div = 32;
+ break;
+ }
+ }
+}
+
+struct clk * __init rza2_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
+{
+ struct clk *parent;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ switch (core->id) {
+ case CLK_MAIN:
+ break;
+
+ case CLK_PLL:
+ if (cpg_mode)
+ mult = 44; /* Divider 1 is 1/2 */
+ else
+ mult = 88; /* Divider 1 is 1 */
+ break;
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (core->id == CLK_MAIN)
+ r7s9210_update_clk_table(parent, base);
+
+ return clk_register_fixed_factor(NULL, core->name,
+ __clk_get_name(parent), 0, mult, div);
+}
+
+const struct cpg_mssr_info r7s9210_cpg_mssr_info __initconst = {
+ /* Early Clocks */
+ .early_core_clks = r7s9210_early_core_clks,
+ .num_early_core_clks = ARRAY_SIZE(r7s9210_early_core_clks),
+ .early_mod_clks = r7s9210_early_mod_clks,
+ .num_early_mod_clks = ARRAY_SIZE(r7s9210_early_mod_clks),
+
+ /* Core Clocks */
+ .core_clks = r7s9210_core_clks,
+ .num_core_clks = ARRAY_SIZE(r7s9210_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r7s9210_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r7s9210_mod_clks),
+ .num_hw_mod_clks = 11 * 32, /* includes STBCR0 which doesn't exist */
+
+ /* Callbacks */
+ .cpg_clk_register = rza2_cpg_clk_register,
+
+ /* RZ/A2 has Standby Control Registers */
+ .stbyctrl = true,
+};
+
+static void __init r7s9210_cpg_mssr_early_init(struct device_node *np)
+{
+ cpg_mssr_early_init(np, &r7s9210_cpg_mssr_info);
+}
+
+CLK_OF_DECLARE_DRIVER(cpg_mstp_clks, "renesas,r7s9210-cpg-mssr",
+ r7s9210_cpg_mssr_early_init);
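
A worked check of the ratio table and PLL multiplier above, as a stand-alone sketch; the 24 MHz EXTAL value is an assumption chosen for illustration, not taken from the patch. With EXTAL above 12 MHz, cpg_mode becomes 1, so the PLL multiplier is 44, and FRQCR = 0x012 selects row 0 of ratio_tab, giving I = EXTAL * 44 / 2.

#include <stdio.h>

/* Mirrors the FRQCR = 0x012 row of ratio_tab and the cpg_mode = 1 path of
 * rza2_cpg_clk_register(); the 24 MHz EXTAL is an assumed example input. */
int main(void)
{
	unsigned long extal = 24000000;		/* assumed board EXTAL */
	unsigned long pll = extal * 44;		/* cpg_mode = 1: x88 with divider 1 at 1/2, i.e. x44 */
	unsigned long i_clk = pll / 2;		/* ratio_tab[0].i */
	unsigned long p1 = pll / 16;		/* ratio_tab[0].p1 */

	printf("PLL %lu Hz, I %lu Hz, P1 %lu Hz\n", pll, i_clk, p1);
	/* prints: PLL 1056000000 Hz, I 528000000 Hz, P1 66000000 Hz */
	return 0;
}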
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
index 011c170ec3f9..c01d9af2525a 100644
--- a/drivers/clk/renesas/r8a7743-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7743 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2016 Cogent Embedded Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation; of the License.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7743-cpg-mssr.h>
@@ -37,7 +35,7 @@ enum clk_ids {
MOD_CLK_BASE
};
-static const struct cpg_core_clk r8a7743_core_clks[] __initconst = {
+static struct cpg_core_clk r8a7743_core_clks[] __initdata = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
DEF_INPUT("usb_extal", CLK_USB_EXTAL),
@@ -238,6 +236,8 @@ static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
static int __init r8a7743_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ struct device_node *np = dev->of_node;
+ unsigned int i;
u32 cpg_mode;
int error;
@@ -247,6 +247,14 @@ static int __init r8a7743_cpg_mssr_init(struct device *dev)
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (of_device_is_compatible(np, "renesas,r8a7744-cpg-mssr")) {
+ /* RZ/G1N uses a 1/5 divider for ZG */
+ for (i = 0; i < ARRAY_SIZE(r8a7743_core_clks); i++)
+ if (r8a7743_core_clks[i].id == R8A7743_CLK_ZG) {
+ r8a7743_core_clks[i].div = 5;
+ break;
+ }
+ }
return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
}
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 4b0a9243b748..493874e5ebee 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7745 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2016 Cogent Embedded Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation; of the License.
*/
#include <linux/device.h>
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
new file mode 100644
index 000000000000..b0da34217bdf
--- /dev/null
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a774a1 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ * Based on r8a7796-cpg-mssr.c
+ *
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a774a1-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A774A1_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_RINT,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_BASE("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_FIXED("ztr", R8A774A1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A774A1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A774A1_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A774A1_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A774A1_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A774A1_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A774A1_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A774A1_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A774A1_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d8", R8A774A1_CLK_S0D8, CLK_S0, 8, 1),
+ DEF_FIXED("s0d12", R8A774A1_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s1d2", R8A774A1_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A774A1_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A774A1_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A774A1_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A774A1_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A774A1_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A774A1_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A774A1_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A774A1_CLK_SD0, CLK_SDSRC, 0x074),
+ DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, CLK_SDSRC, 0x078),
+ DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, CLK_SDSRC, 0x26c),
+
+ DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
+
+ DEF_GEN3_OSC("osc", R8A774A1_CLK_OSC, CLK_EXTAL, 8),
+
+ DEF_BASE("r", R8A774A1_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
+};
+
+static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
+ DEF_MOD("fdp1-0", 119, R8A774A1_CLK_S0D1),
+ DEF_MOD("scif5", 202, R8A774A1_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A774A1_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A774A1_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A774A1_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A774A1_CLK_S3D4),
+ DEF_MOD("msiof3", 208, R8A774A1_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
+ DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
+ DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
+ DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
+ DEF_MOD("cmt1", 302, R8A774A1_CLK_R),
+ DEF_MOD("cmt0", 303, R8A774A1_CLK_R),
+ DEF_MOD("scif2", 310, R8A774A1_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A774A1_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A774A1_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A774A1_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A774A1_CLK_SD0),
+ DEF_MOD("pcie1", 318, R8A774A1_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A774A1_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A774A1_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A774A1_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A774A1_CLK_S3D1),
+ DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
+ DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
+ DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
+ DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A774A1_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A774A1_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A774A1_CLK_CP),
+ DEF_MOD("pwm", 523, R8A774A1_CLK_S0D12),
+ DEF_MOD("fcpvd2", 601, R8A774A1_CLK_S0D2),
+ DEF_MOD("fcpvd1", 602, R8A774A1_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A774A1_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A774A1_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A774A1_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A774A1_CLK_S0D1),
+ DEF_MOD("fcpci0", 617, R8A774A1_CLK_S0D2),
+ DEF_MOD("fcpcs", 619, R8A774A1_CLK_S0D2),
+ DEF_MOD("vspd2", 621, R8A774A1_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A774A1_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A774A1_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A774A1_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A774A1_CLK_S0D1),
+ DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D4),
+ DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D4),
+ DEF_MOD("csi20", 714, R8A774A1_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A774A1_CLK_CSI0),
+ DEF_MOD("du2", 722, R8A774A1_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A774A1_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A774A1_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A774A1_CLK_S2D1),
+ DEF_MOD("hdmi0", 729, R8A774A1_CLK_HDMI),
+ DEF_MOD("vin7", 804, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A774A1_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A774A1_CLK_S0D2),
+ DEF_MOD("etheravb", 812, R8A774A1_CLK_S0D6),
+ DEF_MOD("gpio7", 905, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4),
+ DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A774A1_CLK_S0D6),
+ DEF_MOD("i2c-dvfs", 926, R8A774A1_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A774A1_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A774A1_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A774A1_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A774A1_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A774A1_CLK_S3D2),
+ DEF_MOD("ssi-all", 1005, R8A774A1_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A774A1_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a774a1_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC
+ * 14 13 19 17 (MHz)
+ *-------------------------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16
+ * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16
+ * 0 0 1 0 Prohibited setting
+ * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16
+ * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19
+ * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19
+ * 0 1 1 0 Prohibited setting
+ * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19
+ * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 /24
+ * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24
+ * 1 0 1 0 Prohibited setting
+ * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24
+ * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32
+ * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32
+ * 1 1 1 0 Prohibited setting
+ * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
+ (((md) & BIT(13)) >> 11) | \
+ (((md) & BIT(19)) >> 18) | \
+ (((md) & BIT(17)) >> 17))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 192, 1, 128, 1, 16, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 160, 1, 106, 1, 19, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 128, 1, 128, 1, 24, },
+ { 1, 128, 1, 84, 1, 24, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, 24, },
+ { 2, 192, 1, 192, 1, 32, },
+ { 2, 192, 1, 128, 1, 32, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, 32, },
+};
+
+static int __init r8a774a1_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a774a1_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a774a1_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a774a1_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a774a1_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a774a1_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a774a1_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a774a1_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a774a1_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
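
To make the mode-pin decoding above concrete, a small stand-alone sketch (illustrative only): CPG_PLL_CONFIG_INDEX() packs MD14, MD13, MD19 and MD17 into a 4-bit index in that order, so for example MD14 = 1 and MD17 = 1 selects entry 9 of cpg_pll_configs[], the 25 MHz EXTAL row with the PLL3 x84 multiplier.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1U << (n))

/* Same packing as the macro above: index = MD14:MD13:MD19:MD17 */
#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
					 (((md) & BIT(13)) >> 11) | \
					 (((md) & BIT(19)) >> 18) | \
					 (((md) & BIT(17)) >> 17))

int main(void)
{
	uint32_t cpg_mode = BIT(14) | BIT(17);	/* MD14 = 1, MD17 = 1 */

	printf("index = %u\n", CPG_PLL_CONFIG_INDEX(cpg_mode));	/* prints 9 */
	return 0;
}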
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
new file mode 100644
index 000000000000..10b96895d452
--- /dev/null
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a774c0 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ * Based on r8a77990-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL0D4,
+ CLK_PLL0D8,
+ CLK_PLL0D20,
+ CLK_PLL0D24,
+ CLK_PLL1D2,
+ CLK_PE,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_RINT,
+ CLK_OCO,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100),
+ DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1),
+ DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1),
+ DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1),
+ DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1),
+ DEF_FIXED(".pll1d2", CLK_PLL1D2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pe", CLK_PE, CLK_PLL0D20, 1, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+
+ DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("za2", R8A774C0_CLK_ZA2, CLK_PLL0D24, 1, 1),
+ DEF_FIXED("za8", R8A774C0_CLK_ZA8, CLK_PLL0D8, 1, 1),
+ DEF_FIXED("ztr", R8A774C0_CLK_ZTR, CLK_PLL1, 6, 1),
+ DEF_FIXED("zt", R8A774C0_CLK_ZT, CLK_PLL1, 4, 1),
+ DEF_FIXED("zx", R8A774C0_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("s0d1", R8A774C0_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d3", R8A774C0_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d6", R8A774C0_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12", R8A774C0_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24", R8A774C0_CLK_S0D24, CLK_S0, 24, 1),
+ DEF_FIXED("s1d1", R8A774C0_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A774C0_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A774C0_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A774C0_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A774C0_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A774C0_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A774C0_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, CLK_SDSRC, 0x026c),
+
+ DEF_FIXED("cl", R8A774C0_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A774C0_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A774C0_CLK_CPEX, CLK_EXTAL, 4, 1),
+
+ DEF_DIV6_RO("osc", R8A774C0_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
+
+ DEF_GEN3_PE("s0d6c", R8A774C0_CLK_S0D6C, CLK_S0, 6, CLK_PE, 2),
+ DEF_GEN3_PE("s3d1c", R8A774C0_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1),
+ DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
+ DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
+
+ DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c),
+ DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014),
+
+ DEF_GEN3_RCKSEL("r", R8A774C0_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4),
+};
+
+static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C),
+ DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C),
+ DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C),
+ DEF_MOD("scif1", 206, R8A774C0_CLK_S3D4C),
+ DEF_MOD("scif0", 207, R8A774C0_CLK_S3D4C),
+ DEF_MOD("msiof3", 208, R8A774C0_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A774C0_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A774C0_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A774C0_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A774C0_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A774C0_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A774C0_CLK_S3D1),
+
+ DEF_MOD("cmt3", 300, R8A774C0_CLK_R),
+ DEF_MOD("cmt2", 301, R8A774C0_CLK_R),
+ DEF_MOD("cmt1", 302, R8A774C0_CLK_R),
+ DEF_MOD("cmt0", 303, R8A774C0_CLK_R),
+ DEF_MOD("scif2", 310, R8A774C0_CLK_S3D4C),
+ DEF_MOD("sdif3", 311, R8A774C0_CLK_SD3),
+ DEF_MOD("sdif1", 313, R8A774C0_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A774C0_CLK_SD0),
+ DEF_MOD("pcie0", 319, R8A774C0_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A774C0_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A774C0_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A774C0_CLK_S3D1),
+
+ DEF_MOD("rwdt", 402, R8A774C0_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
+
+ DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
+ DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
+ DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
+ DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
+ DEF_MOD("hscif1", 519, R8A774C0_CLK_S3D1C),
+ DEF_MOD("hscif0", 520, R8A774C0_CLK_S3D1C),
+ DEF_MOD("thermal", 522, R8A774C0_CLK_CP),
+ DEF_MOD("pwm", 523, R8A774C0_CLK_S3D4C),
+
+ DEF_MOD("fcpvd1", 602, R8A774C0_CLK_S1D2),
+ DEF_MOD("fcpvd0", 603, R8A774C0_CLK_S1D2),
+ DEF_MOD("fcpvb0", 607, R8A774C0_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A774C0_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A774C0_CLK_S0D1),
+ DEF_MOD("fcpcs", 619, R8A774C0_CLK_S0D1),
+ DEF_MOD("vspd1", 622, R8A774C0_CLK_S1D2),
+ DEF_MOD("vspd0", 623, R8A774C0_CLK_S1D2),
+ DEF_MOD("vspb", 626, R8A774C0_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A774C0_CLK_S0D1),
+
+ DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
+ DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
+ DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
+ DEF_MOD("du1", 723, R8A774C0_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A774C0_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1),
+
+ DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2),
+ DEF_MOD("vin4", 807, R8A774C0_CLK_S1D2),
+ DEF_MOD("etheravb", 812, R8A774C0_CLK_S3D2),
+
+ DEF_MOD("gpio6", 906, R8A774C0_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A774C0_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A774C0_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A774C0_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4),
+ DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
+ DEF_MOD("i2c5", 919, R8A774C0_CLK_S3D2),
+ DEF_MOD("i2c-dvfs", 926, R8A774C0_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A774C0_CLK_S3D2),
+ DEF_MOD("i2c3", 928, R8A774C0_CLK_S3D2),
+ DEF_MOD("i2c2", 929, R8A774C0_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A774C0_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A774C0_CLK_S3D2),
+
+ DEF_MOD("i2c7", 1003, R8A774C0_CLK_S3D2),
+ DEF_MOD("ssi-all", 1005, R8A774C0_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A774C0_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a774c0_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD19 EXTAL (MHz) PLL0 PLL1 PLL3
+ *--------------------------------------------------------------------
+ * 0 48 x 1 x100/1 x100/3 x100/3
+ * 1 48 x 1 x100/1 x100/3 x58/3
+ */
+#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19)
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 100, 3, 100, 3, },
+ { 1, 100, 3, 58, 3, },
+};
+
+static int __init r8a774c0_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, 0, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a774c0_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a774c0_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a774c0_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a774c0_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a774c0_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a774c0_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a774c0_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a774c0_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
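
For reference, the two cpg_pll_configs[] rows above differ only in the PLL3 multiplier; with the 48 MHz EXTAL from the MD19 table, PLL1 comes out at 1.6 GHz in both modes. A standalone arithmetic sketch (plain userspace C, not part of the patch; the struct is a stand-in mirroring only the fields used here):

#include <stdio.h>

/* Stand-in for the rcar_gen3_cpg_pll_config fields used by this SoC */
struct pll_config {
        unsigned int extal_div, pll1_mult, pll1_div, pll3_mult, pll3_div;
};

static const struct pll_config cfgs[2] = {
        { 1, 100, 3, 100, 3 },  /* MD19 = 0 */
        { 1, 100, 3,  58, 3 },  /* MD19 = 1 */
};

int main(void)
{
        const unsigned long long extal = 48000000;      /* 48 MHz, per the table */
        unsigned int md19;

        for (md19 = 0; md19 < 2; md19++) {
                const struct pll_config *c = &cfgs[md19];
                unsigned long long base = extal / c->extal_div;

                printf("MD19=%u: PLL1=%llu Hz, PLL3=%llu Hz\n", md19,
                       base * c->pll1_mult / c->pll1_div,
                       base * c->pll3_mult / c->pll3_div);
        }
        return 0;       /* PLL1 is 1.6 GHz in both rows; PLL3 is 1.6 GHz or 928 MHz */
}
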
diff --git a/drivers/clk/renesas/r8a7790-cpg-mssr.c b/drivers/clk/renesas/r8a7790-cpg-mssr.c
index f936cb74b681..c57cb93f8315 100644
--- a/drivers/clk/renesas/r8a7790-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7790-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7790 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -6,10 +7,6 @@
* Based on clk-rcar-gen2.c
*
* Copyright (C) 2013 Ideas On Board SPRL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c
index 1b91f03b7598..65702debcabb 100644
--- a/drivers/clk/renesas/r8a7791-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7791 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -6,10 +7,6 @@
* Based on clk-rcar-gen2.c
*
* Copyright (C) 2013 Ideas On Board SPRL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
index 493e07859f5f..cf8b84a3a060 100644
--- a/drivers/clk/renesas/r8a7792-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7792 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -6,10 +7,6 @@
* Based on clk-rcar-gen2.c
*
* Copyright (C) 2013 Ideas On Board SPRL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c
index 088f4b79fdfc..c1948693c5c1 100644
--- a/drivers/clk/renesas/r8a7794-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7794 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -6,10 +7,6 @@
* Based on clk-rcar-gen2.c
*
* Copyright (C) 2013 Ideas On Board SPRL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index a85dd50e8911..119c02440726 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7795 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -6,10 +7,6 @@
* Based on clk-rcar-gen3.c
*
* Copyright (C) 2015 Renesas Electronics Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
@@ -73,6 +70,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
+
/* Core Clock Outputs */
DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
@@ -111,8 +110,7 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
- DEF_DIV6_RO("osc", R8A7795_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
- DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+ DEF_GEN3_OSC("osc", R8A7795_CLK_OSC, CLK_EXTAL, 8),
DEF_BASE("r", R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
@@ -283,25 +281,25 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
*/
/*
- * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4
+ * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC
* 14 13 19 17 (MHz)
- *-------------------------------------------------------------------
- * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144
- * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144
+ *-------------------------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16
+ * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16
* 0 0 1 0 Prohibited setting
- * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144
- * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120
- * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120
+ * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16
+ * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19
+ * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19
* 0 1 1 0 Prohibited setting
- * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120
- * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96
- * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96
+ * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19
+ * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 /24
+ * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24
* 1 0 1 0 Prohibited setting
- * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96
- * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144
- * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144
+ * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24
+ * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32
+ * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32
* 1 1 1 0 Prohibited setting
- * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144
+ * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
(((md) & BIT(13)) >> 11) | \
@@ -309,23 +307,23 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
(((md) & BIT(17)) >> 17))
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
- /* EXTAL div PLL1 mult/div PLL3 mult/div */
- { 1, 192, 1, 192, 1, },
- { 1, 192, 1, 128, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 192, 1, 192, 1, },
- { 1, 160, 1, 160, 1, },
- { 1, 160, 1, 106, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 160, 1, 160, 1, },
- { 1, 128, 1, 128, 1, },
- { 1, 128, 1, 84, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 128, 1, 128, 1, },
- { 2, 192, 1, 192, 1, },
- { 2, 192, 1, 128, 1, },
- { 0, /* Prohibited setting */ },
- { 2, 192, 1, 192, 1, },
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 192, 1, 128, 1, 16, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 160, 1, 106, 1, 19, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 128, 1, 128, 1, 24, },
+ { 1, 128, 1, 84, 1, 24, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, 24, },
+ { 2, 192, 1, 192, 1, 32, },
+ { 2, 192, 1, 128, 1, 32, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, 32, },
};
static const struct soc_device_attribute r8a7795es1[] __initconst = {
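
The CPG_PLL_CONFIG_INDEX() macro above folds the four mode pins into a 4-bit table index (MD14 as bit 3, MD13 as bit 2, MD19 as bit 1, MD17 as bit 0), matching the row order of the comment table. One term of the macro is hidden by the hunk header here, so the MD19 shift in the sketch below is an assumption based on that layout; the rest is copied from the visible lines. A standalone check (not part of the patch):

#include <stdio.h>

#define BIT(n)  (1U << (n))

/* Reconstructed index macro; the BIT(19) term is the assumed hidden line */
#define CPG_PLL_CONFIG_INDEX(md)        ((((md) & BIT(14)) >> 11) | \
                                         (((md) & BIT(13)) >> 11) | \
                                         (((md) & BIT(19)) >> 18) | \
                                         (((md) & BIT(17)) >> 17))

int main(void)
{
        /* MD14=1, MD13=1, MD19=0, MD17=0: the 33.33 MHz (EXTAL /2) row */
        unsigned int mode_pins = BIT(14) | BIT(13);

        printf("config index = %u\n", CPG_PLL_CONFIG_INDEX(mode_pins)); /* 12 */
        return 0;
}
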
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index dfb267a92f2a..10567386e6dd 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7796 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -7,10 +8,6 @@
*
* Copyright (C) 2015 Glider bvba
* Copyright (C) 2015 Renesas Electronics Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
@@ -73,6 +70,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
+
/* Core Clock Outputs */
DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
@@ -110,8 +109,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_DIV6P1("mso", R8A7796_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A7796_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
- DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
- DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+ DEF_GEN3_OSC("osc", R8A7796_CLK_OSC, CLK_EXTAL, 8),
DEF_BASE("r", R8A7796_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
@@ -255,25 +253,25 @@ static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
*/
/*
- * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4
+ * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC
* 14 13 19 17 (MHz)
- *-------------------------------------------------------------------
- * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144
- * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144
+ *-------------------------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16
+ * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16
* 0 0 1 0 Prohibited setting
- * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144
- * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120
- * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120
+ * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16
+ * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19
+ * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19
* 0 1 1 0 Prohibited setting
- * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120
- * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96
- * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96
+ * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19
+ * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 /24
+ * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24
* 1 0 1 0 Prohibited setting
- * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96
- * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144
- * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144
+ * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24
+ * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32
+ * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32
* 1 1 1 0 Prohibited setting
- * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144
+ * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
(((md) & BIT(13)) >> 11) | \
@@ -281,23 +279,23 @@ static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
(((md) & BIT(17)) >> 17))
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
- /* EXTAL div PLL1 mult/div PLL3 mult/div */
- { 1, 192, 1, 192, 1, },
- { 1, 192, 1, 128, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 192, 1, 192, 1, },
- { 1, 160, 1, 160, 1, },
- { 1, 160, 1, 106, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 160, 1, 160, 1, },
- { 1, 128, 1, 128, 1, },
- { 1, 128, 1, 84, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 128, 1, 128, 1, },
- { 2, 192, 1, 192, 1, },
- { 2, 192, 1, 128, 1, },
- { 0, /* Prohibited setting */ },
- { 2, 192, 1, 192, 1, },
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 192, 1, 128, 1, 16, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 160, 1, 106, 1, 19, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 128, 1, 128, 1, 24, },
+ { 1, 128, 1, 84, 1, 24, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, 24, },
+ { 2, 192, 1, 192, 1, 32, },
+ { 2, 192, 1, 128, 1, 32, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a7796_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index 8fae5e9c4a77..1fcc411502da 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -68,6 +68,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
+
/* Core Clock Outputs */
DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -104,13 +106,13 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_DIV6P1("mso", R8A77965_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A77965_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
- DEF_DIV6_RO("osc", R8A77965_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
- DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+ DEF_GEN3_OSC("osc", R8A77965_CLK_OSC, CLK_EXTAL, 8),
DEF_BASE("r", R8A77965_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
+ DEF_MOD("fdp1-0", 119, R8A77965_CLK_S0D1),
DEF_MOD("scif5", 202, R8A77965_CLK_S3D4),
DEF_MOD("scif4", 203, R8A77965_CLK_S3D4),
DEF_MOD("scif3", 204, R8A77965_CLK_S3D4),
@@ -192,6 +194,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("vin1", 810, R8A77965_CLK_S0D2),
DEF_MOD("vin0", 811, R8A77965_CLK_S0D2),
DEF_MOD("etheravb", 812, R8A77965_CLK_S0D6),
+ DEF_MOD("sata0", 815, R8A77965_CLK_S3D2),
DEF_MOD("imr1", 822, R8A77965_CLK_S0D2),
DEF_MOD("imr0", 823, R8A77965_CLK_S0D2),
@@ -252,25 +255,25 @@ static const unsigned int r8a77965_crit_mod_clks[] __initconst = {
*/
/*
- * MD EXTAL PLL0 PLL1 PLL3 PLL4
+ * MD EXTAL PLL0 PLL1 PLL3 PLL4 OSC
* 14 13 19 17 (MHz)
- *-----------------------------------------------------------
- * 0 0 0 0 16.66 x 1 x180 x192 x192 x144
- * 0 0 0 1 16.66 x 1 x180 x192 x128 x144
+ *-----------------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x192 x144 /16
+ * 0 0 0 1 16.66 x 1 x180 x192 x128 x144 /16
* 0 0 1 0 Prohibited setting
- * 0 0 1 1 16.66 x 1 x180 x192 x192 x144
- * 0 1 0 0 20 x 1 x150 x160 x160 x120
- * 0 1 0 1 20 x 1 x150 x160 x106 x120
+ * 0 0 1 1 16.66 x 1 x180 x192 x192 x144 /16
+ * 0 1 0 0 20 x 1 x150 x160 x160 x120 /19
+ * 0 1 0 1 20 x 1 x150 x160 x106 x120 /19
* 0 1 1 0 Prohibited setting
- * 0 1 1 1 20 x 1 x150 x160 x160 x120
- * 1 0 0 0 25 x 1 x120 x128 x128 x96
- * 1 0 0 1 25 x 1 x120 x128 x84 x96
+ * 0 1 1 1 20 x 1 x150 x160 x160 x120 /19
+ * 1 0 0 0 25 x 1 x120 x128 x128 x96 /24
+ * 1 0 0 1 25 x 1 x120 x128 x84 x96 /24
* 1 0 1 0 Prohibited setting
- * 1 0 1 1 25 x 1 x120 x128 x128 x96
- * 1 1 0 0 33.33 / 2 x180 x192 x192 x144
- * 1 1 0 1 33.33 / 2 x180 x192 x128 x144
+ * 1 0 1 1 25 x 1 x120 x128 x128 x96 /24
+ * 1 1 0 0 33.33 / 2 x180 x192 x192 x144 /32
+ * 1 1 0 1 33.33 / 2 x180 x192 x128 x144 /32
* 1 1 1 0 Prohibited setting
- * 1 1 1 1 33.33 / 2 x180 x192 x192 x144
+ * 1 1 1 1 33.33 / 2 x180 x192 x192 x144 /32
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
(((md) & BIT(13)) >> 11) | \
@@ -278,23 +281,23 @@ static const unsigned int r8a77965_crit_mod_clks[] __initconst = {
(((md) & BIT(17)) >> 17))
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
- /* EXTAL div PLL1 mult/div PLL3 mult/div */
- { 1, 192, 1, 192, 1, },
- { 1, 192, 1, 128, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 192, 1, 192, 1, },
- { 1, 160, 1, 160, 1, },
- { 1, 160, 1, 106, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 160, 1, 160, 1, },
- { 1, 128, 1, 128, 1, },
- { 1, 128, 1, 84, 1, },
- { 0, /* Prohibited setting */ },
- { 1, 128, 1, 128, 1, },
- { 2, 192, 1, 192, 1, },
- { 2, 192, 1, 128, 1, },
- { 0, /* Prohibited setting */ },
- { 2, 192, 1, 192, 1, },
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 192, 1, 128, 1, 16, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 160, 1, 106, 1, 19, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 128, 1, 128, 1, 24, },
+ { 1, 128, 1, 84, 1, 24, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, 24, },
+ { 2, 192, 1, 192, 1, 32, },
+ { 2, 192, 1, 128, 1, 32, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a77965_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
index f55842917e8d..2015e45543e9 100644
--- a/drivers/clk/renesas/r8a77970-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -1,17 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a77970 Clock Pulse Generator / Module Standby and Software Reset
*
- * Copyright (C) 2017 Cogent Embedded Inc.
+ * Copyright (C) 2017-2018 Cogent Embedded Inc.
*
* Based on r8a7795-cpg-mssr.c
*
* Copyright (C) 2015 Glider bvba
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -22,6 +20,13 @@
#include "renesas-cpg-mssr.h"
#include "rcar-gen3-cpg.h"
+#define CPG_SD0CKCR 0x0074
+
+enum r8a77970_clk_types {
+ CLK_TYPE_R8A77970_SD0H = CLK_TYPE_GEN3_SOC_BASE,
+ CLK_TYPE_R8A77970_SD0,
+};
+
enum clk_ids {
/* Core Clock Outputs exported to DT */
LAST_DT_CORE_CLK = R8A77970_CLK_OSC,
@@ -42,6 +47,20 @@ enum clk_ids {
MOD_CLK_BASE
};
+static spinlock_t cpg_lock;
+
+static const struct clk_div_table cpg_sd0h_div_table[] = {
+ { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
+};
+
+static const struct clk_div_table cpg_sd0_div_table[] = {
+ { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
+ { 8, 24 }, { 10, 36 }, { 11, 48 }, { 12, 10 },
+ { 0, 0 },
+};
+
static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -68,6 +87,10 @@ static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
DEF_FIXED("s2d2", R8A77970_CLK_S2D2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("s2d4", R8A77970_CLK_S2D4, CLK_PLL1_DIV2, 24, 1),
+ DEF_BASE("sd0h", R8A77970_CLK_SD0H, CLK_TYPE_R8A77970_SD0H,
+ CLK_PLL1_DIV2),
+ DEF_BASE("sd0", R8A77970_CLK_SD0, CLK_TYPE_R8A77970_SD0, CLK_PLL1_DIV2),
+
DEF_FIXED("cl", R8A77970_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A77970_CLK_CP, CLK_EXTAL, 2, 1),
@@ -80,6 +103,11 @@ static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77970_CLK_S2D2),
+ DEF_MOD("tmu3", 122, R8A77970_CLK_S2D2),
+ DEF_MOD("tmu2", 123, R8A77970_CLK_S2D2),
+ DEF_MOD("tmu1", 124, R8A77970_CLK_S2D2),
+ DEF_MOD("tmu0", 125, R8A77970_CLK_CP),
DEF_MOD("ivcp1e", 127, R8A77970_CLK_S2D1),
DEF_MOD("scif4", 203, R8A77970_CLK_S2D4),
DEF_MOD("scif3", 204, R8A77970_CLK_S2D4),
@@ -92,6 +120,12 @@ static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = {
DEF_MOD("mfis", 213, R8A77970_CLK_S2D2),
DEF_MOD("sys-dmac2", 217, R8A77970_CLK_S2D1),
DEF_MOD("sys-dmac1", 218, R8A77970_CLK_S2D1),
+ DEF_MOD("cmt3", 300, R8A77970_CLK_R),
+ DEF_MOD("cmt2", 301, R8A77970_CLK_R),
+ DEF_MOD("cmt1", 302, R8A77970_CLK_R),
+ DEF_MOD("cmt0", 303, R8A77970_CLK_R),
+ DEF_MOD("tpu0", 304, R8A77970_CLK_S2D4),
+ DEF_MOD("sd-if", 314, R8A77970_CLK_SD0),
DEF_MOD("rwdt", 402, R8A77970_CLK_R),
DEF_MOD("intc-ex", 407, R8A77970_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77970_CLK_S2D1),
@@ -173,11 +207,46 @@ static int __init r8a77970_cpg_mssr_init(struct device *dev)
if (error)
return error;
+ spin_lock_init(&cpg_lock);
+
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}
+static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
+{
+ const struct clk_div_table *table;
+ const struct clk *parent;
+ unsigned int shift;
+
+ switch (core->type) {
+ case CLK_TYPE_R8A77970_SD0H:
+ table = cpg_sd0h_div_table;
+ shift = 8;
+ break;
+ case CLK_TYPE_R8A77970_SD0:
+ table = cpg_sd0_div_table;
+ shift = 4;
+ break;
+ default:
+ return rcar_gen3_cpg_clk_register(dev, core, info, clks, base,
+ notifiers);
+ }
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_SD0CKCR,
+ shift, 4, 0, table, &cpg_lock);
+}
+
const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = {
/* Core Clocks */
.core_clks = r8a77970_core_clks,
@@ -196,5 +265,5 @@ const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = {
/* Callbacks */
.init = r8a77970_cpg_mssr_init,
- .cpg_clk_register = rcar_gen3_cpg_clk_register,
+ .cpg_clk_register = r8a77970_cpg_clk_register,
};
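
The two divider tables and the clk_register_divider_table() call earlier in this file's diff describe 4-bit fields in SD0CKCR: SD0H at shift 8 and SD0 at shift 4, each mapping a field value to a divider. A standalone decode sketch (not part of the patch; the register value is made up for illustration):

#include <stdio.h>

struct div_entry { unsigned int val, div; };

static const struct div_entry sd0h_div[] = {
        { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
        { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
        { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
};

static const struct div_entry sd0_div[] = {
        { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
        { 8, 24 }, { 10, 36 }, { 11, 48 }, { 12, 10 },
        { 0, 0 },
};

static unsigned int lookup(const struct div_entry *t, unsigned int field)
{
        for (; t->div; t++)
                if (t->val == field)
                        return t->div;
        return 0;                       /* reserved encoding */
}

int main(void)
{
        unsigned int sd0ckcr = 0x0078;  /* example value only */

        /* SD0H occupies bits [11:8], SD0 occupies bits [7:4] */
        printf("SD0H divider: %u\n", lookup(sd0h_div, (sd0ckcr >> 8) & 0xf)); /* 2 */
        printf("SD0  divider: %u\n", lookup(sd0_div, (sd0ckcr >> 4) & 0xf));  /* 18 */
        return 0;
}
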
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index d7ebd9ec0059..25a3083b6764 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_OCO,
/* Module Clocks */
MOD_CLK_BASE
@@ -64,6 +65,7 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -96,6 +98,9 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_DIV6P1("canfd", R8A77980_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A77980_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
DEF_DIV6P1("mso", R8A77980_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+
+ DEF_GEN3_OSC("osc", R8A77980_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN3_MDSEL("r", R8A77980_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
@@ -114,9 +119,14 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("msiof0", 211, R8A77980_CLK_MSO),
DEF_MOD("sys-dmac2", 217, R8A77980_CLK_S0D3),
DEF_MOD("sys-dmac1", 218, R8A77980_CLK_S0D3),
+ DEF_MOD("cmt3", 300, R8A77980_CLK_R),
+ DEF_MOD("cmt2", 301, R8A77980_CLK_R),
+ DEF_MOD("cmt1", 302, R8A77980_CLK_R),
+ DEF_MOD("cmt0", 303, R8A77980_CLK_R),
DEF_MOD("tpu0", 304, R8A77980_CLK_S3D4),
DEF_MOD("sdif", 314, R8A77980_CLK_SD0),
DEF_MOD("pciec0", 319, R8A77980_CLK_S2D2),
+ DEF_MOD("rwdt", 402, R8A77980_CLK_R),
DEF_MOD("intc-ex", 407, R8A77980_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77980_CLK_S0D3),
DEF_MOD("hscif3", 517, R8A77980_CLK_S3D1),
@@ -171,23 +181,23 @@ static const unsigned int r8a77980_crit_mod_clks[] __initconst = {
*/
/*
- * MD EXTAL PLL2 PLL1 PLL3
+ * MD EXTAL PLL2 PLL1 PLL3 OSC
* 14 13 (MHz)
- * --------------------------------------------------
- * 0 0 16.66 x 1 x240 x192 x192
- * 0 1 20 x 1 x200 x160 x160
- * 1 0 27 x 1 x148 x118 x118
- * 1 1 33.33 / 2 x240 x192 x192
+ * --------------------------------------------------------
+ * 0 0 16.66 x 1 x240 x192 x192 /16
+ * 0 1 20 x 1 x200 x160 x160 /19
+ * 1 0 27 x 1 x148 x118 x118 /26
+ * 1 1 33.33 / 2 x240 x192 x192 /32
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[4] __initconst = {
- /* EXTAL div PLL1 mult/div PLL3 mult/div */
- { 1, 192, 1, 192, 1, },
- { 1, 160, 1, 160, 1, },
- { 1, 118, 1, 118, 1, },
- { 2, 192, 1, 192, 1, },
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 118, 1, 118, 1, 26, },
+ { 2, 192, 1, 192, 1, 32, },
};
static int __init r8a77980_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index 9e14f1486fbb..9eb80180eea0 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -44,6 +44,8 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RINT,
+ CLK_OCO,
/* Module Clocks */
MOD_CLK_BASE
@@ -72,6 +74,10 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+ DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
+
/* Core Clock Outputs */
DEF_FIXED("za2", R8A77990_CLK_ZA2, CLK_PLL0D24, 1, 1),
DEF_FIXED("za8", R8A77990_CLK_ZA8, CLK_PLL0D8, 1, 1),
@@ -100,8 +106,8 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77990_CLK_CPEX, CLK_EXTAL, 4, 1),
- DEF_FIXED("osc", R8A77990_CLK_OSC, CLK_EXTAL, 384, 1),
- DEF_FIXED("r", R8A77990_CLK_R, CLK_EXTAL, 1536, 1),
+
+ DEF_DIV6_RO("osc", R8A77990_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
DEF_GEN3_PE("s0d6c", R8A77990_CLK_S0D6C, CLK_S0, 6, CLK_PE, 2),
DEF_GEN3_PE("s3d1c", R8A77990_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1),
@@ -111,6 +117,8 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_DIV6P1("canfd", R8A77990_CLK_CANFD, CLK_PLL0D6, 0x244),
DEF_DIV6P1("csi0", R8A77990_CLK_CSI0, CLK_PLL1D2, 0x00c),
DEF_DIV6P1("mso", R8A77990_CLK_MSO, CLK_PLL1D2, 0x014),
+
+ DEF_GEN3_RCKSEL("r", R8A77990_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4),
};
static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
@@ -202,6 +210,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("i2c1", 930, R8A77990_CLK_S3D2),
DEF_MOD("i2c0", 931, R8A77990_CLK_S3D2),
+ DEF_MOD("i2c7", 1003, R8A77990_CLK_S3D2),
DEF_MOD("ssi-all", 1005, R8A77990_CLK_S3D4),
DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
@@ -241,8 +250,8 @@ static const unsigned int r8a77990_crit_mod_clks[] __initconst = {
/*
* MD19 EXTAL (MHz) PLL0 PLL1 PLL3
*--------------------------------------------------------------------
- * 0 48 x 1 x100/4 x100/3 x100/3
- * 1 48 x 1 x100/4 x100/3 x58/3
+ * 0 48 x 1 x100/1 x100/3 x100/3
+ * 1 48 x 1 x100/1 x100/3 x58/3
*/
#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19)
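
Earlier in this file's diff the R clock becomes DEF_GEN3_RCKSEL("r", R8A77990_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4), with .oco defined as a fixed 8 MHz clock. As the CLK_TYPE_GEN3_RCKSEL handler added later in this series reads it, a set RCKCR.CKSEL bit selects the second parent/divider pair, i.e. the on-chip oscillator divided by 244, which lands close to the nominal ~32.8 kHz RCLK. A quick standalone check of that arithmetic (not part of the patch):

#include <stdio.h>

int main(void)
{
        const unsigned long oco = 8 * 1000 * 1000;      /* ".oco" fixed rate */
        const unsigned int div = 61 * 4;                /* second divider in DEF_GEN3_RCKSEL */

        /* OCO path of the R clock when RCKCR.CKSEL is set */
        printf("r = %lu Hz\n", oco / div);              /* 32786 Hz */
        return 0;
}
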
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index ea4cafbe6e85..47e60e3dbe05 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a77995 Clock Pulse Generator / Module Standby and Software Reset
*
@@ -7,10 +8,6 @@
*
* Copyright (C) 2015 Glider bvba
* Copyright (C) 2015 Renesas Electronics Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/device.h>
@@ -46,6 +43,8 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RINT,
+ CLK_OCO,
/* Module Clocks */
MOD_CLK_BASE
@@ -72,6 +71,10 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+ DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
+
/* Core Clock Outputs */
DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
@@ -90,8 +93,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1),
- DEF_FIXED("osc", R8A77995_CLK_OSC, CLK_EXTAL, 384, 1),
- DEF_FIXED("r", R8A77995_CLK_R, CLK_EXTAL, 1536, 1),
+
+ DEF_DIV6_RO("osc", R8A77995_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
DEF_GEN3_PE("s1d4c", R8A77995_CLK_S1D4C, CLK_S1, 4, CLK_PE, 2),
DEF_GEN3_PE("s3d1c", R8A77995_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1),
@@ -102,6 +105,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244),
DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014),
+
+ DEF_GEN3_RCKSEL("r", R8A77995_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4),
};
static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index a0b6ecdc63dd..6d2b56891559 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -539,7 +539,8 @@ r9a06g032_div_round_rate(struct clk_hw *hw,
* several uarts attached to this divider, and changing this impacts
* everyone.
*/
- if (clk->index == R9A06G032_DIV_UART) {
+ if (clk->index == R9A06G032_DIV_UART ||
+ clk->index == R9A06G032_DIV_P2_PG) {
pr_devel("%s div uart hack!\n", __func__);
return clk_get_rate(hw->clk);
}
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index daf88bc2cdae..f596a2dafcf4 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen2 Clock Pulse Generator
*
* Copyright (C) 2016 Cogent Embedded Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/bug.h>
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index 020a3baad015..bff9551c7a38 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* R-Car Gen2 Clock Pulse Generator
*
* Copyright (C) 2016 Cogent Embedded Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation; version 2 of the License.
*/
#ifndef __CLK_RENESAS_RCAR_GEN2_CPG_H__
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 628b63b85d3f..4ba38f98cc7b 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -1,15 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen3 Clock Pulse Generator
*
- * Copyright (C) 2015-2016 Glider bvba
+ * Copyright (C) 2015-2018 Glider bvba
*
* Based on clk-rcar-gen3.c
*
* Copyright (C) 2015 Renesas Electronics Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/bug.h>
@@ -31,6 +28,8 @@
#define CPG_PLL2CR 0x002c
#define CPG_PLL4CR 0x01f4
+#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */
+
struct cpg_simple_notifier {
struct notifier_block nb;
void __iomem *reg;
@@ -444,7 +443,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
unsigned int div = 1;
u32 value;
- parent = clks[core->parent & 0xffff]; /* CLK_TYPE_PE uses high bits */
+ parent = clks[core->parent & 0xffff]; /* some types use high bits */
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -524,7 +523,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
if (clk_get_rate(clks[cpg_clk_extalr])) {
parent = clks[cpg_clk_extalr];
- value |= BIT(15);
+ value |= CPG_RCKCR_CKSEL;
}
writel(value, csn->reg);
@@ -537,16 +536,14 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
parent = clks[cpg_clk_extalr];
break;
- case CLK_TYPE_GEN3_PE:
+ case CLK_TYPE_GEN3_MDSEL:
/*
- * Peripheral clock with a fixed divider, selectable between
- * clean and spread spectrum parents using MD12
+ * Clock selectable between two parents and two fixed dividers
+ * using a mode pin
*/
- if (cpg_mode & BIT(12)) {
- /* Clean */
+ if (cpg_mode & BIT(core->offset)) {
div = core->div & 0xffff;
} else {
- /* SCCG */
parent = clks[core->parent >> 16];
if (IS_ERR(parent))
return ERR_CAST(parent);
@@ -563,6 +560,28 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
return cpg_z_clk_register(core->name, __clk_get_name(parent),
base, CPG_FRQCRC_Z2FC_MASK);
+ case CLK_TYPE_GEN3_OSC:
+ /*
+ * Clock combining OSC EXTAL predivider and a fixed divider
+ */
+ div = cpg_pll_config->osc_prediv * core->div;
+ break;
+
+ case CLK_TYPE_GEN3_RCKSEL:
+ /*
+ * Clock selectable between two parents and two fixed dividers
+ * using RCKCR.CKSEL
+ */
+ if (readl(base + CPG_RCKCR) & CPG_RCKCR_CKSEL) {
+ div = core->div & 0xffff;
+ } else {
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div >> 16;
+ }
+ break;
+
default:
return ERR_PTR(-EINVAL);
}
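
The new osc_prediv value feeds the CLK_TYPE_GEN3_OSC case above, which simply multiplies the per-SoC predivider by the fixed divider from DEF_GEN3_OSC(). Taking the 16.66 MHz EXTAL row of the earlier tables (prediv /16) and the DEF_GEN3_OSC("osc", ..., 8) definitions, the OSC output works out to roughly 130 kHz. A standalone arithmetic check (not part of the patch):

#include <stdio.h>

int main(void)
{
        const unsigned long extal = 16666666;   /* 16.66 MHz EXTAL row */
        const unsigned int osc_prediv = 16;     /* new table column for that row */
        const unsigned int core_div = 8;        /* DEF_GEN3_OSC("osc", ..., 8) */

        /* CLK_TYPE_GEN3_OSC: div = osc_prediv * core->div */
        printf("osc = %lu Hz\n", extal / (osc_prediv * core_div)); /* ~130 kHz */
        return 0;
}
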
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index ea4f8fc3c4c9..f4fb6cf16688 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -1,11 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* R-Car Gen3 Clock Pulse Generator
*
- * Copyright (C) 2015-2016 Glider bvba
+ * Copyright (C) 2015-2018 Glider bvba
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __CLK_RENESAS_RCAR_GEN3_CPG_H__
@@ -20,19 +18,35 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_PLL4,
CLK_TYPE_GEN3_SD,
CLK_TYPE_GEN3_R,
- CLK_TYPE_GEN3_PE,
+ CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_GEN3_Z,
CLK_TYPE_GEN3_Z2,
+ CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
+ CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
+
+ /* SoC specific definitions start here */
+ CLK_TYPE_GEN3_SOC_BASE,
};
#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
+#define DEF_GEN3_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_MDSEL, \
+ (_parent0) << 16 | (_parent1), \
+ .div = (_div0) << 16 | (_div1), .offset = _md)
+
#define DEF_GEN3_PE(_name, _id, _parent_sscg, _div_sscg, _parent_clean, \
_div_clean) \
- DEF_BASE(_name, _id, CLK_TYPE_GEN3_PE, \
- (_parent_sscg) << 16 | (_parent_clean), \
- .div = (_div_sscg) << 16 | (_div_clean))
+ DEF_GEN3_MDSEL(_name, _id, 12, _parent_sscg, _div_sscg, \
+ _parent_clean, _div_clean)
+
+#define DEF_GEN3_OSC(_name, _id, _parent, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_OSC, _parent, .div = _div)
+
+#define DEF_GEN3_RCKSEL(_name, _id, _parent0, _div0, _parent1, _div1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_RCKSEL, \
+ (_parent0) << 16 | (_parent1), .div = (_div0) << 16 | (_div1))
struct rcar_gen3_cpg_pll_config {
u8 extal_div;
@@ -40,6 +54,7 @@ struct rcar_gen3_cpg_pll_config {
u8 pll1_div;
u8 pll3_mult;
u8 pll3_div;
+ u8 osc_prediv;
};
#define CPG_RCKCR 0x240
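
DEF_GEN3_MDSEL() packs both candidate parents and both fixed dividers into the 16-bit halves of .parent and .div, and stores the mode-pin number in .offset. Read together with the CLK_TYPE_GEN3_MDSEL case in rcar-gen3-cpg.c above, a set mode pin keeps the low half (the second parent/divider pair) while a clear pin switches to the high half. A standalone sketch of that selection (not part of the patch; the parent indices and register value are hypothetical):

#include <stdio.h>

#define BIT(n)  (1U << (n))

/* Same packing idea as DEF_GEN3_MDSEL: pair 0 in the high 16 bits */
struct mdsel { unsigned int parent, div, md; };

#define MDSEL(_md, _p0, _d0, _p1, _d1) \
        { .parent = (_p0) << 16 | (_p1), .div = (_d0) << 16 | (_d1), .md = (_md) }

int main(void)
{
        const struct mdsel clk = MDSEL(12, 5, 2, 9, 1); /* hypothetical values */
        unsigned int cpg_mode = BIT(12);                /* pretend MD12 is set */
        unsigned int parent, div;

        if (cpg_mode & BIT(clk.md)) {   /* pin set: second pair */
                parent = clk.parent & 0xffff;
                div = clk.div & 0xffff;
        } else {                        /* pin clear: first pair */
                parent = clk.parent >> 16;
                div = clk.div >> 16;
        }
        printf("parent index %u, divider %u\n", parent, div);   /* 9, 1 */
        return 0;
}
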
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index 6cd030a58964..b241f9ca3d71 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car USB2.0 clock selector
*
@@ -6,10 +7,6 @@
* Based on renesas-cpg-mssr.c
*
* Copyright (C) 2015 Glider bvba
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index f4b013e9352d..f7bb817420b4 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Clock Pulse Generator / Module Standby and Software Reset
*
@@ -7,10 +8,6 @@
*
* Copyright (C) 2013 Ideas On Board SPRL
* Copyright (C) 2015 Renesas Electronics Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/clk.h>
@@ -73,6 +70,17 @@ static const u16 smstpcr[] = {
#define SMSTPCR(i) smstpcr[i]
+/*
+ * Standby Control Register offsets (RZ/A)
+ * Base address is FRQCR register
+ */
+
+static const u16 stbcr[] = {
+ 0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
+ 0x424, 0x428, 0x42C,
+};
+
+#define STBCR(i) stbcr[i]
/*
* Software Reset Register offsets
@@ -110,6 +118,7 @@ static const u16 srcr[] = {
* @notifiers: Notifier chain to save/restore clock state for system resume
* @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
* @smstpcr_saved[].val: Saved values of SMSTPCR[]
+ * @stbyctrl: This device has Standby Control Registers
*/
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
@@ -118,11 +127,13 @@ struct cpg_mssr_priv {
struct device *dev;
void __iomem *base;
spinlock_t rmw_lock;
+ struct device_node *np;
struct clk **clks;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
+ bool stbyctrl;
struct raw_notifier_head notifiers;
struct {
@@ -131,6 +142,7 @@ struct cpg_mssr_priv {
} smstpcr_saved[ARRAY_SIZE(smstpcr)];
};
+static struct cpg_mssr_priv *cpg_mssr_priv;
/**
* struct mstp_clock - MSTP gating clock
@@ -162,16 +174,29 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
enable ? "ON" : "OFF");
spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SMSTPCR(reg));
- if (enable)
- value &= ~bitmask;
- else
- value |= bitmask;
- writel(value, priv->base + SMSTPCR(reg));
+ if (priv->stbyctrl) {
+ value = readb(priv->base + STBCR(reg));
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+ writeb(value, priv->base + STBCR(reg));
+
+ /* dummy read to ensure write has completed */
+ readb(priv->base + STBCR(reg));
+ barrier_data(priv->base + STBCR(reg));
+ } else {
+ value = readl(priv->base + SMSTPCR(reg));
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+ writel(value, priv->base + SMSTPCR(reg));
+ }
spin_unlock_irqrestore(&priv->rmw_lock, flags);
- if (!enable)
+ if (!enable || priv->stbyctrl)
return 0;
for (i = 1000; i > 0; --i) {
@@ -205,7 +230,10 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
struct cpg_mssr_priv *priv = clock->priv;
u32 value;
- value = readl(priv->base + MSTPSR(clock->index / 32));
+ if (priv->stbyctrl)
+ value = readb(priv->base + STBCR(clock->index / 32));
+ else
+ value = readl(priv->base + MSTPSR(clock->index / 32));
return !(value & BIT(clock->index % 32));
}
@@ -226,6 +254,7 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
unsigned int idx;
const char *type;
struct clk *clk;
+ int range_check;
switch (clkspec->args[0]) {
case CPG_CORE:
@@ -240,8 +269,14 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
case CPG_MOD:
type = "module";
- idx = MOD_CLK_PACK(clkidx);
- if (clkidx % 100 > 31 || idx >= priv->num_mod_clks) {
+ if (priv->stbyctrl) {
+ idx = MOD_CLK_PACK_10(clkidx);
+ range_check = 7 - (clkidx % 10);
+ } else {
+ idx = MOD_CLK_PACK(clkidx);
+ range_check = 31 - (clkidx % 100);
+ }
+ if (range_check < 0 || idx >= priv->num_mod_clks) {
dev_err(dev, "Invalid %s clock index %u\n", type,
clkidx);
return ERR_PTR(-EINVAL);
@@ -283,7 +318,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
switch (core->type) {
case CLK_TYPE_IN:
- clk = of_clk_get_by_name(priv->dev->of_node, core->name);
+ clk = of_clk_get_by_name(priv->np, core->name);
break;
case CLK_TYPE_FF:
@@ -313,6 +348,11 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
}
break;
+ case CLK_TYPE_FR:
+ clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
+ core->mult);
+ break;
+
default:
if (info->cpg_clk_register)
clk = info->cpg_clk_register(dev, core, info,
@@ -641,11 +681,22 @@ static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
static const struct of_device_id cpg_mssr_match[] = {
+#ifdef CONFIG_CLK_R7S9210
+ {
+ .compatible = "renesas,r7s9210-cpg-mssr",
+ .data = &r7s9210_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A7743
{
.compatible = "renesas,r8a7743-cpg-mssr",
.data = &r8a7743_cpg_mssr_info,
},
+ /* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
+ {
+ .compatible = "renesas,r8a7744-cpg-mssr",
+ .data = &r8a7743_cpg_mssr_info,
+ },
#endif
#ifdef CONFIG_CLK_R8A7745
{
@@ -659,6 +710,18 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a77470_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A774A1
+ {
+ .compatible = "renesas,r8a774a1-cpg-mssr",
+ .data = &r8a774a1_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R8A774C0
+ {
+ .compatible = "renesas,r8a774c0-cpg-mssr",
+ .data = &r8a774c0_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A7790
{
.compatible = "renesas,r8a7790-cpg-mssr",
@@ -780,13 +843,23 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (!mask)
continue;
- oldval = readl(priv->base + SMSTPCR(reg));
+ if (priv->stbyctrl)
+ oldval = readb(priv->base + STBCR(reg));
+ else
+ oldval = readl(priv->base + SMSTPCR(reg));
newval = oldval & ~mask;
newval |= priv->smstpcr_saved[reg].val & mask;
if (newval == oldval)
continue;
- writel(newval, priv->base + SMSTPCR(reg));
+ if (priv->stbyctrl) {
+ writeb(newval, priv->base + STBCR(reg));
+ /* dummy read to ensure write has completed */
+ readb(priv->base + STBCR(reg));
+ barrier_data(priv->base + STBCR(reg));
+ continue;
+ } else
+ writel(newval, priv->base + SMSTPCR(reg));
/* Wait until enabled clocks are really enabled */
mask &= ~priv->smstpcr_saved[reg].val;
@@ -817,61 +890,115 @@ static const struct dev_pm_ops cpg_mssr_pm = {
#define DEV_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
-static int __init cpg_mssr_probe(struct platform_device *pdev)
+static int __init cpg_mssr_common_init(struct device *dev,
+ struct device_node *np,
+ const struct cpg_mssr_info *info)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- const struct cpg_mssr_info *info;
struct cpg_mssr_priv *priv;
+ struct clk **clks = NULL;
unsigned int nclks, i;
- struct resource *res;
- struct clk **clks;
int error;
- info = of_device_get_match_data(dev);
if (info->init) {
error = info->init(dev);
if (error)
return error;
}
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->np = np;
priv->dev = dev;
spin_lock_init(&priv->rmw_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ priv->base = of_iomap(np, 0);
+ if (!priv->base) {
+ error = -ENOMEM;
+ goto out_err;
+ }
nclks = info->num_total_core_clks + info->num_hw_mod_clks;
- clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
- if (!clks)
- return -ENOMEM;
+ clks = kmalloc_array(nclks, sizeof(*clks), GFP_KERNEL);
+ if (!clks) {
+ error = -ENOMEM;
+ goto out_err;
+ }
- dev_set_drvdata(dev, priv);
+ cpg_mssr_priv = priv;
priv->clks = clks;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
+ priv->stbyctrl = info->stbyctrl;
for (i = 0; i < nclks; i++)
clks[i] = ERR_PTR(-ENOENT);
+ error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
+ if (error)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ kfree(clks);
+ if (priv->base)
+ iounmap(priv->base);
+ kfree(priv);
+
+ return error;
+}
+
+void __init cpg_mssr_early_init(struct device_node *np,
+ const struct cpg_mssr_info *info)
+{
+ int error;
+ int i;
+
+ error = cpg_mssr_common_init(NULL, np, info);
+ if (error)
+ return;
+
+ for (i = 0; i < info->num_early_core_clks; i++)
+ cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
+ cpg_mssr_priv);
+
+ for (i = 0; i < info->num_early_mod_clks; i++)
+ cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
+ cpg_mssr_priv);
+
+}
+
+static int __init cpg_mssr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct cpg_mssr_info *info;
+ struct cpg_mssr_priv *priv;
+ unsigned int i;
+ int error;
+
+ info = of_device_get_match_data(dev);
+
+ if (!cpg_mssr_priv) {
+ error = cpg_mssr_common_init(dev, dev->of_node, info);
+ if (error)
+ return error;
+ }
+
+ priv = cpg_mssr_priv;
+ priv->dev = dev;
+ dev_set_drvdata(dev, priv);
+
for (i = 0; i < info->num_core_clks; i++)
cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);
for (i = 0; i < info->num_mod_clks; i++)
cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);
- error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
- if (error)
- return error;
-
error = devm_add_action_or_reset(dev,
cpg_mssr_del_clk_provider,
np);
@@ -883,6 +1010,10 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
if (error)
return error;
+ /* Reset Controller not supported for Standby Control SoCs */
+ if (info->stbyctrl)
+ return 0;
+
error = cpg_mssr_reset_controller_register(priv);
if (error)
return error;
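
The reworked CPG_MOD lookup above validates module numbers differently on Standby-Control SoCs: their STBCR registers are only 8 bits wide, so the ones digit of the base-10 module number must be 0-7, whereas the usual base-100 numbering allows bit positions 0-31. A standalone sketch of just that check (not part of the patch; the module numbers are made-up examples):

#include <stdio.h>

/* Mirrors the range check in cpg_mssr_clk_src_twocell_get() above */
static int mod_index_valid(unsigned int clkidx, int stbyctrl)
{
        return stbyctrl ? (clkidx % 10) <= 7 : (clkidx % 100) <= 31;
}

int main(void)
{
        printf("517 (stbyctrl): %s\n", mod_index_valid(517, 1) ? "ok" : "invalid");
        printf("519 (stbyctrl): %s\n", mod_index_valid(519, 1) ? "ok" : "invalid");
        printf("932 (SMSTPCR):  %s\n", mod_index_valid(932, 0) ? "ok" : "invalid");
        return 0;
}
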
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 642f720b9b05..c4ec9df146fd 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Renesas Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2015 Glider bvba
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __CLK_RENESAS_CPG_MSSR_H__
@@ -38,6 +35,7 @@ enum clk_types {
CLK_TYPE_FF, /* Fixed Factor Clock */
CLK_TYPE_DIV6P1, /* DIV6 Clock with 1 parent clock */
CLK_TYPE_DIV6_RO, /* DIV6 Clock read only with extra divisor */
+ CLK_TYPE_FR, /* Fixed Rate Clock */
/* Custom definitions start here */
CLK_TYPE_CUSTOM,
@@ -56,6 +54,8 @@ enum clk_types {
DEF_BASE(_name, _id, CLK_TYPE_DIV6P1, _parent, .offset = _offset)
#define DEF_DIV6_RO(_name, _id, _parent, _offset, _div) \
DEF_BASE(_name, _id, CLK_TYPE_DIV6_RO, _parent, .offset = _offset, .div = _div, .mult = 1)
+#define DEF_RATE(_name, _id, _rate) \
+ DEF_TYPE(_name, _id, CLK_TYPE_FR, .mult = _rate)
/*
* Definitions of Module Clocks
@@ -75,12 +75,24 @@ struct mssr_mod_clk {
#define DEF_MOD(_name, _mod, _parent...) \
{ .name = _name, .id = MOD_CLK_ID(_mod), .parent = _parent }
+/* Convert from sparse base-10 to packed index space */
+#define MOD_CLK_PACK_10(x) ((x / 10) * 32 + (x % 10))
+
+#define MOD_CLK_ID_10(x) (MOD_CLK_BASE + MOD_CLK_PACK_10(x))
+
+#define DEF_MOD_STB(_name, _mod, _parent...) \
+ { .name = _name, .id = MOD_CLK_ID_10(_mod), .parent = _parent }
struct device_node;
/**
* SoC-specific CPG/MSSR Description
*
+ * @early_core_clks: Array of Early Core Clock definitions
+ * @num_early_core_clks: Number of entries in early_core_clks[]
+ * @early_mod_clks: Array of Early Module Clock definitions
+ * @num_early_mod_clks: Number of entries in early_mod_clks[]
+ *
* @core_clks: Array of Core Clock definitions
* @num_core_clks: Number of entries in core_clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
@@ -100,14 +112,25 @@ struct device_node;
*
* @init: Optional callback to perform SoC-specific initialization
* @cpg_clk_register: Optional callback to handle special Core Clock types
+ *
+ * @stbyctrl: This device has Standby Control Registers which are 8 bits
+ * wide, have no status registers (MSTPSR), and use different address
+ * offsets.
*/
struct cpg_mssr_info {
+ /* Early Clocks */
+ const struct cpg_core_clk *early_core_clks;
+ unsigned int num_early_core_clks;
+ const struct mssr_mod_clk *early_mod_clks;
+ unsigned int num_early_mod_clks;
+
/* Core Clocks */
const struct cpg_core_clk *core_clks;
unsigned int num_core_clks;
unsigned int last_dt_core_clk;
unsigned int num_total_core_clks;
+ bool stbyctrl;
/* Module Clocks */
const struct mssr_mod_clk *mod_clks;
@@ -131,9 +154,12 @@ struct cpg_mssr_info {
struct raw_notifier_head *notifiers);
};
+extern const struct cpg_mssr_info r7s9210_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77470_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a774a1_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a774c0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7790_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7791_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
@@ -146,6 +172,8 @@ extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77990_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
+void __init cpg_mssr_early_init(struct device_node *np,
+ const struct cpg_mssr_info *info);
/*
* Helpers for fixing up clock tables depending on SoC revision
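
MOD_CLK_PACK_10() above maps the sparse base-10 numbering used by DEF_MOD_STB onto the same 32-slots-per-register index space as the existing module clocks, so the gating code's index/32 and index%32 arithmetic keeps selecting the right STBCR register and bit. A standalone sketch (not part of the patch; the module number is hypothetical):

#include <stdio.h>

/* Same formula as MOD_CLK_PACK_10 in the header above */
#define MOD_CLK_PACK_10(x)      (((x) / 10) * 32 + ((x) % 10))

int main(void)
{
        unsigned int mod = 57;                  /* e.g. a DEF_MOD_STB(..., 57, ...) entry */
        unsigned int idx = MOD_CLK_PACK_10(mod);

        printf("module %u -> packed index %u -> STBCR%u bit %u\n",
               mod, idx, idx / 32, idx % 32);   /* STBCR5, bit 7 */
        return 0;
}
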
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index e8075359366b..ebce5260068b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
{
struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 val;
val = clk_readl(ddrclk->reg_base +
ddrclk->mux_offset) >> ddrclk->mux_shift;
val &= GENMASK(ddrclk->mux_width - 1, 0);
- if (val >= num_parents)
- return -EINVAL;
-
return val;
}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 67e73fd71f09..fa25e35ce7d5 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -645,7 +645,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
- GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(HCLK_HDMI, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(5), 14, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 450de24a1b42..5a67b7869960 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -83,22 +83,43 @@ static struct rockchip_pll_rate_table rk3288_pll_rates[] = {
RK3066_PLL_RATE( 768000000, 1, 64, 2),
RK3066_PLL_RATE( 742500000, 8, 495, 2),
RK3066_PLL_RATE( 696000000, 1, 58, 2),
+ RK3066_PLL_RATE_NB(621000000, 1, 207, 8, 1),
RK3066_PLL_RATE( 600000000, 1, 50, 2),
RK3066_PLL_RATE_NB(594000000, 1, 198, 8, 1),
RK3066_PLL_RATE( 552000000, 1, 46, 2),
RK3066_PLL_RATE( 504000000, 1, 84, 4),
RK3066_PLL_RATE( 500000000, 3, 125, 2),
RK3066_PLL_RATE( 456000000, 1, 76, 4),
+ RK3066_PLL_RATE( 428000000, 1, 107, 6),
RK3066_PLL_RATE( 408000000, 1, 68, 4),
RK3066_PLL_RATE( 400000000, 3, 100, 2),
+ RK3066_PLL_RATE_NB( 394000000, 1, 197, 12, 1),
RK3066_PLL_RATE( 384000000, 2, 128, 4),
RK3066_PLL_RATE( 360000000, 1, 60, 4),
+ RK3066_PLL_RATE_NB( 356000000, 1, 178, 12, 1),
+ RK3066_PLL_RATE_NB( 324000000, 1, 189, 14, 1),
RK3066_PLL_RATE( 312000000, 1, 52, 4),
- RK3066_PLL_RATE( 300000000, 1, 50, 4),
- RK3066_PLL_RATE( 297000000, 2, 198, 8),
+ RK3066_PLL_RATE_NB( 308000000, 1, 154, 12, 1),
+ RK3066_PLL_RATE_NB( 303000000, 1, 202, 16, 1),
+ RK3066_PLL_RATE( 300000000, 1, 75, 6),
+ RK3066_PLL_RATE_NB( 297750000, 2, 397, 16, 1),
+ RK3066_PLL_RATE_NB( 293250000, 2, 391, 16, 1),
+ RK3066_PLL_RATE_NB( 292500000, 1, 195, 16, 1),
+ RK3066_PLL_RATE( 273600000, 1, 114, 10),
+ RK3066_PLL_RATE_NB( 273000000, 1, 182, 16, 1),
+ RK3066_PLL_RATE_NB( 270000000, 1, 180, 16, 1),
+ RK3066_PLL_RATE_NB( 266250000, 2, 355, 16, 1),
+ RK3066_PLL_RATE_NB( 256500000, 1, 171, 16, 1),
RK3066_PLL_RATE( 252000000, 1, 84, 8),
- RK3066_PLL_RATE( 216000000, 1, 72, 8),
- RK3066_PLL_RATE( 148500000, 2, 99, 8),
+ RK3066_PLL_RATE_NB( 250500000, 1, 167, 16, 1),
+ RK3066_PLL_RATE_NB( 243428571, 1, 142, 14, 1),
+ RK3066_PLL_RATE( 238000000, 1, 119, 12),
+ RK3066_PLL_RATE_NB( 219750000, 2, 293, 16, 1),
+ RK3066_PLL_RATE_NB( 216000000, 1, 144, 16, 1),
+ RK3066_PLL_RATE_NB( 213000000, 1, 142, 16, 1),
+ RK3066_PLL_RATE( 195428571, 1, 114, 14),
+ RK3066_PLL_RATE( 160000000, 1, 80, 12),
+ RK3066_PLL_RATE( 157500000, 1, 105, 16),
RK3066_PLL_RATE( 126000000, 1, 84, 16),
RK3066_PLL_RATE( 48000000, 1, 64, 32),
{ /* sentinel */ },
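
The added rk3288_pll_rates[] entries follow the usual RK3066 PLL relation, FOUT = FREF * NF / (NR * NO), with the macro arguments ordered (rate, nr, nf, no); the _NB variants additionally set a narrow-bandwidth field. A quick standalone check of three of the new rows, assuming the customary 24 MHz xin24m reference (not stated in this hunk):

#include <stdio.h>

/* FOUT = FREF * NF / (NR * NO), assuming a 24 MHz reference */
static unsigned long long rk3066_pll_rate(unsigned int nr, unsigned int nf,
                                          unsigned int no)
{
        const unsigned long long fref = 24000000;

        return fref * nf / nr / no;
}

int main(void)
{
        printf("%llu\n", rk3066_pll_rate(1, 107,  6));  /* 428000000 */
        printf("%llu\n", rk3066_pll_rate(2, 397, 16));  /* 297750000 */
        printf("%llu\n", rk3066_pll_rate(1, 142, 14));  /* 243428571 */
        return 0;
}
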
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 252366a5231f..2c5426607790 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -813,22 +813,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
RK3328_SDMMC_CON0, 1),
MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
- RK3328_SDMMC_CON1, 1),
+ RK3328_SDMMC_CON1, 0),
MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
RK3328_SDIO_CON0, 1),
MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
- RK3328_SDIO_CON1, 1),
+ RK3328_SDIO_CON1, 0),
MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
RK3328_EMMC_CON0, 1),
MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
- RK3328_EMMC_CON1, 1),
+ RK3328_EMMC_CON1, 0),
MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext",
RK3328_SDMMC_EXT_CON0, 1),
MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext",
- RK3328_SDMMC_EXT_CON1, 1),
+ RK3328_SDMMC_EXT_CON1, 0),
};
static const char *const rk3328_critical_clocks[] __initconst = {
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index d2c99d8916b8..a5fddebbe530 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
struct exynos_cpuclk *cpuclk, void __iomem *base)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
- unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
struct exynos_cpuclk *cpuclk, void __iomem *base)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
- unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
else
cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
- cpuclk->alt_parent = __clk_lookup(alt_parent);
+ cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent));
if (!cpuclk->alt_parent) {
pr_err("%s: could not lookup alternate parent %s\n",
__func__, alt_parent);
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index d4b6b517fe1b..bd38c6aa3897 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data {
*/
struct exynos_cpuclk {
struct clk_hw hw;
- struct clk *alt_parent;
+ struct clk_hw *alt_parent;
void __iomem *ctrl_base;
spinlock_t *lock;
const struct exynos_cpuclk_cfg_data *cfg;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index f659c5cbf1d5..8f8a0f9fc842 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -15,7 +15,6 @@
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/syscore_ops.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 27c9d23657b3..0e9a41a4cac8 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -12,7 +12,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
-#include <linux/syscore_ops.h>
#include <dt-bindings/clock/exynos3250.h>
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 0421960eb963..59d4d46667ce 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -16,7 +16,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include "clk.h"
#include "clk-cpu.h"
@@ -123,10 +122,6 @@
#define CLKOUT_CMU_CPU 0x14a00
#define PWR_CTRL1 0x15020
#define E4X12_PWR_CTRL2 0x15024
-#define E4X12_DIV_ISP0 0x18300
-#define E4X12_DIV_ISP1 0x18304
-#define E4X12_GATE_ISP0 0x18800
-#define E4X12_GATE_ISP1 0x18804
/* Below definitions are used for PWR_CTRL settings */
#define PWR_CTRL1_CORE2_DOWN_RATIO(x) (((x) & 0x7) << 28)
@@ -158,14 +153,6 @@ static void __iomem *reg_base;
static enum exynos4_soc exynos4_soc;
/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4_save_common;
-static struct samsung_clk_reg_dump *exynos4_save_soc;
-static struct samsung_clk_reg_dump *exynos4_save_pll;
-
-/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
@@ -192,7 +179,7 @@ static const unsigned long exynos4x12_clk_save[] __initconst = {
E4X12_PWR_CTRL2,
};
-static const unsigned long exynos4_clk_pll_regs[] __initconst = {
+static const unsigned long exynos4_clk_regs[] __initconst = {
EPLL_LOCK,
VPLL_LOCK,
EPLL_CON0,
@@ -201,9 +188,6 @@ static const unsigned long exynos4_clk_pll_regs[] __initconst = {
VPLL_CON0,
VPLL_CON1,
VPLL_CON2,
-};
-
-static const unsigned long exynos4_clk_regs[] __initconst = {
SRC_LEFTBUS,
DIV_LEFTBUS,
GATE_IP_LEFTBUS,
@@ -276,6 +260,8 @@ static const unsigned long exynos4_clk_regs[] __initconst = {
};
static const struct samsung_clk_reg_dump src_mask_suspend[] = {
+ { .offset = VPLL_CON0, .value = 0x80600302, },
+ { .offset = EPLL_CON0, .value = 0x806F0302, },
{ .offset = SRC_MASK_TOP, .value = 0x00000001, },
{ .offset = SRC_MASK_CAM, .value = 0x11111111, },
{ .offset = SRC_MASK_TV, .value = 0x00000111, },
@@ -291,123 +277,6 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
{ .offset = E4210_SRC_MASK_LCD1, .value = 0x00001111, },
};
-#define PLL_ENABLED (1 << 31)
-#define PLL_LOCKED (1 << 29)
-
-static void exynos4_clk_enable_pll(u32 reg)
-{
- u32 pll_con = readl(reg_base + reg);
- pll_con |= PLL_ENABLED;
- writel(pll_con, reg_base + reg);
-
- while (!(pll_con & PLL_LOCKED)) {
- cpu_relax();
- pll_con = readl(reg_base + reg);
- }
-}
-
-static void exynos4_clk_wait_for_pll(u32 reg)
-{
- u32 pll_con;
-
- pll_con = readl(reg_base + reg);
- if (!(pll_con & PLL_ENABLED))
- return;
-
- while (!(pll_con & PLL_LOCKED)) {
- cpu_relax();
- pll_con = readl(reg_base + reg);
- }
-}
-
-static int exynos4_clk_suspend(void)
-{
- samsung_clk_save(reg_base, exynos4_save_common,
- ARRAY_SIZE(exynos4_clk_regs));
- samsung_clk_save(reg_base, exynos4_save_pll,
- ARRAY_SIZE(exynos4_clk_pll_regs));
-
- exynos4_clk_enable_pll(EPLL_CON0);
- exynos4_clk_enable_pll(VPLL_CON0);
-
- if (exynos4_soc == EXYNOS4210) {
- samsung_clk_save(reg_base, exynos4_save_soc,
- ARRAY_SIZE(exynos4210_clk_save));
- samsung_clk_restore(reg_base, src_mask_suspend_e4210,
- ARRAY_SIZE(src_mask_suspend_e4210));
- } else {
- samsung_clk_save(reg_base, exynos4_save_soc,
- ARRAY_SIZE(exynos4x12_clk_save));
- }
-
- samsung_clk_restore(reg_base, src_mask_suspend,
- ARRAY_SIZE(src_mask_suspend));
-
- return 0;
-}
-
-static void exynos4_clk_resume(void)
-{
- samsung_clk_restore(reg_base, exynos4_save_pll,
- ARRAY_SIZE(exynos4_clk_pll_regs));
-
- exynos4_clk_wait_for_pll(EPLL_CON0);
- exynos4_clk_wait_for_pll(VPLL_CON0);
-
- samsung_clk_restore(reg_base, exynos4_save_common,
- ARRAY_SIZE(exynos4_clk_regs));
-
- if (exynos4_soc == EXYNOS4210)
- samsung_clk_restore(reg_base, exynos4_save_soc,
- ARRAY_SIZE(exynos4210_clk_save));
- else
- samsung_clk_restore(reg_base, exynos4_save_soc,
- ARRAY_SIZE(exynos4x12_clk_save));
-}
-
-static struct syscore_ops exynos4_clk_syscore_ops = {
- .suspend = exynos4_clk_suspend,
- .resume = exynos4_clk_resume,
-};
-
-static void __init exynos4_clk_sleep_init(void)
-{
- exynos4_save_common = samsung_clk_alloc_reg_dump(exynos4_clk_regs,
- ARRAY_SIZE(exynos4_clk_regs));
- if (!exynos4_save_common)
- goto err_warn;
-
- if (exynos4_soc == EXYNOS4210)
- exynos4_save_soc = samsung_clk_alloc_reg_dump(
- exynos4210_clk_save,
- ARRAY_SIZE(exynos4210_clk_save));
- else
- exynos4_save_soc = samsung_clk_alloc_reg_dump(
- exynos4x12_clk_save,
- ARRAY_SIZE(exynos4x12_clk_save));
- if (!exynos4_save_soc)
- goto err_common;
-
- exynos4_save_pll = samsung_clk_alloc_reg_dump(exynos4_clk_pll_regs,
- ARRAY_SIZE(exynos4_clk_pll_regs));
- if (!exynos4_save_pll)
- goto err_soc;
-
- register_syscore_ops(&exynos4_clk_syscore_ops);
- return;
-
-err_soc:
- kfree(exynos4_save_soc);
-err_common:
- kfree(exynos4_save_common);
-err_warn:
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
-}
-#else
-static void __init exynos4_clk_sleep_init(void) {}
-#endif
-
/* list of all parent clock list */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
@@ -841,18 +710,6 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
};
-static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
- DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
- CLK_GET_RATE_NOCACHE, 0),
- DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
- CLK_GET_RATE_NOCACHE, 0),
- DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
- DIV_F(CLK_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
- 4, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
- 8, 3, CLK_GET_RATE_NOCACHE, 0),
-};
-
/* list of gate clocks supported in all exynos4 soc's */
static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = {
GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0),
@@ -1150,61 +1007,6 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
0),
};
-static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
- GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
- CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
-};
-
/*
* The parent of the fin_pll clock is selected by the XOM[0] bit. This bit
* resides in chipid register space, outside of the clock controller memory
@@ -1504,8 +1306,6 @@ static void __init exynos4_clk_init(struct device_node *np,
e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
} else {
- struct resource res;
-
samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
samsung_clk_register_div(ctx, exynos4x12_div_clks,
@@ -1516,14 +1316,6 @@ static void __init exynos4_clk_init(struct device_node *np,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
- of_address_to_resource(np, 0, &res);
- if (resource_size(&res) > 0x18000) {
- samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
- ARRAY_SIZE(exynos4x12_isp_div_clks));
- samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
- ARRAY_SIZE(exynos4x12_isp_gate_clks));
- }
-
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
@@ -1532,7 +1324,17 @@ static void __init exynos4_clk_init(struct device_node *np,
if (soc == EXYNOS4X12)
exynos4x12_core_down_clock();
- exynos4_clk_sleep_init();
+
+ samsung_clk_extended_sleep_init(reg_base,
+ exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
+ src_mask_suspend, ARRAY_SIZE(src_mask_suspend));
+ if (exynos4_soc == EXYNOS4210)
+ samsung_clk_extended_sleep_init(reg_base,
+ exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save),
+ src_mask_suspend_e4210, ARRAY_SIZE(src_mask_suspend_e4210));
+ else
+ samsung_clk_sleep_init(reg_base, exynos4x12_clk_save,
+ ARRAY_SIZE(exynos4x12_clk_save));
samsung_clk_of_add_provider(np, ctx);
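Net effect for exynos4: the driver-private syscore hooks, including the open-coded exynos4_clk_enable_pll()/exynos4_clk_wait_for_pll() helpers, are replaced by the shared samsung_clk_extended_sleep_init() machinery, and enabling the EPLL/VPLL for suspend is now expressed as data via the two entries added to src_mask_suspend. Those values carry the same bit 31 the removed PLL_ENABLED mask used; a trivial stand-alone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Values added to src_mask_suspend above. */
	const uint32_t vals[] = { 0x80600302 /* VPLL_CON0 */,
				  0x806F0302 /* EPLL_CON0 */ };

	for (unsigned int i = 0; i < 2; i++)
		printf("0x%08x: enable bit (bit 31) %s\n",
		       (unsigned int)vals[i],
		       (vals[i] & (1u << 31)) ? "set" : "clear");
	return 0;
}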
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 347fd80c351b..f14139bcb0c1 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -14,7 +14,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include "clk.h"
#include "clk-cpu.h"
@@ -111,9 +110,6 @@ enum exynos5250_plls {
static void __iomem *reg_base;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos5250_save;
-
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -172,41 +168,6 @@ static const unsigned long exynos5250_clk_regs[] __initconst = {
GATE_IP_ISP1,
};
-static int exynos5250_clk_suspend(void)
-{
- samsung_clk_save(reg_base, exynos5250_save,
- ARRAY_SIZE(exynos5250_clk_regs));
-
- return 0;
-}
-
-static void exynos5250_clk_resume(void)
-{
- samsung_clk_restore(reg_base, exynos5250_save,
- ARRAY_SIZE(exynos5250_clk_regs));
-}
-
-static struct syscore_ops exynos5250_clk_syscore_ops = {
- .suspend = exynos5250_clk_suspend,
- .resume = exynos5250_clk_resume,
-};
-
-static void __init exynos5250_clk_sleep_init(void)
-{
- exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs,
- ARRAY_SIZE(exynos5250_clk_regs));
- if (!exynos5250_save) {
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
- return;
- }
-
- register_syscore_ops(&exynos5250_clk_syscore_ops);
-}
-#else
-static void __init exynos5250_clk_sleep_init(void) {}
-#endif
-
/* list of all parent clock list */
PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", };
@@ -882,7 +843,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO);
__raw_writel(tmp, reg_base + PWR_CTRL2);
- exynos5250_clk_sleep_init();
+ samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
+ ARRAY_SIZE(exynos5250_clk_regs));
exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 95e1bf69449b..34cce3c5898f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -15,7 +15,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include "clk.h"
#include "clk-cpu.h"
@@ -156,10 +155,6 @@ enum exynos5x_plls {
static void __iomem *reg_base;
static enum exynos5x_soc exynos5x_soc;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos5x_save;
-static struct samsung_clk_reg_dump *exynos5800_save;
-
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -281,68 +276,9 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
{ .offset = GATE_BUS_TOP, .value = 0xffffffff, },
{ .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
{ .offset = GATE_IP_PERIC, .value = 0xffffffff, },
+ { .offset = GATE_IP_PERIS, .value = 0xffffffff, },
};
-static int exynos5420_clk_suspend(void)
-{
- samsung_clk_save(reg_base, exynos5x_save,
- ARRAY_SIZE(exynos5x_clk_regs));
-
- if (exynos5x_soc == EXYNOS5800)
- samsung_clk_save(reg_base, exynos5800_save,
- ARRAY_SIZE(exynos5800_clk_regs));
-
- samsung_clk_restore(reg_base, exynos5420_set_clksrc,
- ARRAY_SIZE(exynos5420_set_clksrc));
-
- return 0;
-}
-
-static void exynos5420_clk_resume(void)
-{
- samsung_clk_restore(reg_base, exynos5x_save,
- ARRAY_SIZE(exynos5x_clk_regs));
-
- if (exynos5x_soc == EXYNOS5800)
- samsung_clk_restore(reg_base, exynos5800_save,
- ARRAY_SIZE(exynos5800_clk_regs));
-}
-
-static struct syscore_ops exynos5420_clk_syscore_ops = {
- .suspend = exynos5420_clk_suspend,
- .resume = exynos5420_clk_resume,
-};
-
-static void __init exynos5420_clk_sleep_init(void)
-{
- exynos5x_save = samsung_clk_alloc_reg_dump(exynos5x_clk_regs,
- ARRAY_SIZE(exynos5x_clk_regs));
- if (!exynos5x_save) {
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
- return;
- }
-
- if (exynos5x_soc == EXYNOS5800) {
- exynos5800_save =
- samsung_clk_alloc_reg_dump(exynos5800_clk_regs,
- ARRAY_SIZE(exynos5800_clk_regs));
- if (!exynos5800_save)
- goto err_soc;
- }
-
- register_syscore_ops(&exynos5420_clk_syscore_ops);
- return;
-err_soc:
- kfree(exynos5x_save);
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
- return;
-}
-#else
-static void __init exynos5420_clk_sleep_init(void) {}
-#endif
-
/* list of all parent clocks */
PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll",
"mout_sclk_mpll", "mout_sclk_spll"};
@@ -633,6 +569,7 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
};
static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
+ GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
};
@@ -1162,8 +1099,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_IP_PERIS, 21, 0, 0),
GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0),
- GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
-
/* GEN Block */
GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0),
GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
@@ -1540,7 +1475,12 @@ static void __init exynos5x_clk_init(struct device_node *np,
mout_kfc_p[0], mout_kfc_p[1], 0x28200,
exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0);
- exynos5420_clk_sleep_init();
+ samsung_clk_extended_sleep_init(reg_base,
+ exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
+ exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
+ if (soc == EXYNOS5800)
+ samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
+ ARRAY_SIZE(exynos5800_clk_regs));
exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
exynos5x_subcmus);
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 162de44df099..751e2c4fb65b 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -177,6 +177,17 @@ static const unsigned long top_clk_regs[] __initconst = {
ENABLE_CMU_TOP_DIV_STAT,
};
+static const struct samsung_clk_reg_dump top_suspend_regs[] = {
+ /* force all aclk clocks enabled */
+ { ENABLE_ACLK_TOP, 0x67ecffed },
+ /* force all sclk_uart clocks enabled */
+ { ENABLE_SCLK_TOP_PERIC, 0x38 },
+ /* ISP PLL has to be enabled for suspend: reset value + ENABLE bit */
+ { ISP_PLL_CON0, 0x85cc0502 },
+ /* AUD PLL has to be enabled for suspend: reset value + ENABLE bit */
+ { AUD_PLL_CON0, 0x84830202 },
+};
+
/* list of all parent clock list */
PNAME(mout_aud_pll_p) = { "oscclk", "fout_aud_pll", };
PNAME(mout_isp_pll_p) = { "oscclk", "fout_isp_pll", };
@@ -792,6 +803,8 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
.nr_clk_ids = TOP_NR_CLK,
.clk_regs = top_clk_regs,
.nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+ .suspend_regs = top_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(top_suspend_regs),
};
static void __init exynos5433_cmu_top_init(struct device_node *np)
@@ -822,6 +835,13 @@ static const unsigned long cpif_clk_regs[] __initconst = {
ENABLE_SCLK_CPIF,
};
+static const struct samsung_clk_reg_dump cpif_suspend_regs[] = {
+ /* force all sclk clocks enabled */
+ { ENABLE_SCLK_CPIF, 0x3ff },
+ /* MPHY PLL has to be enabled for suspend: reset value + ENABLE bit */
+ { MPHY_PLL_CON0, 0x81c70601 },
+};
+
/* list of all parent clock list */
PNAME(mout_mphy_pll_p) = { "oscclk", "fout_mphy_pll", };
@@ -862,6 +882,8 @@ static const struct samsung_cmu_info cpif_cmu_info __initconst = {
.nr_clk_ids = CPIF_NR_CLK,
.clk_regs = cpif_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cpif_clk_regs),
+ .suspend_regs = cpif_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(cpif_suspend_regs),
};
static void __init exynos5433_cmu_cpif_init(struct device_node *np)
@@ -1547,6 +1569,13 @@ static const unsigned long peric_clk_regs[] __initconst = {
ENABLE_IP_PERIC2,
};
+static const struct samsung_clk_reg_dump peric_suspend_regs[] = {
+ /* pclk: sci, pmu, sysreg, gpio_{finger, ese, touch, nfc}, uart2-0 */
+ { ENABLE_PCLK_PERIC0, 0xe00ff000 },
+ /* sclk: uart2-0 */
+ { ENABLE_SCLK_PERIC, 0x7 },
+};
+
static const struct samsung_div_clock peric_div_clks[] __initconst = {
/* DIV_PERIC */
DIV(CLK_DIV_SCLK_SCI, "div_sclk_sci", "oscclk", DIV_PERIC, 4, 4),
@@ -1705,6 +1734,8 @@ static const struct samsung_cmu_info peric_cmu_info __initconst = {
.nr_clk_ids = PERIC_NR_CLK,
.clk_regs = peric_clk_regs,
.nr_clk_regs = ARRAY_SIZE(peric_clk_regs),
+ .suspend_regs = peric_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(peric_suspend_regs),
};
static void __init exynos5433_cmu_peric_init(struct device_node *np)
@@ -5630,7 +5661,7 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
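The system-sleep hooks for the exynos5433 sub-CMUs move from the late to the noirq suspend phase, presumably so the clock controller is suspended only after consumers have finished their own late-suspend callbacks and is resumed before them on wake-up. The resulting shape, as a generic sketch rather than the driver's full pm_ops:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Reuse the runtime-PM callbacks for system sleep, but run them in the
 * noirq phase; the runtime callbacks themselves are omitted here.
 */
static const struct dev_pm_ops example_cmu_pm_ops = {
	SET_RUNTIME_PM_OPS(NULL, NULL, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
};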
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index a9c887475054..8cb868f06257 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -11,7 +11,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include <dt-bindings/clock/s3c2410.h>
@@ -40,9 +39,6 @@ enum s3c2410_plls {
static void __iomem *reg_base;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *s3c2410_save;
-
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -57,42 +53,6 @@ static unsigned long s3c2410_clk_regs[] __initdata = {
CAMDIVN,
};
-static int s3c2410_clk_suspend(void)
-{
- samsung_clk_save(reg_base, s3c2410_save,
- ARRAY_SIZE(s3c2410_clk_regs));
-
- return 0;
-}
-
-static void s3c2410_clk_resume(void)
-{
- samsung_clk_restore(reg_base, s3c2410_save,
- ARRAY_SIZE(s3c2410_clk_regs));
-}
-
-static struct syscore_ops s3c2410_clk_syscore_ops = {
- .suspend = s3c2410_clk_suspend,
- .resume = s3c2410_clk_resume,
-};
-
-static void __init s3c2410_clk_sleep_init(void)
-{
- s3c2410_save = samsung_clk_alloc_reg_dump(s3c2410_clk_regs,
- ARRAY_SIZE(s3c2410_clk_regs));
- if (!s3c2410_save) {
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
- return;
- }
-
- register_syscore_ops(&s3c2410_clk_syscore_ops);
- return;
-}
-#else
-static void __init s3c2410_clk_sleep_init(void) {}
-#endif
-
PNAME(fclk_p) = { "mpll", "div_slow" };
static struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
@@ -461,7 +421,8 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c244x_common_aliases));
}
- s3c2410_clk_sleep_init();
+ samsung_clk_sleep_init(reg_base, s3c2410_clk_regs,
+ ARRAY_SIZE(s3c2410_clk_regs));
samsung_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index 6bc94d3aff78..dd1159050a5a 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -11,7 +11,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <dt-bindings/clock/s3c2412.h>
@@ -29,9 +28,6 @@
static void __iomem *reg_base;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *s3c2412_save;
-
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -45,42 +41,6 @@ static unsigned long s3c2412_clk_regs[] __initdata = {
CLKSRC,
};
-static int s3c2412_clk_suspend(void)
-{
- samsung_clk_save(reg_base, s3c2412_save,
- ARRAY_SIZE(s3c2412_clk_regs));
-
- return 0;
-}
-
-static void s3c2412_clk_resume(void)
-{
- samsung_clk_restore(reg_base, s3c2412_save,
- ARRAY_SIZE(s3c2412_clk_regs));
-}
-
-static struct syscore_ops s3c2412_clk_syscore_ops = {
- .suspend = s3c2412_clk_suspend,
- .resume = s3c2412_clk_resume,
-};
-
-static void __init s3c2412_clk_sleep_init(void)
-{
- s3c2412_save = samsung_clk_alloc_reg_dump(s3c2412_clk_regs,
- ARRAY_SIZE(s3c2412_clk_regs));
- if (!s3c2412_save) {
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
- return;
- }
-
- register_syscore_ops(&s3c2412_clk_syscore_ops);
- return;
-}
-#else
-static void __init s3c2412_clk_sleep_init(void) {}
-#endif
-
static struct clk_div_table divxti_d[] = {
{ .val = 0, .div = 1 },
{ .val = 1, .div = 2 },
@@ -278,7 +238,8 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
samsung_clk_register_alias(ctx, s3c2412_aliases,
ARRAY_SIZE(s3c2412_aliases));
- s3c2412_clk_sleep_init();
+ samsung_clk_sleep_init(reg_base, s3c2412_clk_regs,
+ ARRAY_SIZE(s3c2412_clk_regs));
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index c46e6d5bc9bc..884067e4f1a1 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -11,7 +11,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <dt-bindings/clock/s3c2443.h>
@@ -43,9 +42,6 @@ enum supported_socs {
static void __iomem *reg_base;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *s3c2443_save;
-
/*
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
@@ -65,42 +61,6 @@ static unsigned long s3c2443_clk_regs[] __initdata = {
SCLKCON,
};
-static int s3c2443_clk_suspend(void)
-{
- samsung_clk_save(reg_base, s3c2443_save,
- ARRAY_SIZE(s3c2443_clk_regs));
-
- return 0;
-}
-
-static void s3c2443_clk_resume(void)
-{
- samsung_clk_restore(reg_base, s3c2443_save,
- ARRAY_SIZE(s3c2443_clk_regs));
-}
-
-static struct syscore_ops s3c2443_clk_syscore_ops = {
- .suspend = s3c2443_clk_suspend,
- .resume = s3c2443_clk_resume,
-};
-
-static void __init s3c2443_clk_sleep_init(void)
-{
- s3c2443_save = samsung_clk_alloc_reg_dump(s3c2443_clk_regs,
- ARRAY_SIZE(s3c2443_clk_regs));
- if (!s3c2443_save) {
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
- return;
- }
-
- register_syscore_ops(&s3c2443_clk_syscore_ops);
- return;
-}
-#else
-static void __init s3c2443_clk_sleep_init(void) {}
-#endif
-
PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" };
PNAME(esysclk_p) = { "epllref", "epll" };
PNAME(mpllref_p) = { "xti", "mdivclk" };
@@ -450,7 +410,8 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
break;
}
- s3c2443_clk_sleep_init();
+ samsung_clk_sleep_init(reg_base, s3c2443_clk_regs,
+ ARRAY_SIZE(s3c2443_clk_regs));
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 6db01cf5ab83..54916c7bdb06 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -12,7 +12,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
@@ -59,10 +58,6 @@
static void __iomem *reg_base;
static bool is_s3c6400;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *s3c64xx_save_common;
-static struct samsung_clk_reg_dump *s3c64xx_save_soc;
-
/*
* List of controller registers to be saved and restored during
* a suspend/resume cycle.
@@ -89,60 +84,6 @@ static unsigned long s3c6410_clk_regs[] __initdata = {
MEM0_GATE,
};
-static int s3c64xx_clk_suspend(void)
-{
- samsung_clk_save(reg_base, s3c64xx_save_common,
- ARRAY_SIZE(s3c64xx_clk_regs));
-
- if (!is_s3c6400)
- samsung_clk_save(reg_base, s3c64xx_save_soc,
- ARRAY_SIZE(s3c6410_clk_regs));
-
- return 0;
-}
-
-static void s3c64xx_clk_resume(void)
-{
- samsung_clk_restore(reg_base, s3c64xx_save_common,
- ARRAY_SIZE(s3c64xx_clk_regs));
-
- if (!is_s3c6400)
- samsung_clk_restore(reg_base, s3c64xx_save_soc,
- ARRAY_SIZE(s3c6410_clk_regs));
-}
-
-static struct syscore_ops s3c64xx_clk_syscore_ops = {
- .suspend = s3c64xx_clk_suspend,
- .resume = s3c64xx_clk_resume,
-};
-
-static void __init s3c64xx_clk_sleep_init(void)
-{
- s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs,
- ARRAY_SIZE(s3c64xx_clk_regs));
- if (!s3c64xx_save_common)
- goto err_warn;
-
- if (!is_s3c6400) {
- s3c64xx_save_soc = samsung_clk_alloc_reg_dump(s3c6410_clk_regs,
- ARRAY_SIZE(s3c6410_clk_regs));
- if (!s3c64xx_save_soc)
- goto err_soc;
- }
-
- register_syscore_ops(&s3c64xx_clk_syscore_ops);
- return;
-
-err_soc:
- kfree(s3c64xx_save_common);
-err_warn:
- pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
- __func__);
-}
-#else
-static void __init s3c64xx_clk_sleep_init(void) {}
-#endif
-
/* List of parent clocks common for all S3C64xx SoCs. */
PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" };
PNAME(uart_p) = { "mout_epll", "dout_mpll" };
@@ -508,7 +449,12 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
samsung_clk_register_alias(ctx, s3c64xx_clock_aliases,
ARRAY_SIZE(s3c64xx_clock_aliases));
- s3c64xx_clk_sleep_init();
+
+ samsung_clk_sleep_init(reg_base, s3c64xx_clk_regs,
+ ARRAY_SIZE(s3c64xx_clk_regs));
+ if (!is_s3c6400)
+ samsung_clk_sleep_init(reg_base, s3c6410_clk_regs,
+ ARRAY_SIZE(s3c6410_clk_regs));
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index fd2725710a6f..41d2337fe030 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -14,7 +14,6 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
#include "clk.h"
#include "clk-pll.h"
@@ -83,9 +82,6 @@ enum {
static void __iomem *reg_base;
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *s5pv210_clk_dump;
-
/* List of registers that need to be preserved across suspend/resume. */
static unsigned long s5pv210_clk_regs[] __initdata = {
CLK_SRC0,
@@ -132,40 +128,6 @@ static unsigned long s5pv210_clk_regs[] __initdata = {
CLK_OUT,
};
-static int s5pv210_clk_suspend(void)
-{
- samsung_clk_save(reg_base, s5pv210_clk_dump,
- ARRAY_SIZE(s5pv210_clk_regs));
- return 0;
-}
-
-static void s5pv210_clk_resume(void)
-{
- samsung_clk_restore(reg_base, s5pv210_clk_dump,
- ARRAY_SIZE(s5pv210_clk_regs));
-}
-
-static struct syscore_ops s5pv210_clk_syscore_ops = {
- .suspend = s5pv210_clk_suspend,
- .resume = s5pv210_clk_resume,
-};
-
-static void s5pv210_clk_sleep_init(void)
-{
- s5pv210_clk_dump =
- samsung_clk_alloc_reg_dump(s5pv210_clk_regs,
- ARRAY_SIZE(s5pv210_clk_regs));
- if (!s5pv210_clk_dump) {
- pr_warn("%s: Failed to allocate sleep save data\n", __func__);
- return;
- }
-
- register_syscore_ops(&s5pv210_clk_syscore_ops);
-}
-#else
-static inline void s5pv210_clk_sleep_init(void) { }
-#endif
-
/* Mux parent lists. */
static const char *const fin_pll_p[] __initconst = {
"xxti",
@@ -822,7 +784,8 @@ static void __init __s5pv210_clk_init(struct device_node *np,
samsung_clk_register_alias(ctx, s5pv210_aliases,
ARRAY_SIZE(s5pv210_aliases));
- s5pv210_clk_sleep_init();
+ samsung_clk_sleep_init(reg_base, s5pv210_clk_regs,
+ ARRAY_SIZE(s5pv210_clk_regs));
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 8634884aa11c..1f6e47cd327d 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -290,9 +290,12 @@ static int samsung_clk_suspend(void)
{
struct samsung_clock_reg_cache *reg_cache;
- list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
+ list_for_each_entry(reg_cache, &clock_reg_cache_list, node) {
samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
reg_cache->rd_num);
+ samsung_clk_restore(reg_cache->reg_base, reg_cache->rsuspend,
+ reg_cache->rsuspend_num);
+ }
return 0;
}
@@ -310,9 +313,11 @@ static struct syscore_ops samsung_clk_syscore_ops = {
.resume = samsung_clk_resume,
};
-void samsung_clk_sleep_init(void __iomem *reg_base,
+void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
- unsigned long nr_rdump)
+ unsigned long nr_rdump,
+ const struct samsung_clk_reg_dump *rsuspend,
+ unsigned long nr_rsuspend)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -330,13 +335,10 @@ void samsung_clk_sleep_init(void __iomem *reg_base,
reg_cache->reg_base = reg_base;
reg_cache->rd_num = nr_rdump;
+ reg_cache->rsuspend = rsuspend;
+ reg_cache->rsuspend_num = nr_rsuspend;
list_add_tail(&reg_cache->node, &clock_reg_cache_list);
}
-
-#else
-void samsung_clk_sleep_init(void __iomem *reg_base,
- const unsigned long *rdump,
- unsigned long nr_rdump) {}
#endif
/*
@@ -380,8 +382,9 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
cmu->nr_fixed_factor_clks);
if (cmu->clk_regs)
- samsung_clk_sleep_init(reg_base, cmu->clk_regs,
- cmu->nr_clk_regs);
+ samsung_clk_extended_sleep_init(reg_base,
+ cmu->clk_regs, cmu->nr_clk_regs,
+ cmu->suspend_regs, cmu->nr_suspend_regs);
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 3880d2f9d582..c3f309d7100d 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -279,6 +279,8 @@ struct samsung_clock_reg_cache {
void __iomem *reg_base;
struct samsung_clk_reg_dump *rdump;
unsigned int rd_num;
+ const struct samsung_clk_reg_dump *rsuspend;
+ unsigned int rsuspend_num;
};
struct samsung_cmu_info {
@@ -358,9 +360,21 @@ extern struct samsung_clk_provider __init *samsung_cmu_register_one(
extern unsigned long _get_rate(const char *clk_name);
-extern void samsung_clk_sleep_init(void __iomem *reg_base,
+#ifdef CONFIG_PM_SLEEP
+extern void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
- unsigned long nr_rdump);
+ unsigned long nr_rdump,
+ const struct samsung_clk_reg_dump *rsuspend,
+ unsigned long nr_rsuspend);
+#else
+static inline void samsung_clk_extended_sleep_init(void __iomem *reg_base,
+ const unsigned long *rdump,
+ unsigned long nr_rdump,
+ const struct samsung_clk_reg_dump *rsuspend,
+ unsigned long nr_rsuspend) {}
+#endif
+#define samsung_clk_sleep_init(reg_base, rdump, nr_rdump) \
+ samsung_clk_extended_sleep_init(reg_base, rdump, nr_rdump, NULL, 0)
extern void samsung_clk_save(void __iomem *base,
struct samsung_clk_reg_dump *rd,
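The header now exposes a single entry point: samsung_clk_extended_sleep_init() records both the register list to save/restore across suspend and an optional list of fixed values to program at suspend time, while the samsung_clk_sleep_init() macro above keeps existing callers working by passing a NULL suspend list. A usage sketch with placeholder register offsets:

#include <linux/kernel.h>
#include "clk.h"

static const unsigned long example_clk_regs[] = { 0x0, 0x4 };
static const struct samsung_clk_reg_dump example_suspend_regs[] = {
	{ .offset = 0x8, .value = 0xffffffff },
};

/* Register the save/restore list plus one value to force at suspend. */
static void example_sleep_init(void __iomem *reg_base)
{
	samsung_clk_extended_sleep_init(reg_base,
			example_clk_regs, ARRAY_SIZE(example_clk_regs),
			example_suspend_regs,
			ARRAY_SIZE(example_suspend_regs));
}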
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index a79d81985c4e..cfa000007622 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -936,7 +936,7 @@ static void __init st_of_quadfs_setup(struct device_node *np,
if (!clk_parent_name)
return;
- pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name);
+ pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np);
if (!pll_name)
return;
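This hunk, like the later sunxi and TI ones, stops dereferencing node->name and uses the %pOFn printk extension, which prints the name of a struct device_node; kasprintf() and the pr_*/dev_* helpers all go through the same vsnprintf() core, so the specifier works in each case. Minimal sketch (function name is a placeholder):

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Build "<nodename>.pll" without touching np->name directly. */
static char *example_pll_name(struct device_node *np)
{
	return kasprintf(GFP_KERNEL, "%pOFn.pll", np);
}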
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index ee9c12cf3f08..5f80eb018014 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -64,17 +64,19 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
- "osc24M", 0x010,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video0_clk, "pll-video0",
+ "osc24M", 0x010,
+ 192000000, /* Minimum rate */
+ 1008000000, /* Maximum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x018,
@@ -125,17 +127,19 @@ static struct ccu_nk pll_periph1_clk = {
},
};
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1",
- "osc24M", 0x030,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 192000000, /* Minimum rate */
+ 1008000000, /* Maximum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
"osc24M", 0x038,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 061b6fbb4f95..cd415b968e8c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -27,7 +27,9 @@
#define CLK_PLL_AUDIO_2X 4
#define CLK_PLL_AUDIO_4X 5
#define CLK_PLL_AUDIO_8X 6
-#define CLK_PLL_VIDEO0 7
+
+/* PLL_VIDEO0 exported for HDMI PHY */
+
#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index bdbfe78fe133..2193e1495086 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -224,7 +224,7 @@ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
psi_ahb1_ahb2_parents,
0x510,
0, 5, /* M */
- 16, 2, /* P */
+ 8, 2, /* P */
24, 2, /* mux */
0);
@@ -233,19 +233,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
"pll-periph0" };
static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
0, 5, /* M */
- 16, 2, /* P */
+ 8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
0, 5, /* M */
- 16, 2, /* P */
+ 8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
0, 5, /* M */
- 16, 2, /* P */
+ 8, 2, /* P */
24, 2, /* mux */
0);
@@ -352,7 +352,7 @@ static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2",
static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2",
0x79c, BIT(0), 0);
-static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x79c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0);
static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0);
@@ -408,26 +408,29 @@ static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x",
"pll-periph1-2x" };
-static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_parents, 0x830,
- 0, 4, /* M */
- 8, 2, /* N */
- 24, 3, /* mux */
- BIT(31),/* gate */
- 0);
-
-static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_parents, 0x834,
- 0, 4, /* M */
- 8, 2, /* N */
- 24, 3, /* mux */
- BIT(31),/* gate */
- 0);
-
-static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_parents, 0x838,
- 0, 4, /* M */
- 8, 2, /* N */
- 24, 3, /* mux */
- BIT(31),/* gate */
- 0);
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
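Besides relocating the P field of the psi/ahb3/apb dividers to bit 8 and the bus-pwm gate to 0x7ac, the H6 MMC mod clocks gain a fixed post-divider of 2, so a rate requested by the MMC controller has to be produced at twice that frequency by the mux/divider stage (this is what the existing *= fixed_post_div handling in ccu_nkmp/ccu_nm implements). Trivial arithmetic illustration:

#include <stdio.h>

int main(void)
{
	const unsigned long requested = 50000000, postdiv = 2;

	/* Internal target the divider stage must hit for a 50 MHz clock. */
	printf("%lu Hz\n", requested * postdiv);
	return 0;
}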
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 7d08015b980d..2d6555d73170 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -108,6 +108,7 @@ static struct ccu_nkmp pll_video0_clk = {
.n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
.m = _SUNXI_CCU_DIV(16, 1), /* input divider */
.p = _SUNXI_CCU_DIV(0, 2), /* output divider */
+ .max_rate = 3000000000UL,
.common = {
.reg = 0x010,
.lock_reg = CCU_SUN8I_A83T_LOCK_REG,
@@ -220,6 +221,7 @@ static struct ccu_nkmp pll_video1_clk = {
.n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
.m = _SUNXI_CCU_DIV(16, 1), /* input divider */
.p = _SUNXI_CCU_DIV(0, 2), /* external divider p */
+ .max_rate = 3000000000UL,
.common = {
.reg = 0x04c,
.lock_reg = CCU_SUN8I_A83T_LOCK_REG,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 77ed0b0ba681..eb5c608428fa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -69,18 +69,19 @@ static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video_clk, "pll-video",
- "osc24M", 0x0010,
- 192000000, /* Minimum rate */
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video_clk, "pll-video",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 912000000, /* Maximum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x0018,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 0f388f6944d5..582ebd41d20d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -65,19 +65,19 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
-/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video0_clk, "pll-video0",
- "osc24M", 0x0010,
- 192000000, /* Minimum rate */
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video0_clk, "pll-video0",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 1008000000, /* Maximum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
/* TODO: The result of N/M is required to be in [8, 25] range. */
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
@@ -152,19 +152,19 @@ static struct ccu_nk pll_periph1_clk = {
},
};
-/* TODO: The result of N/M is required to be in [8, 25] range. */
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video1_clk, "pll-video1",
- "osc24M", 0x030,
- 192000000, /* Minimum rate */
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video1_clk, "pll-video1",
+ "osc24M", 0x030,
+ 192000000, /* Minimum rate */
+ 1008000000, /* Maximum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static struct ccu_nkm pll_sata_clk = {
.enable = BIT(31),
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index ebd9436d2c7c..9b49adb20d07 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -137,6 +137,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= nkmp->fixed_post_div;
+ if (nkmp->max_rate && rate > nkmp->max_rate) {
+ rate = nkmp->max_rate;
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nkmp->fixed_post_div;
+ return rate;
+ }
+
_nkmp.min_n = nkmp->n.min ?: 1;
_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
_nkmp.min_k = nkmp->k.min ?: 1;
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h
index 6940503e7fc4..a9f8c116a745 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.h
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
@@ -35,6 +35,7 @@ struct ccu_nkmp {
struct ccu_div_internal p;
unsigned int fixed_post_div;
+ unsigned int max_rate;
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 4e2073307f34..6fe3c14f7b2d 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -124,6 +124,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
return rate;
}
+ if (nm->max_rate && rate > nm->max_rate) {
+ rate = nm->max_rate;
+ if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nm->fixed_post_div;
+ return rate;
+ }
+
if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
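Both ccu_nkmp and ccu_nm round_rate() now clamp requests against an optional max_rate, mirroring the existing min_rate handling; with a fixed post-divider the comparison happens in the pre-divided domain and the returned value is max_rate divided back down. A stand-alone mirror of just that clamp:

#include <stdio.h>

/* Fold requests above max_rate back to max_rate, accounting for an
 * optional fixed post-divider (postdiv == 0 means none).
 */
static unsigned long clamp_request(unsigned long rate,
				   unsigned long max_rate,
				   unsigned int postdiv)
{
	if (postdiv)
		rate *= postdiv;
	if (max_rate && rate > max_rate)
		rate = max_rate;
	if (postdiv)
		rate /= postdiv;
	return rate;
}

int main(void)
{
	/* e.g. pll-video0 on A64/R40 is now capped at 1008 MHz */
	printf("%lu\n", clamp_request(1200000000UL, 1008000000UL, 0));
	return 0;
}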
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index 1d8b459c50b7..de232f2199a6 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -38,6 +38,7 @@ struct ccu_nm {
unsigned int fixed_post_div;
unsigned int min_rate;
+ unsigned int max_rate;
struct ccu_common common;
};
@@ -115,6 +116,35 @@ struct ccu_nm {
}, \
}
+#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(_struct, _name, \
+ _parent, _reg, \
+ _min_rate, _max_rate, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _frac_en, _frac_sel, \
+ _frac_rate_0, \
+ _frac_rate_1, \
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .frac = _SUNXI_CCU_FRAC(_frac_en, _frac_sel, \
+ _frac_rate_0, \
+ _frac_rate_1), \
+ .min_rate = _min_rate, \
+ .max_rate = _max_rate, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FRACTIONAL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
#define SUNXI_CCU_NM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
_nshift, _nwidth, \
_mshift, _mwidth, \
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index a27c264cc9b4..fc0278a1acc7 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -140,8 +140,8 @@ static void __init sun9i_a80_mod0_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Could not get registers for mod0-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for mod0-clk: %pOFn\n",
+ node);
return;
}
@@ -306,7 +306,7 @@ static void __init sunxi_mmc_setup(struct device_node *node,
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Couldn't map the %s clock registers\n", node->name);
+ pr_err("Couldn't map the %pOFn clock registers\n", node);
return;
}
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index e9295c286d5d..7e21b2b10c94 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -88,8 +88,8 @@ static void __init sun9i_a80_pll4_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Could not get registers for a80-pll4-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for a80-pll4-clk: %pOFn\n",
+ node);
return;
}
@@ -142,8 +142,8 @@ static void __init sun9i_a80_gt_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Could not get registers for a80-gt-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for a80-gt-clk: %pOFn\n",
+ node);
return;
}
@@ -197,8 +197,8 @@ static void __init sun9i_a80_ahb_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Could not get registers for a80-ahb-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for a80-ahb-clk: %pOFn\n",
+ node);
return;
}
@@ -223,8 +223,8 @@ static void __init sun9i_a80_apb0_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Could not get registers for a80-apb0-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for a80-apb0-clk: %pOFn\n",
+ node);
return;
}
@@ -280,8 +280,8 @@ static void __init sun9i_a80_apb1_setup(struct device_node *node)
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(reg)) {
- pr_err("Could not get registers for a80-apb1-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for a80-apb1-clk: %pOFn\n",
+ node);
return;
}
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 012714d94b42..892c29030b7b 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -568,8 +568,8 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
reg = of_iomap(node, 0);
if (!reg) {
- pr_err("Could not get registers for factors-clk: %s\n",
- node->name);
+ pr_err("Could not get registers for factors-clk: %pOFn\n",
+ node);
return NULL;
}
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 48ee43734e05..ebb0e1b6bf01 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1609,8 +1609,12 @@ int tegra_dfll_register(struct platform_device *pdev,
td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
if (IS_ERR(td->vdd_reg)) {
- dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
- return PTR_ERR(td->vdd_reg);
+ ret = PTR_ERR(td->vdd_reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n",
+ ret);
+
+ return ret;
}
td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
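The regulator lookup now stays silent on -EPROBE_DEFER, which is routine when the vdd-cpu supply simply has not probed yet, and only logs (with the error code) on real failures. The same pattern in isolation, with placeholder names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Fetch a supply, deferring quietly and logging only genuine errors. */
static int example_get_vdd(struct device *dev, struct regulator **reg)
{
	*reg = devm_regulator_get(dev, "vdd-cpu");
	if (IS_ERR(*reg)) {
		int ret = PTR_ERR(*reg);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "couldn't get vdd-cpu regulator: %d\n",
				ret);
		return ret;
	}
	return 0;
}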
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 9eb1cb14fce1..88f1943bd2b5 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -27,6 +27,7 @@
#include <dt-bindings/clock/tegra210-car.h>
#include <dt-bindings/reset/tegra210-car.h>
#include <linux/iopoll.h>
+#include <linux/sizes.h>
#include <soc/tegra/pmc.h>
#include "clk.h"
@@ -2603,7 +2604,7 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
[TEGRA_POWERGATE_MPE] = {
.handle_lvl2_ovr = tegra210_generic_mbist_war,
.lvl2_offset = LVL2_CLK_GATE_OVRE,
- .lvl2_mask = BIT(2),
+ .lvl2_mask = BIT(29),
},
[TEGRA_POWERGATE_SOR] = {
.handle_lvl2_ovr = tegra210_generic_mbist_war,
@@ -2654,14 +2655,14 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
.num_clks = ARRAY_SIZE(nvdec_slcg_clkids),
.clk_init_data = nvdec_slcg_clkids,
.handle_lvl2_ovr = tegra210_generic_mbist_war,
- .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_offset = LVL2_CLK_GATE_OVRE,
.lvl2_mask = BIT(9) | BIT(31),
},
[TEGRA_POWERGATE_NVJPG] = {
.num_clks = ARRAY_SIZE(nvjpg_slcg_clkids),
.clk_init_data = nvjpg_slcg_clkids,
.handle_lvl2_ovr = tegra210_generic_mbist_war,
- .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_offset = LVL2_CLK_GATE_OVRE,
.lvl2_mask = BIT(9) | BIT(31),
},
[TEGRA_POWERGATE_AUD] = {
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index 5ab295d2a3cb..5ca1e39dd88a 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -6,7 +6,8 @@ clk-common = dpll.o composite.o divider.o gate.o \
fixed-factor.o mux.o apll.o \
clkt_dpll.o clkt_iclk.o clkt_dflt.o \
clkctrl.o
-obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o
+obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o \
+ clk-33xx-compat.o
obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-814x.o clk-816x.o
obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \
@@ -16,8 +17,10 @@ obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o \
obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o \
dpll3xxx.o dpll44xx.o
obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
- clk-dra7-atl.o dpll3xxx.o dpll44xx.o
-obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o
+ clk-dra7-atl.o dpll3xxx.o \
+ dpll44xx.o clk-7xx-compat.o
+obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o \
+ clk-43xx-compat.o
endif # CONFIG_ARCH_OMAP2PLUS
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 61c126a5d26a..222f68bc3f2a 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -143,8 +143,8 @@ static void __init omap_clk_register_apll(void *user,
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- pr_debug("clk-ref for %s not ready, retry\n",
- node->name);
+ pr_debug("clk-ref for %pOFn not ready, retry\n",
+ node);
if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
return;
@@ -155,8 +155,8 @@ static void __init omap_clk_register_apll(void *user,
clk = of_clk_get(node, 1);
if (IS_ERR(clk)) {
- pr_debug("clk-bypass for %s not ready, retry\n",
- node->name);
+ pr_debug("clk-bypass for %pOFn not ready, retry\n",
+ node);
if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
return;
@@ -202,7 +202,7 @@ static void __init of_dra7_apll_setup(struct device_node *node)
init->num_parents = of_clk_get_parent_count(node);
if (init->num_parents < 1) {
- pr_err("dra7 apll %s must have parent(s)\n", node->name);
+ pr_err("dra7 apll %pOFn must have parent(s)\n", node);
goto cleanup;
}
@@ -366,7 +366,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
init->num_parents = of_clk_get_parent_count(node);
if (init->num_parents != 1) {
- pr_err("%s must have one parent\n", node->name);
+ pr_err("%pOFn must have one parent\n", node);
goto cleanup;
}
@@ -374,13 +374,13 @@ static void __init of_omap2_apll_setup(struct device_node *node)
init->parent_names = &parent_name;
if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
- pr_err("%s missing clock-frequency\n", node->name);
+ pr_err("%pOFn missing clock-frequency\n", node);
goto cleanup;
}
clk_hw->fixed_rate = val;
if (of_property_read_u32(node, "ti,bit-shift", &val)) {
- pr_err("%s missing bit-shift\n", node->name);
+ pr_err("%pOFn missing bit-shift\n", node);
goto cleanup;
}
@@ -389,7 +389,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
ad->autoidle_mask = 0x3 << val;
if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
- pr_err("%s missing idlest-shift\n", node->name);
+ pr_err("%pOFn missing idlest-shift\n", node);
goto cleanup;
}
diff --git a/drivers/clk/ti/clk-33xx-compat.c b/drivers/clk/ti/clk-33xx-compat.c
new file mode 100644
index 000000000000..3e07f127912a
--- /dev/null
+++ b/drivers/clk/ti/clk-33xx-compat.c
@@ -0,0 +1,218 @@
+/*
+ * AM33XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/am3.h>
+
+#include "clock.h"
+
+static const char * const am3_gpio1_dbclk_parents[] __initconst = {
+ "l4_per_cm:clk:0138:0",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio2_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio3_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
+ { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
+ { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
+ { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
+ { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },
+ { AM3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
+ { AM3_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" },
+ { AM3_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM3_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" },
+ { AM3_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { AM3_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { AM3_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { AM3_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { AM3_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
+ { AM3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" },
+ { AM3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
+ { AM3_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
+ { AM3_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" },
+ { AM3_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { AM3_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { AM3_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM3_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" },
+ { AM3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM3_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l4hs_clkdm" },
+ { AM3_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck", "clk_24mhz_clkdm" },
+ { 0 },
+};
+
+static const char * const am3_gpio0_dbclk_parents[] __initconst = {
+ "gpio0_dbclk_mux_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio1_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio0_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const am3_dbg_sysclk_ck_parents[] __initconst = {
+ "sys_clkin_ck",
+ NULL,
+};
+
+static const char * const am3_trace_pmd_clk_mux_ck_parents[] __initconst = {
+ "l4_wkup_cm:clk:0010:19",
+ "l4_wkup_cm:clk:0010:30",
+ NULL,
+};
+
+static const char * const am3_trace_clk_div_ck_parents[] __initconst = {
+ "l4_wkup_cm:clk:0010:20",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data am3_trace_clk_div_ck_data __initconst = {
+ .max_div = 64,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const char * const am3_stm_clk_div_ck_parents[] __initconst = {
+ "l4_wkup_cm:clk:0010:22",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data am3_stm_clk_div_ck_data __initconst = {
+ .max_div = 64,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const char * const am3_dbg_clka_ck_parents[] __initconst = {
+ "dpll_core_m4_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am3_debugss_bit_data[] __initconst = {
+ { 19, TI_CLK_GATE, am3_dbg_sysclk_ck_parents, NULL },
+ { 20, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL },
+ { 22, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL },
+ { 24, TI_CLK_DIVIDER, am3_trace_clk_div_ck_parents, &am3_trace_clk_div_ck_data },
+ { 27, TI_CLK_DIVIDER, am3_stm_clk_div_ck_parents, &am3_stm_clk_div_ck_data },
+ { 30, TI_CLK_GATE, am3_dbg_clka_ck_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = {
+ { AM3_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0010:24", "l3_aon_clkdm" },
+ { AM3_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck", "l4_wkup_aon_clkdm" },
+ { AM3_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM3_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM3_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
+ { AM3_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
+ { AM3_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
+ { AM3_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
+ { AM3_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
+ { AM3_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
+ { AM3_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = {
+ { AM3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_cefuse_clkctrl_regs[] __initconst = {
+ { AM3_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data am3_clkctrl_compat_data[] __initconst = {
+ { 0x44e00014, am3_l4_per_clkctrl_regs },
+ { 0x44e00404, am3_l4_wkup_clkctrl_regs },
+ { 0x44e00604, am3_mpu_clkctrl_regs },
+ { 0x44e00800, am3_l4_rtc_clkctrl_regs },
+ { 0x44e00904, am3_gfx_l3_clkctrl_regs },
+ { 0x44e00a20, am3_l4_cefuse_clkctrl_regs },
+ { 0 },
+};
+
+struct ti_dt_clk am33xx_compat_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "l4_per_cm:0138:0"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "clkdiv32k_ick", "l4_per_cm:0138:0"),
+ DT_CLK(NULL, "dbg_clka_ck", "l4_wkup_cm:0010:30"),
+ DT_CLK(NULL, "dbg_sysclk_ck", "l4_wkup_cm:0010:19"),
+ DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0004:18"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0098:18"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:009c:18"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:00a0:18"),
+ DT_CLK(NULL, "stm_clk_div_ck", "l4_wkup_cm:0010:27"),
+ DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l4_wkup_cm:0010:22"),
+ DT_CLK(NULL, "trace_clk_div_ck", "l4_wkup_cm:0010:24"),
+ DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l4_wkup_cm:0010:20"),
+ { .node_name = NULL },
+};
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 12e0a2d19911..a360d3109555 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -24,7 +24,7 @@
#include "clock.h"
static const char * const am3_gpio1_dbclk_parents[] __initconst = {
- "l4_per_cm:clk:0138:0",
+ "clk-24mhz-clkctrl:0000:0",
NULL,
};
@@ -43,58 +43,86 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
- { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
- { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
- { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
- { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },
- { AM3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
- { AM3_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" },
- { AM3_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
- { AM3_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" },
- { AM3_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM3_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
- { AM3_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
- { AM3_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
- { AM3_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
- { AM3_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
- { AM3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" },
- { AM3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
- { AM3_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
- { AM3_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" },
- { AM3_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
- { AM3_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
- { AM3_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
- { AM3_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" },
- { AM3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM3_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l4hs_clkdm" },
- { AM3_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM3_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck", "clk_24mhz_clkdm" },
+static const struct omap_clkctrl_reg_data am3_l4ls_clkctrl_regs[] __initconst = {
+ { AM3_L4LS_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM3_L4LS_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { AM3_L4LS_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { AM3_L4LS_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { AM3_L4LS_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { AM3_L4LS_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
+ { AM3_L4LS_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
+ { AM3_L4LS_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
+ { AM3_L4LS_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { AM3_L4LS_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { AM3_L4LS_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM3_L4LS_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l3s_clkctrl_regs[] __initconst = {
+ { AM3_L3S_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck" },
+ { AM3_L3S_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM3_L3S_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck" },
+ { AM3_L3S_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck" },
+ { AM3_L3S_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l3_clkctrl_regs[] __initconst = {
+ { AM3_L3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck" },
+ { AM3_L3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck" },
+ { AM3_L3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4hs_clkctrl_regs[] __initconst = {
+ { AM3_L4HS_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_pruss_ocp_clkctrl_regs[] __initconst = {
+ { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_cpsw_125mhz_clkctrl_regs[] __initconst = {
+ { AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_lcdc_clkctrl_regs[] __initconst = {
+ { AM3_LCDC_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_clk_24mhz_clkctrl_regs[] __initconst = {
+ { AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck" },
{ 0 },
};
@@ -108,19 +136,33 @@ static const struct omap_clkctrl_bit_data am3_gpio1_bit_data[] __initconst = {
{ 0 },
};
+static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = {
+ { AM3_L4_WKUP_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM3_L4_WKUP_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM3_L4_WKUP_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
+ { AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
+ { AM3_L4_WKUP_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
+ { AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
+ { AM3_L4_WKUP_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
+ { 0 },
+};
+
static const char * const am3_dbg_sysclk_ck_parents[] __initconst = {
"sys_clkin_ck",
NULL,
};
static const char * const am3_trace_pmd_clk_mux_ck_parents[] __initconst = {
- "l4_wkup_cm:clk:0010:19",
- "l4_wkup_cm:clk:0010:30",
+ "l3-aon-clkctrl:0000:19",
+ "l3-aon-clkctrl:0000:30",
NULL,
};
static const char * const am3_trace_clk_div_ck_parents[] __initconst = {
- "l4_wkup_cm:clk:0010:20",
+ "l3-aon-clkctrl:0000:20",
NULL,
};
@@ -130,7 +172,7 @@ static const struct omap_clkctrl_div_data am3_trace_clk_div_ck_data __initconst
};
static const char * const am3_stm_clk_div_ck_parents[] __initconst = {
- "l4_wkup_cm:clk:0010:22",
+ "l3-aon-clkctrl:0000:22",
NULL,
};
@@ -154,66 +196,69 @@ static const struct omap_clkctrl_bit_data am3_debugss_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = {
- { AM3_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
- { AM3_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
- { AM3_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
- { AM3_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0010:24", "l3_aon_clkdm" },
- { AM3_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck", "l4_wkup_aon_clkdm" },
- { AM3_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
- { AM3_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
- { AM3_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
- { AM3_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
- { AM3_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
- { AM3_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
- { AM3_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
+static const struct omap_clkctrl_reg_data am3_l3_aon_clkctrl_regs[] __initconst = {
+ { AM3_L3_AON_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l3-aon-clkctrl:0000:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_wkup_aon_clkctrl_regs[] __initconst = {
+ { AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
- { AM3_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
+ { AM3_MPU_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
- { AM3_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = {
- { AM3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am3_l4_cefuse_clkctrl_regs[] __initconst = {
- { AM3_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { AM3_L4_CEFUSE_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
{ 0 },
};
const struct omap_clkctrl_data am3_clkctrl_data[] __initconst = {
- { 0x44e00014, am3_l4_per_clkctrl_regs },
- { 0x44e00404, am3_l4_wkup_clkctrl_regs },
- { 0x44e00604, am3_mpu_clkctrl_regs },
+ { 0x44e00038, am3_l4ls_clkctrl_regs },
+ { 0x44e0001c, am3_l3s_clkctrl_regs },
+ { 0x44e00024, am3_l3_clkctrl_regs },
+ { 0x44e00120, am3_l4hs_clkctrl_regs },
+ { 0x44e000e8, am3_pruss_ocp_clkctrl_regs },
+ { 0x44e00000, am3_cpsw_125mhz_clkctrl_regs },
+ { 0x44e00018, am3_lcdc_clkctrl_regs },
+ { 0x44e0014c, am3_clk_24mhz_clkctrl_regs },
+ { 0x44e00400, am3_l4_wkup_clkctrl_regs },
+ { 0x44e00414, am3_l3_aon_clkctrl_regs },
+ { 0x44e004b0, am3_l4_wkup_aon_clkctrl_regs },
+ { 0x44e00600, am3_mpu_clkctrl_regs },
{ 0x44e00800, am3_l4_rtc_clkctrl_regs },
- { 0x44e00904, am3_gfx_l3_clkctrl_regs },
- { 0x44e00a20, am3_l4_cefuse_clkctrl_regs },
+ { 0x44e00900, am3_gfx_l3_clkctrl_regs },
+ { 0x44e00a00, am3_l4_cefuse_clkctrl_regs },
{ 0 },
};
static struct ti_dt_clk am33xx_clks[] = {
- DT_CLK(NULL, "timer_32k_ck", "l4_per_cm:0138:0"),
+ DT_CLK(NULL, "timer_32k_ck", "clk-24mhz-clkctrl:0000:0"),
DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK(NULL, "clkdiv32k_ick", "l4_per_cm:0138:0"),
- DT_CLK(NULL, "dbg_clka_ck", "l4_wkup_cm:0010:30"),
- DT_CLK(NULL, "dbg_sysclk_ck", "l4_wkup_cm:0010:19"),
- DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0004:18"),
- DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0098:18"),
- DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:009c:18"),
- DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:00a0:18"),
- DT_CLK(NULL, "stm_clk_div_ck", "l4_wkup_cm:0010:27"),
- DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l4_wkup_cm:0010:22"),
- DT_CLK(NULL, "trace_clk_div_ck", "l4_wkup_cm:0010:24"),
- DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l4_wkup_cm:0010:20"),
+ DT_CLK(NULL, "clkdiv32k_ick", "clk-24mhz-clkctrl:0000:0"),
+ DT_CLK(NULL, "dbg_clka_ck", "l3-aon-clkctrl:0000:30"),
+ DT_CLK(NULL, "dbg_sysclk_ck", "l3-aon-clkctrl:0000:19"),
+ DT_CLK(NULL, "gpio0_dbclk", "l4-wkup-clkctrl:0008:18"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4ls-clkctrl:0074:18"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4ls-clkctrl:0078:18"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4ls-clkctrl:007c:18"),
+ DT_CLK(NULL, "stm_clk_div_ck", "l3-aon-clkctrl:0000:27"),
+ DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l3-aon-clkctrl:0000:22"),
+ DT_CLK(NULL, "trace_clk_div_ck", "l3-aon-clkctrl:0000:24"),
+ DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l3-aon-clkctrl:0000:20"),
{ .node_name = NULL },
};
@@ -232,7 +277,10 @@ int __init am33xx_dt_clk_init(void)
{
struct clk *clk1, *clk2;
- ti_dt_clocks_register(am33xx_clks);
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ ti_dt_clocks_register(am33xx_compat_clks);
+ else
+ ti_dt_clocks_register(am33xx_clks);
omap2_clk_disable_autoidle_all();
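
The hunk above makes am33xx_dt_clk_init() pick between the legacy alias table (am33xx_compat_clks) and the new clkctrl-style table (am33xx_clks) at runtime, keyed off the TI_CLK_CLKCTRL_COMPAT feature flag. The short userspace sketch below is not part of the patch; it only mirrors that selection pattern. ti_dt_clk, DT_CLK, ti_dt_clocks_register() and ti_clk_get_features() are the kernel symbols visible in this diff, while the simplified stand-ins here (struct alias, FLAG_CLKCTRL_COMPAT as a plain bit, register_aliases()) are invented purely for illustration.

/* Hedged sketch: runtime selection between two sentinel-terminated alias
 * tables, mirroring the TI_CLK_CLKCTRL_COMPAT check added above.  All
 * types and names below are illustrative stand-ins, not kernel API.
 */
#include <stdio.h>

#define FLAG_CLKCTRL_COMPAT (1 << 0)	/* stands in for TI_CLK_CLKCTRL_COMPAT */

struct alias {				/* stands in for struct ti_dt_clk */
	const char *con_id;
	const char *node_name;		/* NULL entry terminates the table */
};

static const struct alias compat_aliases[] = {
	{ "timer_32k_ck", "l4_per_cm:0138:0" },
	{ NULL, NULL },
};

static const struct alias new_aliases[] = {
	{ "timer_32k_ck", "clk-24mhz-clkctrl:0000:0" },
	{ NULL, NULL },
};

/* Loosely corresponds to ti_dt_clocks_register(); here it just prints. */
static void register_aliases(const struct alias *t)
{
	for (; t->node_name; t++)
		printf("alias %-16s -> %s\n", t->con_id, t->node_name);
}

int main(void)
{
	/* Pretend the SoC data asked for the compat (legacy) naming. */
	unsigned long flags = FLAG_CLKCTRL_COMPAT;

	if (flags & FLAG_CLKCTRL_COMPAT)
		register_aliases(compat_aliases);
	else
		register_aliases(new_aliases);
	return 0;
}
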
diff --git a/drivers/clk/ti/clk-43xx-compat.c b/drivers/clk/ti/clk-43xx-compat.c
new file mode 100644
index 000000000000..513039843392
--- /dev/null
+++ b/drivers/clk/ti/clk-43xx-compat.c
@@ -0,0 +1,225 @@
+/*
+ * AM43XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/am4.h>
+
+#include "clock.h"
+
+static const char * const am4_synctimer_32kclk_parents[] __initconst = {
+ "mux_synctimer32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_counter_32k_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_synctimer_32kclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const am4_gpio0_dbclk_parents[] __initconst = {
+ "gpio0_dbclk_mux_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio0_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4_wkup_clkctrl_regs[] __initconst = {
+ { AM4_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck", "l3s_tsc_clkdm" },
+ { AM4_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
+ { AM4_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "sys_clkin_ck" },
+ { AM4_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0210:8" },
+ { AM4_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck", "l4_wkup_clkdm" },
+ { AM4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck", "l4_wkup_clkdm" },
+ { AM4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" },
+ { AM4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" },
+ { AM4_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck", "l4_wkup_clkdm" },
+ { AM4_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck", "l4_wkup_clkdm" },
+ { AM4_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
+ { AM4_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = {
+ { AM4_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = {
+ { AM4_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = {
+ { AM4_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { 0 },
+};
+
+static const char * const am4_usb_otg_ss0_refclk960m_parents[] __initconst = {
+ "dpll_per_clkdcoldo",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_usb_otg_ss0_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_usb_otg_ss1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const char * const am4_gpio1_dbclk_parents[] __initconst = {
+ "clkdiv32k_ick",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst = {
+ { AM4_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" },
+ { AM4_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" },
+ { AM4_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" },
+ { AM4_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
+ { AM4_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l3_clkdm" },
+ { AM4_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
+ { AM4_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" },
+ { AM4_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" },
+ { AM4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" },
+ { AM4_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
+ { AM4_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
+ { AM4_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
+ { AM4_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" },
+ { AM4_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
+ { AM4_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
+ { AM4_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" },
+ { AM4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
+ { AM4_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { AM4_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { AM4_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { AM4_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { AM4_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { AM4_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { AM4_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" },
+ { AM4_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" },
+ { AM4_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" },
+ { AM4_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" },
+ { AM4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
+ { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
+ { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data am4_clkctrl_compat_data[] __initconst = {
+ { 0x44df2820, am4_l4_wkup_clkctrl_regs },
+ { 0x44df8320, am4_mpu_clkctrl_regs },
+ { 0x44df8420, am4_gfx_l3_clkctrl_regs },
+ { 0x44df8520, am4_l4_rtc_clkctrl_regs },
+ { 0x44df8820, am4_l4_per_clkctrl_regs },
+ { 0 },
+};
+
+const struct omap_clkctrl_data am438x_clkctrl_compat_data[] __initconst = {
+ { 0x44df2820, am4_l4_wkup_clkctrl_regs },
+ { 0x44df8320, am4_mpu_clkctrl_regs },
+ { 0x44df8420, am4_gfx_l3_clkctrl_regs },
+ { 0x44df8820, am4_l4_per_clkctrl_regs },
+ { 0 },
+};
+
+struct ti_dt_clk am43xx_compat_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0348:8"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0458:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0460:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0468:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0470:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0478:8"),
+ DT_CLK(NULL, "synctimer_32kclk", "l4_wkup_cm:0210:8"),
+ DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l4_per_cm:0240:8"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l4_per_cm:0248:8"),
+ { .node_name = NULL },
+};
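
Both the compat and the non-compat files above describe each clock-manager instance as a physical base address paired with an array of register descriptors, with zero-filled entries closing each list (the omap_clkctrl_data / omap_clkctrl_reg_data tables). The standalone sketch below, which is not part of the patch, walks such a two-level layout in plain C; the struct names, the field set, and the assumption that consumers stop at a zero entry are simplifications invented for illustration, with only the base address 0x44df2a20 and the parent clock names borrowed from the tables in this diff.

/* Hedged sketch: iterating a two-level, zero-terminated table layout like
 * the { base address, register list } pairs declared above.  The types are
 * illustrative stand-ins, not the kernel's omap_clkctrl_* structures.
 */
#include <stdio.h>

struct reg_data {			/* ~omap_clkctrl_reg_data (simplified) */
	unsigned int offset;		/* register offset; 0 assumed to end list */
	const char *parent;
};

struct cm_data {			/* ~omap_clkctrl_data (simplified) */
	unsigned int base;		/* CM instance base; 0 assumed to end list */
	const struct reg_data *regs;
};

static const struct reg_data wkup_regs[] = {
	{ 0x020, "sys_clkin_ck" },			/* offsets are made up */
	{ 0x028, "dpll_per_m2_div4_wkupdm_ck" },
	{ 0 },
};

static const struct cm_data clkctrl_data[] = {
	{ 0x44df2a20, wkup_regs },
	{ 0 },
};

int main(void)
{
	const struct cm_data *cm;
	const struct reg_data *r;

	for (cm = clkctrl_data; cm->base; cm++)		/* outer sentinel: base == 0 */
		for (r = cm->regs; r->offset; r++)	/* inner sentinel: offset == 0 */
			printf("0x%08x + 0x%03x -> parent %s\n",
			       cm->base, r->offset, r->parent);
	return 0;
}
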
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 63c5ddb50187..2782d91838ac 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -23,6 +23,11 @@
#include "clock.h"
+static const struct omap_clkctrl_reg_data am4_l3s_tsc_clkctrl_regs[] __initconst = {
+ { AM4_L3S_TSC_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
+ { 0 },
+};
+
static const char * const am4_synctimer_32kclk_parents[] __initconst = {
"mux_synctimer32k_ck",
NULL,
@@ -33,6 +38,12 @@ static const struct omap_clkctrl_bit_data am4_counter_32k_bit_data[] __initconst
{ 0 },
};
+static const struct omap_clkctrl_reg_data am4_l4_wkup_aon_clkctrl_regs[] __initconst = {
+ { AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sys_clkin_ck" },
+ { AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4-wkup-aon-clkctrl:0008:8" },
+ { 0 },
+};
+
static const char * const am4_gpio0_dbclk_parents[] __initconst = {
"gpio0_dbclk_mux_ck",
NULL,
@@ -44,33 +55,45 @@ static const struct omap_clkctrl_bit_data am4_gpio1_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data am4_l4_wkup_clkctrl_regs[] __initconst = {
- { AM4_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck", "l3s_tsc_clkdm" },
- { AM4_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
- { AM4_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "sys_clkin_ck" },
- { AM4_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0210:8" },
- { AM4_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck", "l4_wkup_clkdm" },
- { AM4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck", "l4_wkup_clkdm" },
- { AM4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" },
- { AM4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" },
- { AM4_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck", "l4_wkup_clkdm" },
- { AM4_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck", "l4_wkup_clkdm" },
- { AM4_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
- { AM4_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" },
+ { AM4_L4_WKUP_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { AM4_L4_WKUP_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
+ { AM4_L4_WKUP_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
+ { AM4_L4_WKUP_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM4_L4_WKUP_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
+ { AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
+ { AM4_L4_WKUP_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { AM4_L4_WKUP_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = {
- { AM4_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
+ { AM4_MPU_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = {
- { AM4_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = {
- { AM4_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l3_clkctrl_regs[] __initconst = {
+ { AM4_L3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck" },
+ { AM4_L3_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk" },
{ 0 },
};
@@ -89,6 +112,24 @@ static const struct omap_clkctrl_bit_data am4_usb_otg_ss1_bit_data[] __initconst
{ 0 },
};
+static const struct omap_clkctrl_reg_data am4_l3s_clkctrl_regs[] __initconst = {
+ { AM4_L3S_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3S_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3S_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck" },
+ { AM4_L3S_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck" },
+ { AM4_L3S_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_L3S_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_pruss_ocp_clkctrl_regs[] __initconst = {
+ { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" },
+ { 0 },
+};
+
static const char * const am4_gpio1_dbclk_parents[] __initconst = {
"clkdiv32k_ick",
NULL,
@@ -119,108 +160,115 @@ static const struct omap_clkctrl_bit_data am4_gpio6_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst = {
- { AM4_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" },
- { AM4_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" },
- { AM4_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" },
- { AM4_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
- { AM4_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l3_clkdm" },
- { AM4_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
- { AM4_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" },
- { AM4_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" },
- { AM4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" },
- { AM4_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
- { AM4_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
- { AM4_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" },
- { AM4_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" },
- { AM4_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
- { AM4_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
- { AM4_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" },
- { AM4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
- { AM4_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
- { AM4_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
- { AM4_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
- { AM4_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
- { AM4_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
- { AM4_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
- { AM4_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
- { AM4_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
- { AM4_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" },
- { AM4_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" },
- { AM4_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" },
- { AM4_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" },
- { AM4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
- { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
- { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
- { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
- { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
+static const struct omap_clkctrl_reg_data am4_l4ls_clkctrl_regs[] __initconst = {
+ { AM4_L4LS_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
+ { AM4_L4LS_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
+ { AM4_L4LS_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" },
+ { AM4_L4LS_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_L4LS_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_L4LS_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
+ { AM4_L4LS_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { AM4_L4LS_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { AM4_L4LS_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { AM4_L4LS_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { AM4_L4LS_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { AM4_L4LS_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { AM4_L4LS_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" },
+ { AM4_L4LS_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" },
+ { AM4_L4LS_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" },
+ { AM4_L4LS_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" },
+ { AM4_L4LS_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_emif_clkctrl_regs[] __initconst = {
+ { AM4_EMIF_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_dss_clkctrl_regs[] __initconst = {
+ { AM4_DSS_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_cpsw_125mhz_clkctrl_regs[] __initconst = {
+ { AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
{ 0 },
};
const struct omap_clkctrl_data am4_clkctrl_data[] __initconst = {
- { 0x44df2820, am4_l4_wkup_clkctrl_regs },
+ { 0x44df2920, am4_l3s_tsc_clkctrl_regs },
+ { 0x44df2a28, am4_l4_wkup_aon_clkctrl_regs },
+ { 0x44df2a20, am4_l4_wkup_clkctrl_regs },
{ 0x44df8320, am4_mpu_clkctrl_regs },
{ 0x44df8420, am4_gfx_l3_clkctrl_regs },
{ 0x44df8520, am4_l4_rtc_clkctrl_regs },
- { 0x44df8820, am4_l4_per_clkctrl_regs },
+ { 0x44df8820, am4_l3_clkctrl_regs },
+ { 0x44df8868, am4_l3s_clkctrl_regs },
+ { 0x44df8b20, am4_pruss_ocp_clkctrl_regs },
+ { 0x44df8c20, am4_l4ls_clkctrl_regs },
+ { 0x44df8f20, am4_emif_clkctrl_regs },
+ { 0x44df9220, am4_dss_clkctrl_regs },
+ { 0x44df9320, am4_cpsw_125mhz_clkctrl_regs },
{ 0 },
};
const struct omap_clkctrl_data am438x_clkctrl_data[] __initconst = {
- { 0x44df2820, am4_l4_wkup_clkctrl_regs },
+ { 0x44df2920, am4_l3s_tsc_clkctrl_regs },
+ { 0x44df2a28, am4_l4_wkup_aon_clkctrl_regs },
+ { 0x44df2a20, am4_l4_wkup_clkctrl_regs },
{ 0x44df8320, am4_mpu_clkctrl_regs },
{ 0x44df8420, am4_gfx_l3_clkctrl_regs },
- { 0x44df8820, am4_l4_per_clkctrl_regs },
+ { 0x44df8820, am4_l3_clkctrl_regs },
+ { 0x44df8868, am4_l3s_clkctrl_regs },
+ { 0x44df8b20, am4_pruss_ocp_clkctrl_regs },
+ { 0x44df8c20, am4_l4ls_clkctrl_regs },
+ { 0x44df8f20, am4_emif_clkctrl_regs },
+ { 0x44df9220, am4_dss_clkctrl_regs },
+ { 0x44df9320, am4_cpsw_125mhz_clkctrl_regs },
{ 0 },
};
static struct ti_dt_clk am43xx_clks[] = {
DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
- DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0348:8"),
- DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0458:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0460:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0468:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0470:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0478:8"),
- DT_CLK(NULL, "synctimer_32kclk", "l4_wkup_cm:0210:8"),
- DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l4_per_cm:0240:8"),
- DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l4_per_cm:0248:8"),
+ DT_CLK(NULL, "gpio0_dbclk", "l4-wkup-clkctrl:0148:8"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4ls-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4ls-clkctrl:0060:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4ls-clkctrl:0068:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4ls-clkctrl:0070:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4ls-clkctrl:0078:8"),
+ DT_CLK(NULL, "synctimer_32kclk", "l4-wkup-aon-clkctrl:0008:8"),
+ DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l3s-clkctrl:01f8:8"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3s-clkctrl:0200:8"),
{ .node_name = NULL },
};
@@ -228,7 +276,10 @@ int __init am43xx_dt_clk_init(void)
{
struct clk *clk1, *clk2;
- ti_dt_clocks_register(am43xx_clks);
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ ti_dt_clocks_register(am43xx_compat_clks);
+ else
+ ti_dt_clocks_register(am43xx_clks);
omap2_clk_disable_autoidle_all();
diff --git a/drivers/clk/ti/clk-7xx-compat.c b/drivers/clk/ti/clk-7xx-compat.c
new file mode 100644
index 000000000000..e3cb7f0b03ae
--- /dev/null
+++ b/drivers/clk/ti/clk-7xx-compat.c
@@ -0,0 +1,823 @@
+/*
+ * DRA7 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/dra7.h>
+
+#include "clock.h"
+
+#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
+#define DRA7_DPLL_USB_DEFFREQ 960000000
+
+static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = {
+ { DRA7_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_mcasp1_aux_gfclk_mux_parents[] __initconst = {
+ "per_abe_x1_gfclk2_div",
+ "video1_clk2_div",
+ "video2_clk2_div",
+ "hdmi_clk2_div",
+ NULL,
+};
+
+static const char * const dra7_mcasp1_ahclkx_mux_parents[] __initconst = {
+ "abe_24m_fclk",
+ "abe_sys_clk_div",
+ "func_24m_clk",
+ "atl_clkin3_ck",
+ "atl_clkin2_ck",
+ "atl_clkin1_ck",
+ "atl_clkin0_ck",
+ "sys_clkin2",
+ "ref_clkin0_ck",
+ "ref_clkin1_ck",
+ "ref_clkin2_ck",
+ "ref_clkin3_ck",
+ "mlb_clk",
+ "mlbp_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp1_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_timer5_gfclk_mux_parents[] __initconst = {
+ "timer_sys_clk_div",
+ "sys_32k_ck",
+ "sys_clkin2",
+ "ref_clkin0_ck",
+ "ref_clkin1_ck",
+ "ref_clkin2_ck",
+ "ref_clkin3_ck",
+ "abe_giclk_div",
+ "video1_div_clk",
+ "video2_div_clk",
+ "hdmi_div_clk",
+ "clkoutmux0_clk_mux",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_uart6_gfclk_mux_parents[] __initconst = {
+ "func_48m_fclk",
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = {
+ { DRA7_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0010:22" },
+ { DRA7_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0018:24" },
+ { DRA7_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0020:24" },
+ { DRA7_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0028:24" },
+ { DRA7_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0030:24" },
+ { DRA7_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0040:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = {
+ { DRA7_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
+ { DRA7_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = {
+ { DRA7_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = {
+ { DRA7_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = {
+ { DRA7_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = {
+ "sys_32k_ck",
+ "video1_clkin_ck",
+ "video2_clkin_ck",
+ "hdmi_clkin_ck",
+ NULL,
+};
+
+static const char * const dra7_atl_gfclk_mux_parents[] __initconst = {
+ "l3_iclk_div",
+ "dpll_abe_m2_ck",
+ "atl_cm:clk:0000:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_atl_dpll_clk_mux_parents, NULL },
+ { 26, TI_CLK_MUX, dra7_atl_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = {
+ { DRA7_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl_cm:clk:0000:26" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = {
+ { DRA7_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = {
+ { DRA7_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const char * const dra7_dss_dss_clk_parents[] __initconst = {
+ "dpll_per_h12x2_ck",
+ NULL,
+};
+
+static const char * const dra7_dss_48mhz_clk_parents[] __initconst = {
+ "func_48m_fclk",
+ NULL,
+};
+
+static const char * const dra7_dss_hdmi_clk_parents[] __initconst = {
+ "hdmi_dpll_clk_mux",
+ NULL,
+};
+
+static const char * const dra7_dss_32khz_clk_parents[] __initconst = {
+ "sys_32k_ck",
+ NULL,
+};
+
+static const char * const dra7_dss_video1_clk_parents[] __initconst = {
+ "video1_dpll_clk_mux",
+ NULL,
+};
+
+static const char * const dra7_dss_video2_clk_parents[] __initconst = {
+ "video2_dpll_clk_mux",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_dss_clk_parents, NULL },
+ { 9, TI_CLK_GATE, dra7_dss_48mhz_clk_parents, NULL },
+ { 10, TI_CLK_GATE, dra7_dss_hdmi_clk_parents, NULL },
+ { 11, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 12, TI_CLK_GATE, dra7_dss_video1_clk_parents, NULL },
+ { 13, TI_CLK_GATE, dra7_dss_video2_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = {
+ { DRA7_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
+ { DRA7_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
+ "func_128m_clk",
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const char * const dra7_mmc1_fclk_div_parents[] __initconst = {
+ "l3init_cm:clk:0008:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc1_fclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc1_fclk_div_parents, &dra7_mmc1_fclk_div_data },
+ { 0 },
+};
+
+static const char * const dra7_mmc2_fclk_div_parents[] __initconst = {
+ "l3init_cm:clk:0010:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc2_fclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc2_fclk_div_parents, &dra7_mmc2_fclk_div_data },
+ { 0 },
+};
+
+static const char * const dra7_usb_otg_ss2_refclk960m_parents[] __initconst = {
+ "l3init_960m_gfclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_usb_otg_ss2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_sata_ref_clk_parents[] __initconst = {
+ "sys_clkin1",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_sata_ref_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = {
+ "apll_pcie_ck",
+ NULL,
+};
+
+static const char * const dra7_optfclk_pciephy1_div_clk_parents[] __initconst = {
+ "optfclk_pciephy_div",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_pcie1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
+ { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
+ { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = {
+ "dpll_gmac_h11x2_ck",
+ "rmii_clk_ck",
+ NULL,
+};
+
+static const char * const dra7_gmac_rft_clk_mux_parents[] __initconst = {
+ "video1_clkin_ck",
+ "video2_clkin_ck",
+ "dpll_abe_m2_ck",
+ "hdmi_clkin_ck",
+ "l3_iclk_div",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_rmii_50mhz_clk_mux_parents, NULL },
+ { 25, TI_CLK_MUX, dra7_gmac_rft_clk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = {
+ { DRA7_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
+ { DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
+ { DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
+ { DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
+ { DRA7_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck", "gmac_clkdm" },
+ { DRA7_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { DRA7_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { DRA7_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_timer10_gfclk_mux_parents[] __initconst = {
+ "timer_sys_clk_div",
+ "sys_32k_ck",
+ "sys_clkin2",
+ "ref_clkin0_ck",
+ "ref_clkin1_ck",
+ "ref_clkin2_ck",
+ "ref_clkin3_ck",
+ "abe_giclk_div",
+ "video1_div_clk",
+ "video2_div_clk",
+ "hdmi_div_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer11_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = {
+ "l4per_cm:clk:0120:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc3_gfclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc3_gfclk_div_parents, &dra7_mmc3_gfclk_div_data },
+ { 0 },
+};
+
+static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = {
+ "l4per_cm:clk:0128:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc4_gfclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc4_gfclk_div_parents, &dra7_mmc4_gfclk_div_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = {
+ "func_128m_clk",
+ "dpll_per_h13x2_ck",
+ NULL,
+};
+
+static const char * const dra7_qspi_gfclk_div_parents[] __initconst = {
+ "l4per_cm:clk:0138:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_qspi_gfclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_qspi_gfclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_qspi_gfclk_div_parents, &dra7_qspi_gfclk_div_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp8_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp4_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp6_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = {
+ { DRA7_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per2_clkdm" },
+ { DRA7_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per3_clkdm" },
+ { DRA7_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
+ { DRA7_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
+ { DRA7_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0038:24" },
+ { DRA7_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0040:24" },
+ { DRA7_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0048:24" },
+ { DRA7_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0050:24" },
+ { DRA7_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
+ { DRA7_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
+ { DRA7_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
+ { DRA7_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
+ { DRA7_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00c8:24", "l4per3_clkdm" },
+ { DRA7_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d0:24", "l4per3_clkdm" },
+ { DRA7_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d8:24", "l4per3_clkdm" },
+ { DRA7_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0120:25" },
+ { DRA7_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0128:25" },
+ { DRA7_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0130:24", "l4per3_clkdm" },
+ { DRA7_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0138:25", "l4per2_clkdm" },
+ { DRA7_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0140:24" },
+ { DRA7_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0148:24" },
+ { DRA7_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0150:24" },
+ { DRA7_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0158:24" },
+ { DRA7_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0160:22", "l4per2_clkdm" },
+ { DRA7_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0168:22", "l4per2_clkdm" },
+ { DRA7_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0170:24" },
+ { DRA7_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0178:22", "l4per2_clkdm" },
+ { DRA7_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0190:24", "l4per2_clkdm" },
+ { DRA7_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0198:22", "l4per2_clkdm" },
+ { DRA7_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" },
+ { DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" },
+ { DRA7_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e8:24", "l4per2_clkdm" },
+ { DRA7_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1", "l4per2_clkdm" },
+ { DRA7_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0204:22", "l4per2_clkdm" },
+ { DRA7_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0208:22", "l4per2_clkdm" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_dcan1_sys_clk_mux_parents[] __initconst = {
+ "sys_clkin1",
+ "sys_clkin2",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_dcan1_sys_clk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = {
+ { DRA7_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
+ { DRA7_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
+ { DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" },
+ { DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" },
+ { DRA7_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk"},
+ { 0 },
+};
+
+const struct omap_clkctrl_data dra7_clkctrl_compat_data[] __initconst = {
+ { 0x4a005320, dra7_mpu_clkctrl_regs },
+ { 0x4a005540, dra7_ipu_clkctrl_regs },
+ { 0x4a005740, dra7_rtc_clkctrl_regs },
+ { 0x4a008620, dra7_coreaon_clkctrl_regs },
+ { 0x4a008720, dra7_l3main1_clkctrl_regs },
+ { 0x4a008a20, dra7_dma_clkctrl_regs },
+ { 0x4a008b20, dra7_emif_clkctrl_regs },
+ { 0x4a008c00, dra7_atl_clkctrl_regs },
+ { 0x4a008d20, dra7_l4cfg_clkctrl_regs },
+ { 0x4a008e20, dra7_l3instr_clkctrl_regs },
+ { 0x4a009120, dra7_dss_clkctrl_regs },
+ { 0x4a009320, dra7_l3init_clkctrl_regs },
+ { 0x4a009700, dra7_l4per_clkctrl_regs },
+ { 0x4ae07820, dra7_wkupaon_clkctrl_regs },
+ { 0 },
+};
+
+struct ti_dt_clk dra7xx_compat_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
+ DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
+ DT_CLK(NULL, "atl_dpll_clk_mux", "atl_cm:0000:24"),
+ DT_CLK(NULL, "atl_gfclk_mux", "atl_cm:0000:26"),
+ DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon_cm:0068:24"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
+ DT_CLK(NULL, "dss_hdmi_clk", "dss_cm:0000:10"),
+ DT_CLK(NULL, "dss_video1_clk", "dss_cm:0000:12"),
+ DT_CLK(NULL, "dss_video2_clk", "dss_cm:0000:13"),
+ DT_CLK(NULL, "gmac_rft_clk_mux", "l3init_cm:00b0:25"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0060:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0068:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0070:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0078:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0080:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:0110:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:0118:8"),
+ DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu_cm:0010:28"),
+ DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu_cm:0010:24"),
+ DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu_cm:0010:22"),
+ DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per_cm:0160:28"),
+ DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per_cm:0160:24"),
+ DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per_cm:0160:22"),
+ DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per_cm:0168:24"),
+ DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per_cm:0168:22"),
+ DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per_cm:0198:24"),
+ DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per_cm:0198:22"),
+ DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per_cm:0178:24"),
+ DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per_cm:0178:22"),
+ DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per_cm:0204:24"),
+ DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per_cm:0204:22"),
+ DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per_cm:0208:24"),
+ DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per_cm:0208:22"),
+ DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per_cm:0190:22"),
+ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per_cm:0190:24"),
+ DT_CLK(NULL, "mmc1_clk32k", "l3init_cm:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk_div", "l3init_cm:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
+ DT_CLK(NULL, "mmc2_clk32k", "l3init_cm:0010:8"),
+ DT_CLK(NULL, "mmc2_fclk_div", "l3init_cm:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
+ DT_CLK(NULL, "mmc3_clk32k", "l4per_cm:0120:8"),
+ DT_CLK(NULL, "mmc3_gfclk_div", "l4per_cm:0120:25"),
+ DT_CLK(NULL, "mmc3_gfclk_mux", "l4per_cm:0120:24"),
+ DT_CLK(NULL, "mmc4_clk32k", "l4per_cm:0128:8"),
+ DT_CLK(NULL, "mmc4_gfclk_div", "l4per_cm:0128:25"),
+ DT_CLK(NULL, "mmc4_gfclk_mux", "l4per_cm:0128:24"),
+ DT_CLK(NULL, "optfclk_pciephy1_32khz", "l3init_cm:0090:8"),
+ DT_CLK(NULL, "optfclk_pciephy1_clk", "l3init_cm:0090:9"),
+ DT_CLK(NULL, "optfclk_pciephy1_div_clk", "l3init_cm:0090:10"),
+ DT_CLK(NULL, "optfclk_pciephy2_32khz", "l3init_cm:0098:8"),
+ DT_CLK(NULL, "optfclk_pciephy2_clk", "l3init_cm:0098:9"),
+ DT_CLK(NULL, "optfclk_pciephy2_div_clk", "l3init_cm:0098:10"),
+ DT_CLK(NULL, "qspi_gfclk_div", "l4per_cm:0138:25"),
+ DT_CLK(NULL, "qspi_gfclk_mux", "l4per_cm:0138:24"),
+ DT_CLK(NULL, "rmii_50mhz_clk_mux", "l3init_cm:00b0:24"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0028:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0030:24"),
+ DT_CLK(NULL, "timer13_gfclk_mux", "l4per_cm:00c8:24"),
+ DT_CLK(NULL, "timer14_gfclk_mux", "l4per_cm:00d0:24"),
+ DT_CLK(NULL, "timer15_gfclk_mux", "l4per_cm:00d8:24"),
+ DT_CLK(NULL, "timer16_gfclk_mux", "l4per_cm:0130:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0038:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0040:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0048:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "ipu_cm:0018:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "ipu_cm:0020:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "ipu_cm:0028:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "ipu_cm:0030:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0050:24"),
+ DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon_cm:0060:24"),
+ DT_CLK(NULL, "uart1_gfclk_mux", "l4per_cm:0140:24"),
+ DT_CLK(NULL, "uart2_gfclk_mux", "l4per_cm:0148:24"),
+ DT_CLK(NULL, "uart3_gfclk_mux", "l4per_cm:0150:24"),
+ DT_CLK(NULL, "uart4_gfclk_mux", "l4per_cm:0158:24"),
+ DT_CLK(NULL, "uart5_gfclk_mux", "l4per_cm:0170:24"),
+ DT_CLK(NULL, "uart6_gfclk_mux", "ipu_cm:0040:24"),
+ DT_CLK(NULL, "uart7_gfclk_mux", "l4per_cm:01d0:24"),
+ DT_CLK(NULL, "uart8_gfclk_mux", "l4per_cm:01e0:24"),
+ DT_CLK(NULL, "uart9_gfclk_mux", "l4per_cm:01e8:24"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init_cm:00d0:8"),
+ DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init_cm:0020:8"),
+ { .node_name = NULL },
+};
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 71a122b2dc67..597fb4a59318 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -23,7 +23,28 @@
#define DRA7_DPLL_USB_DEFFREQ 960000000
static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = {
- { DRA7_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
+ { DRA7_MPU_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dsp1_clkctrl_regs[] __initconst = {
+ { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_ipu1_gfclk_mux_parents[] __initconst = {
+ "dpll_abe_m2x2_ck",
+ "dpll_core_h22x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmu_ipu1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_ipu1_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_ipu1_clkctrl_regs[] __initconst = {
+ { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP, "ipu1-clkctrl:0000:24" },
{ 0 },
};
@@ -108,45 +129,55 @@ static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = {
- { DRA7_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0010:22" },
- { DRA7_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0018:24" },
- { DRA7_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0020:24" },
- { DRA7_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0028:24" },
- { DRA7_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0030:24" },
- { DRA7_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
- { DRA7_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0040:24" },
+ { DRA7_IPU_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0000:22" },
+ { DRA7_IPU_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0008:24" },
+ { DRA7_IPU_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0010:24" },
+ { DRA7_IPU_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0018:24" },
+ { DRA7_IPU_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0020:24" },
+ { DRA7_IPU_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_IPU_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0030:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dsp2_clkctrl_regs[] __initconst = {
+ { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = {
- { DRA7_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { DRA7_RTC_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
- { DRA7_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
- { DRA7_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = {
- { DRA7_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3MAIN1_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3MAIN1_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3MAIN1_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_ipu2_clkctrl_regs[] __initconst = {
+ { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h22x2_ck" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = {
- { DRA7_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_DMA_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = {
- { DRA7_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_EMIF_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
{ 0 },
};
@@ -161,7 +192,7 @@ static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = {
static const char * const dra7_atl_gfclk_mux_parents[] __initconst = {
"l3_iclk_div",
"dpll_abe_m2_ck",
- "atl_cm:clk:0000:24",
+ "atl-clkctrl:0000:24",
NULL,
};
@@ -172,32 +203,32 @@ static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = {
- { DRA7_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl_cm:clk:0000:26" },
+ { DRA7_ATL_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl-clkctrl:0000:26" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = {
- { DRA7_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" },
{ 0 },
};
static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = {
- { DRA7_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3INSTR_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3INSTR_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
@@ -242,8 +273,8 @@ static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = {
- { DRA7_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" },
- { DRA7_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" },
+ { DRA7_DSS_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
+ { DRA7_DSS_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" },
{ 0 },
};
@@ -254,7 +285,7 @@ static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
};
static const char * const dra7_mmc1_fclk_div_parents[] __initconst = {
- "l3init_cm:clk:0008:24",
+ "l3init-clkctrl:0008:24",
NULL,
};
@@ -271,7 +302,7 @@ static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = {
};
static const char * const dra7_mmc2_fclk_div_parents[] __initconst = {
- "l3init_cm:clk:0010:24",
+ "l3init-clkctrl:0010:24",
NULL,
};
@@ -307,6 +338,24 @@ static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = {
{ 0 },
};
+static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = {
+ { DRA7_L3INIT_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
+ { DRA7_L3INIT_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
+ { DRA7_L3INIT_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L3INIT_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { DRA7_L3INIT_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { DRA7_L3INIT_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { 0 },
+};
+
static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = {
"apll_pcie_ck",
NULL,
@@ -331,6 +380,12 @@ static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = {
{ 0 },
};
+static const struct omap_clkctrl_reg_data dra7_pcie_clkctrl_regs[] __initconst = {
+ { DRA7_PCIE_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_PCIE_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div" },
+ { 0 },
+};
+
static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = {
"dpll_gmac_h11x2_ck",
"rmii_clk_ck",
@@ -352,24 +407,8 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = {
- { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
- { 0 },
-};
-
-static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = {
- { DRA7_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" },
- { DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
- { DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
- { DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
- { DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
- { DRA7_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck", "gmac_clkdm" },
- { DRA7_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
- { DRA7_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
- { DRA7_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
+ { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" },
{ 0 },
};
@@ -443,21 +482,6 @@ static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
- { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
- { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
- { 0 },
-};
-
static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = {
{ 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
{ 0 },
@@ -469,7 +493,7 @@ static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = {
};
static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = {
- "l4per_cm:clk:0120:24",
+ "l4per-clkctrl:00f8:24",
NULL,
};
@@ -486,7 +510,7 @@ static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = {
};
static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = {
- "l4per_cm:clk:0128:24",
+ "l4per-clkctrl:0100:24",
NULL,
};
@@ -502,8 +526,72 @@ static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = {
+ { DRA7_L4PER_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0000:24" },
+ { DRA7_L4PER_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
+ { DRA7_L4PER_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
+ { DRA7_L4PER_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
+ { DRA7_L4PER_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
+ { DRA7_L4PER_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
+ { DRA7_L4PER_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
+ { DRA7_L4PER_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:00f8:25" },
+ { DRA7_L4PER_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0100:25" },
+ { DRA7_L4PER_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0118:24" },
+ { DRA7_L4PER_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0120:24" },
+ { DRA7_L4PER_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0128:24" },
+ { DRA7_L4PER_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0130:24" },
+ { DRA7_L4PER_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0148:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst = {
+ { DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
@@ -514,7 +602,7 @@ static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = {
};
static const char * const dra7_qspi_gfclk_div_parents[] __initconst = {
- "l4per_cm:clk:0138:24",
+ "l4per2-clkctrl:012c:24",
NULL,
};
@@ -529,26 +617,6 @@ static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
- { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
- { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
- { 0 },
-};
-
-static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
- { 0 },
-};
-
static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = {
{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
@@ -562,11 +630,6 @@ static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = {
- { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
- { 0 },
-};
-
static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = {
{ 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
{ 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
@@ -612,64 +675,54 @@ static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = {
{ 0 },
};
-static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = {
- { DRA7_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per2_clkdm" },
- { DRA7_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per3_clkdm" },
- { DRA7_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" },
- { DRA7_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" },
- { DRA7_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0038:24" },
- { DRA7_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0040:24" },
- { DRA7_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0048:24" },
- { DRA7_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0050:24" },
- { DRA7_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
- { DRA7_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
- { DRA7_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
- { DRA7_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
- { DRA7_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
- { DRA7_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
- { DRA7_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
- { DRA7_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" },
- { DRA7_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" },
- { DRA7_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00c8:24", "l4per3_clkdm" },
- { DRA7_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d0:24", "l4per3_clkdm" },
- { DRA7_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d8:24", "l4per3_clkdm" },
- { DRA7_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { DRA7_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { DRA7_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { DRA7_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
- { DRA7_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0120:25" },
- { DRA7_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0128:25" },
- { DRA7_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0130:24", "l4per3_clkdm" },
- { DRA7_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0138:25", "l4per2_clkdm" },
- { DRA7_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0140:24" },
- { DRA7_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0148:24" },
- { DRA7_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0150:24" },
- { DRA7_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0158:24" },
- { DRA7_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0160:22", "l4per2_clkdm" },
- { DRA7_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0168:22", "l4per2_clkdm" },
- { DRA7_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0170:24" },
- { DRA7_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0178:22", "l4per2_clkdm" },
- { DRA7_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0190:24", "l4per2_clkdm" },
- { DRA7_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0198:22", "l4per2_clkdm" },
- { DRA7_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" },
- { DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" },
- { DRA7_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e8:24", "l4per2_clkdm" },
- { DRA7_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1", "l4per2_clkdm" },
- { DRA7_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0204:22", "l4per2_clkdm" },
- { DRA7_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0208:22", "l4per2_clkdm" },
+static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst = {
+ { DRA7_L4PER2_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER2_PRUSS1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { DRA7_L4PER2_PRUSS2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { DRA7_L4PER2_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_L4PER2_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_L4PER2_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_L4PER2_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:012c:25" },
+ { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
+ { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
+ { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
+ { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" },
+ { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
+ { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
+ { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
+ { DRA7_L4PER2_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01dc:24" },
+ { DRA7_L4PER2_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1" },
+ { DRA7_L4PER2_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01f8:22" },
+ { DRA7_L4PER2_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01fc:22" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4per3_clkctrl_regs[] __initconst = {
+ { DRA7_L4PER3_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER3_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00b4:24" },
+ { DRA7_L4PER3_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00bc:24" },
+ { DRA7_L4PER3_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00c4:24" },
+ { DRA7_L4PER3_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:011c:24" },
{ 0 },
};
@@ -700,24 +753,28 @@ static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = {
};
static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = {
- { DRA7_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
- { DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
- { DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
- { DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
- { DRA7_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
- { DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
- { DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" },
- { DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" },
- { DRA7_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk"},
+ { DRA7_WKUPAON_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { DRA7_WKUPAON_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { DRA7_WKUPAON_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_WKUPAON_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
+ { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
+ { DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
+ { DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
+ { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk" },
{ 0 },
};
const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
{ 0x4a005320, dra7_mpu_clkctrl_regs },
- { 0x4a005540, dra7_ipu_clkctrl_regs },
- { 0x4a005740, dra7_rtc_clkctrl_regs },
+ { 0x4a005420, dra7_dsp1_clkctrl_regs },
+ { 0x4a005520, dra7_ipu1_clkctrl_regs },
+ { 0x4a005550, dra7_ipu_clkctrl_regs },
+ { 0x4a005620, dra7_dsp2_clkctrl_regs },
+ { 0x4a005720, dra7_rtc_clkctrl_regs },
{ 0x4a008620, dra7_coreaon_clkctrl_regs },
{ 0x4a008720, dra7_l3main1_clkctrl_regs },
+ { 0x4a008920, dra7_ipu2_clkctrl_regs },
{ 0x4a008a20, dra7_dma_clkctrl_regs },
{ 0x4a008b20, dra7_emif_clkctrl_regs },
{ 0x4a008c00, dra7_atl_clkctrl_regs },
@@ -725,7 +782,12 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
{ 0x4a008e20, dra7_l3instr_clkctrl_regs },
{ 0x4a009120, dra7_dss_clkctrl_regs },
{ 0x4a009320, dra7_l3init_clkctrl_regs },
- { 0x4a009700, dra7_l4per_clkctrl_regs },
+ { 0x4a0093b0, dra7_pcie_clkctrl_regs },
+ { 0x4a0093d0, dra7_gmac_clkctrl_regs },
+ { 0x4a009728, dra7_l4per_clkctrl_regs },
+ { 0x4a0098a0, dra7_l4sec_clkctrl_regs },
+ { 0x4a00970c, dra7_l4per2_clkctrl_regs },
+ { 0x4a009714, dra7_l4per3_clkctrl_regs },
{ 0x4ae07820, dra7_wkupaon_clkctrl_regs },
{ 0 },
};
@@ -734,91 +796,92 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
- DT_CLK(NULL, "atl_dpll_clk_mux", "atl_cm:0000:24"),
- DT_CLK(NULL, "atl_gfclk_mux", "atl_cm:0000:26"),
- DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon_cm:0068:24"),
- DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"),
- DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"),
- DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"),
- DT_CLK(NULL, "dss_hdmi_clk", "dss_cm:0000:10"),
- DT_CLK(NULL, "dss_video1_clk", "dss_cm:0000:12"),
- DT_CLK(NULL, "dss_video2_clk", "dss_cm:0000:13"),
- DT_CLK(NULL, "gmac_rft_clk_mux", "l3init_cm:00b0:25"),
- DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"),
- DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0060:8"),
- DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0068:8"),
- DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0070:8"),
- DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0078:8"),
- DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0080:8"),
- DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:0110:8"),
- DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:0118:8"),
- DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu_cm:0010:28"),
- DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu_cm:0010:24"),
- DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu_cm:0010:22"),
- DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per_cm:0160:28"),
- DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per_cm:0160:24"),
- DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per_cm:0160:22"),
- DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per_cm:0168:24"),
- DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per_cm:0168:22"),
- DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per_cm:0198:24"),
- DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per_cm:0198:22"),
- DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per_cm:0178:24"),
- DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per_cm:0178:22"),
- DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per_cm:0204:24"),
- DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per_cm:0204:22"),
- DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per_cm:0208:24"),
- DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per_cm:0208:22"),
- DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per_cm:0190:22"),
- DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per_cm:0190:24"),
- DT_CLK(NULL, "mmc1_clk32k", "l3init_cm:0008:8"),
- DT_CLK(NULL, "mmc1_fclk_div", "l3init_cm:0008:25"),
- DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"),
- DT_CLK(NULL, "mmc2_clk32k", "l3init_cm:0010:8"),
- DT_CLK(NULL, "mmc2_fclk_div", "l3init_cm:0010:25"),
- DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"),
- DT_CLK(NULL, "mmc3_clk32k", "l4per_cm:0120:8"),
- DT_CLK(NULL, "mmc3_gfclk_div", "l4per_cm:0120:25"),
- DT_CLK(NULL, "mmc3_gfclk_mux", "l4per_cm:0120:24"),
- DT_CLK(NULL, "mmc4_clk32k", "l4per_cm:0128:8"),
- DT_CLK(NULL, "mmc4_gfclk_div", "l4per_cm:0128:25"),
- DT_CLK(NULL, "mmc4_gfclk_mux", "l4per_cm:0128:24"),
- DT_CLK(NULL, "optfclk_pciephy1_32khz", "l3init_cm:0090:8"),
- DT_CLK(NULL, "optfclk_pciephy1_clk", "l3init_cm:0090:9"),
- DT_CLK(NULL, "optfclk_pciephy1_div_clk", "l3init_cm:0090:10"),
- DT_CLK(NULL, "optfclk_pciephy2_32khz", "l3init_cm:0098:8"),
- DT_CLK(NULL, "optfclk_pciephy2_clk", "l3init_cm:0098:9"),
- DT_CLK(NULL, "optfclk_pciephy2_div_clk", "l3init_cm:0098:10"),
- DT_CLK(NULL, "qspi_gfclk_div", "l4per_cm:0138:25"),
- DT_CLK(NULL, "qspi_gfclk_mux", "l4per_cm:0138:24"),
- DT_CLK(NULL, "rmii_50mhz_clk_mux", "l3init_cm:00b0:24"),
- DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"),
- DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0028:24"),
- DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0030:24"),
- DT_CLK(NULL, "timer13_gfclk_mux", "l4per_cm:00c8:24"),
- DT_CLK(NULL, "timer14_gfclk_mux", "l4per_cm:00d0:24"),
- DT_CLK(NULL, "timer15_gfclk_mux", "l4per_cm:00d8:24"),
- DT_CLK(NULL, "timer16_gfclk_mux", "l4per_cm:0130:24"),
- DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"),
- DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0038:24"),
- DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0040:24"),
- DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0048:24"),
- DT_CLK(NULL, "timer5_gfclk_mux", "ipu_cm:0018:24"),
- DT_CLK(NULL, "timer6_gfclk_mux", "ipu_cm:0020:24"),
- DT_CLK(NULL, "timer7_gfclk_mux", "ipu_cm:0028:24"),
- DT_CLK(NULL, "timer8_gfclk_mux", "ipu_cm:0030:24"),
- DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0050:24"),
- DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon_cm:0060:24"),
- DT_CLK(NULL, "uart1_gfclk_mux", "l4per_cm:0140:24"),
- DT_CLK(NULL, "uart2_gfclk_mux", "l4per_cm:0148:24"),
- DT_CLK(NULL, "uart3_gfclk_mux", "l4per_cm:0150:24"),
- DT_CLK(NULL, "uart4_gfclk_mux", "l4per_cm:0158:24"),
- DT_CLK(NULL, "uart5_gfclk_mux", "l4per_cm:0170:24"),
- DT_CLK(NULL, "uart6_gfclk_mux", "ipu_cm:0040:24"),
- DT_CLK(NULL, "uart7_gfclk_mux", "l4per_cm:01d0:24"),
- DT_CLK(NULL, "uart8_gfclk_mux", "l4per_cm:01e0:24"),
- DT_CLK(NULL, "uart9_gfclk_mux", "l4per_cm:01e8:24"),
- DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init_cm:00d0:8"),
- DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init_cm:0020:8"),
+ DT_CLK(NULL, "atl_dpll_clk_mux", "atl-clkctrl:0000:24"),
+ DT_CLK(NULL, "atl_gfclk_mux", "atl-clkctrl:0000:26"),
+ DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon-clkctrl:0068:24"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_hdmi_clk", "dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "dss_video1_clk", "dss-clkctrl:0000:12"),
+ DT_CLK(NULL, "dss_video2_clk", "dss-clkctrl:0000:13"),
+ DT_CLK(NULL, "gmac_rft_clk_mux", "gmac-clkctrl:0000:25"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0038:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00e8:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f0:8"),
+ DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1-clkctrl:0000:24"),
+ DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu-clkctrl:0000:28"),
+ DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu-clkctrl:0000:24"),
+ DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu-clkctrl:0000:22"),
+ DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per2-clkctrl:0154:28"),
+ DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per2-clkctrl:0154:24"),
+ DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per2-clkctrl:0154:22"),
+ DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per2-clkctrl:015c:24"),
+ DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per2-clkctrl:015c:22"),
+ DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per2-clkctrl:018c:24"),
+ DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per2-clkctrl:018c:22"),
+ DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per2-clkctrl:016c:24"),
+ DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per2-clkctrl:016c:22"),
+ DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per2-clkctrl:01f8:24"),
+ DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
+ DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
+ DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
+ DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"),
+ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"),
+ DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ DT_CLK(NULL, "mmc2_clk32k", "l3init-clkctrl:0010:8"),
+ DT_CLK(NULL, "mmc2_fclk_div", "l3init-clkctrl:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "mmc3_clk32k", "l4per-clkctrl:00f8:8"),
+ DT_CLK(NULL, "mmc3_gfclk_div", "l4per-clkctrl:00f8:25"),
+ DT_CLK(NULL, "mmc3_gfclk_mux", "l4per-clkctrl:00f8:24"),
+ DT_CLK(NULL, "mmc4_clk32k", "l4per-clkctrl:0100:8"),
+ DT_CLK(NULL, "mmc4_gfclk_div", "l4per-clkctrl:0100:25"),
+ DT_CLK(NULL, "mmc4_gfclk_mux", "l4per-clkctrl:0100:24"),
+ DT_CLK(NULL, "optfclk_pciephy1_32khz", "pcie-clkctrl:0000:8"),
+ DT_CLK(NULL, "optfclk_pciephy1_clk", "pcie-clkctrl:0000:9"),
+ DT_CLK(NULL, "optfclk_pciephy1_div_clk", "pcie-clkctrl:0000:10"),
+ DT_CLK(NULL, "optfclk_pciephy2_32khz", "pcie-clkctrl:0008:8"),
+ DT_CLK(NULL, "optfclk_pciephy2_clk", "pcie-clkctrl:0008:9"),
+ DT_CLK(NULL, "optfclk_pciephy2_div_clk", "pcie-clkctrl:0008:10"),
+ DT_CLK(NULL, "qspi_gfclk_div", "l4per2-clkctrl:012c:25"),
+ DT_CLK(NULL, "qspi_gfclk_mux", "l4per2-clkctrl:012c:24"),
+ DT_CLK(NULL, "rmii_50mhz_clk_mux", "gmac-clkctrl:0000:24"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0000:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer13_gfclk_mux", "l4per3-clkctrl:00b4:24"),
+ DT_CLK(NULL, "timer14_gfclk_mux", "l4per3-clkctrl:00bc:24"),
+ DT_CLK(NULL, "timer15_gfclk_mux", "l4per3-clkctrl:00c4:24"),
+ DT_CLK(NULL, "timer16_gfclk_mux", "l4per3-clkctrl:011c:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "ipu-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "ipu-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "ipu-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "ipu-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0028:24"),
+ DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon-clkctrl:0060:24"),
+ DT_CLK(NULL, "uart1_gfclk_mux", "l4per-clkctrl:0118:24"),
+ DT_CLK(NULL, "uart2_gfclk_mux", "l4per-clkctrl:0120:24"),
+ DT_CLK(NULL, "uart3_gfclk_mux", "l4per-clkctrl:0128:24"),
+ DT_CLK(NULL, "uart4_gfclk_mux", "l4per-clkctrl:0130:24"),
+ DT_CLK(NULL, "uart5_gfclk_mux", "l4per-clkctrl:0148:24"),
+ DT_CLK(NULL, "uart6_gfclk_mux", "ipu-clkctrl:0030:24"),
+ DT_CLK(NULL, "uart7_gfclk_mux", "l4per2-clkctrl:01c4:24"),
+ DT_CLK(NULL, "uart8_gfclk_mux", "l4per2-clkctrl:01d4:24"),
+ DT_CLK(NULL, "uart9_gfclk_mux", "l4per2-clkctrl:01dc:24"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init-clkctrl:00d0:8"),
+ DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init-clkctrl:0020:8"),
{ .node_name = NULL },
};
@@ -827,7 +890,10 @@ int __init dra7xx_dt_clk_init(void)
int rc;
struct clk *dpll_ck, *hdcp_ck;
- ti_dt_clocks_register(dra7xx_clks);
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ ti_dt_clocks_register(dra7xx_compat_clks);
+ else
+ ti_dt_clocks_register(dra7xx_clks);
omap2_clk_disable_autoidle_all();
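
[Illustrative sketch, not part of the patch: a cut-down, standalone model of what dra7xx_dt_clk_init() above does — pick one of two clkdev alias tables depending on a single compatibility flag, with the legacy "<domain>_cm:<offset>:<bit>" provider names on one side and the new "<domain>-clkctrl:<offset>:<bit>" names on the other. The table layout and pick_table() helper below are made up for the example; only the two provider-name strings are taken from the hunks above.]

    #include <stdio.h>

    /* Minimal stand-in for a clkdev alias table entry. */
    struct alias { const char *con_id; const char *provider; };

    static const struct alias compat_aliases[] = {
            { "dss_hdmi_clk", "dss_cm:0000:10" },          /* legacy name */
            { NULL, NULL },
    };

    static const struct alias new_aliases[] = {
            { "dss_hdmi_clk", "dss-clkctrl:0000:10" },     /* clkctrl name */
            { NULL, NULL },
    };

    /* Select the table the same way the init code selects between the
     * compat and the new data, based on one feature flag. */
    static const struct alias *pick_table(int compat_mode)
    {
            return compat_mode ? compat_aliases : new_aliases;
    }

    int main(void)
    {
            const struct alias *t;

            for (t = pick_table(0); t->con_id; t++)
                    printf("%s -> %s\n", t->con_id, t->provider);
            return 0;
    }

Consumers keep calling clk_get() with the same connection id either way; only the provider clock name behind the alias changes.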
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 148815470431..a01ca9395179 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -190,8 +190,8 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
init.num_parents = of_clk_get_parent_count(node);
if (init.num_parents != 1) {
- pr_err("%s: atl clock %s must have 1 parent\n", __func__,
- node->name);
+ pr_err("%s: atl clock %pOFn must have 1 parent\n", __func__,
+ node);
goto cleanup;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 7d22e1af2247..d0cd58534781 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -23,7 +23,7 @@
#include <linux/of_address.h>
#include <linux/list.h>
#include <linux/regmap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/device.h>
#include "clock.h"
@@ -34,7 +34,7 @@
struct ti_clk_ll_ops *ti_clk_ll_ops;
static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
-static struct ti_clk_features ti_clk_features;
+struct ti_clk_features ti_clk_features;
struct clk_iomap {
struct regmap *regmap;
@@ -129,7 +129,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
{
struct ti_dt_clk *c;
- struct device_node *node;
+ struct device_node *node, *parent;
struct clk *clk;
struct of_phandle_args clkspec;
char buf[64];
@@ -140,6 +140,9 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
int ret;
static bool clkctrl_nodes_missing;
static bool has_clkctrl_data;
+ static bool compat_mode;
+
+ compat_mode = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
for (c = oclks; c->node_name != NULL; c++) {
strcpy(buf, c->node_name);
@@ -164,8 +167,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
continue;
node = of_find_node_by_name(NULL, buf);
- if (num_args)
- node = of_find_node_by_name(node, "clk");
+ if (num_args && compat_mode) {
+ parent = node;
+ node = of_get_child_by_name(parent, "clk");
+ of_node_put(parent);
+ }
+
clkspec.np = node;
clkspec.args_count = num_args;
for (i = 0; i < num_args; i++) {
@@ -173,11 +180,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
if (ret) {
pr_warn("Bad tag in %s at %d: %s\n",
c->node_name, i, tags[i]);
+ of_node_put(node);
return;
}
}
clk = of_clk_get_from_provider(&clkspec);
-
+ of_node_put(node);
if (!IS_ERR(clk)) {
c->lk.clk = clk;
clkdev_add(&c->lk);
@@ -223,7 +231,7 @@ int __init ti_clk_retry_init(struct device_node *node, void *user,
{
struct clk_init_item *retry;
- pr_debug("%s: adding to retry list...\n", node->name);
+ pr_debug("%pOFn: adding to retry list...\n", node);
retry = kzalloc(sizeof(*retry), GFP_KERNEL);
if (!retry)
return -ENOMEM;
@@ -258,14 +266,14 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
}
if (i == CLK_MAX_MEMMAPS) {
- pr_err("clk-provider not found for %s!\n", node->name);
+ pr_err("clk-provider not found for %pOFn!\n", node);
return -ENOENT;
}
reg->index = i;
if (of_property_read_u32_index(node, "reg", index, &val)) {
- pr_err("%s must have reg[%d]!\n", node->name, index);
+ pr_err("%pOFn must have reg[%d]!\n", node, index);
return -EINVAL;
}
@@ -312,7 +320,7 @@ int __init omap2_clk_provider_init(struct device_node *parent, int index,
/* get clocks for this parent */
clocks = of_get_child_by_name(parent, "clocks");
if (!clocks) {
- pr_err("%s missing 'clocks' child node.\n", parent->name);
+ pr_err("%pOFn missing 'clocks' child node.\n", parent);
return -EINVAL;
}
@@ -342,7 +350,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
struct clk_iomap *io;
- io = memblock_virt_alloc(sizeof(*io), 0);
+ io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
io->mem = mem;
@@ -365,7 +373,7 @@ void ti_dt_clk_init_retry_clks(void)
while (!list_empty(&retry_list) && retries) {
list_for_each_entry_safe(retry, tmp, &retry_list, link) {
- pr_debug("retry-init: %s\n", retry->node->name);
+ pr_debug("retry-init: %pOFn\n", retry->node);
retry->func(retry->user, retry->node);
list_del(&retry->link);
kfree(retry);
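
[Sketch only, not from the patch: the hunks above add of_node_put() calls to ti_dt_clocks_register(), following the usual OF refcount rule — every node returned by of_find_node_by_name() or of_get_child_by_name() holds a reference that must be dropped once the caller is done with it. The helper below is hypothetical and just shows the get/put pairing.]

    #include <linux/of.h>

    /* Look up a node by name and, in compat mode, descend to its "clk"
     * child, dropping the parent reference once it is no longer needed. */
    static struct device_node *find_clk_child(const char *name)
    {
            struct device_node *parent, *clk;

            parent = of_find_node_by_name(NULL, name);  /* takes a reference */
            if (!parent)
                    return NULL;

            clk = of_get_child_by_name(parent, "clk");  /* takes another reference */
            of_node_put(parent);                        /* parent no longer needed */

            return clk;  /* caller must of_node_put() the result */
    }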
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 421b05392220..469f560ae1cf 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -259,8 +259,13 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
struct omap_clkctrl_clk *clkctrl_clk;
int ret = 0;
- init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", node->parent->name,
- node->name, offset, bit);
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
+ node->parent, node, offset,
+ bit);
+ else
+ init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
+ offset, bit);
clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
if (!init.name || !clkctrl_clk) {
ret = -ENOMEM;
@@ -440,6 +445,11 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
const __be32 *addrp;
u32 addr;
int ret;
+ char *c;
+
+ if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
+ !strcmp(node->name, "clk"))
+ ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
addrp = of_get_address(node, 0, NULL, NULL);
addr = (u32)of_translate_address(node, addrp);
@@ -453,18 +463,35 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
- if (of_machine_is_compatible("ti,dra7"))
- data = dra7_clkctrl_data;
+ if (of_machine_is_compatible("ti,dra7")) {
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ data = dra7_clkctrl_compat_data;
+ else
+ data = dra7_clkctrl_data;
+ }
#endif
#ifdef CONFIG_SOC_AM33XX
- if (of_machine_is_compatible("ti,am33xx"))
- data = am3_clkctrl_data;
+ if (of_machine_is_compatible("ti,am33xx")) {
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ data = am3_clkctrl_compat_data;
+ else
+ data = am3_clkctrl_data;
+ }
#endif
#ifdef CONFIG_SOC_AM43XX
- if (of_machine_is_compatible("ti,am4372"))
- data = am4_clkctrl_data;
- if (of_machine_is_compatible("ti,am438x"))
- data = am438x_clkctrl_data;
+ if (of_machine_is_compatible("ti,am4372")) {
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ data = am4_clkctrl_compat_data;
+ else
+ data = am4_clkctrl_data;
+ }
+
+ if (of_machine_is_compatible("ti,am438x")) {
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ data = am438x_clkctrl_compat_data;
+ else
+ data = am438x_clkctrl_data;
+ }
#endif
#ifdef CONFIG_SOC_TI81XX
if (of_machine_is_compatible("ti,dm814"))
@@ -492,21 +519,43 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
provider->base = of_iomap(node, 0);
- provider->clkdm_name = kmalloc(strlen(node->parent->name) + 3,
- GFP_KERNEL);
- if (!provider->clkdm_name) {
- kfree(provider);
- return;
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) {
+ provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
+ if (!provider->clkdm_name) {
+ kfree(provider);
+ return;
+ }
+
+ /*
+ * Create default clkdm name, replace _cm from end of parent
+ * node name with _clkdm
+ */
+ provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
+ } else {
+ provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
+ if (!provider->clkdm_name) {
+ kfree(provider);
+ return;
+ }
+
+ /*
+ * Create default clkdm name, replace _clkctrl from end of
+ * node name with _clkdm
+ */
+ provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
}
- /*
- * Create default clkdm name, replace _cm from end of parent node
- * name with _clkdm
- */
- strcpy(provider->clkdm_name, node->parent->name);
- provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
strcat(provider->clkdm_name, "clkdm");
+ /* Replace any dash from the clkdm name with underscore */
+ c = provider->clkdm_name;
+
+ while (*c) {
+ if (*c == '-')
+ *c = '_';
+ c++;
+ }
+
INIT_LIST_HEAD(&provider->clocks);
/* Generate clocks */
@@ -539,9 +588,13 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
init.flags = 0;
if (reg_data->flags & CLKF_SET_RATE_PARENT)
init.flags |= CLK_SET_RATE_PARENT;
- init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
- node->parent->name, node->name,
- reg_data->offset, 0);
+ if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
+ init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
+ node->parent, node,
+ reg_data->offset, 0);
+ else
+ init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d",
+ node, reg_data->offset, 0);
clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
if (!init.name || !clkctrl_clk)
goto cleanup;
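
[Standalone illustration, not kernel code: the clkdm-name derivation added above ends up stripping the trailing "_cm" of the parent node name (compat mode) or the trailing "-clkctrl" of the node name (new layout), appending "clkdm", and converting any '-' to '_'. The helper below is a simplified model that produces the same final names; the example node names are taken from the alias tables earlier in the patch.]

    #include <stdio.h>
    #include <string.h>

    /* Derive a clock-domain name from a clkctrl node name.  Assumes the
     * node name is well formed, i.e. longer than the stripped suffix. */
    static void clkdm_name(char *out, size_t len, const char *node, int compat)
    {
            size_t cut = compat ? strlen("_cm") : strlen("-clkctrl");
            size_t keep = strlen(node) - cut;
            char *c;

            snprintf(out, len, "%.*s_clkdm", (int)keep, node);

            for (c = out; *c; c++)          /* dashes become underscores */
                    if (*c == '-')
                            *c = '_';
    }

    int main(void)
    {
            char buf[32];

            clkdm_name(buf, sizeof(buf), "l4per_cm", 1);       /* l4per_clkdm */
            printf("%s\n", buf);
            clkdm_name(buf, sizeof(buf), "l4per-clkctrl", 0);  /* l4per_clkdm */
            printf("%s\n", buf);
            return 0;
    }

Both layouts converge on the same "<domain>_clkdm" string, which is why the dash-to-underscore pass can be shared.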
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index b58278077226..9f312a219510 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -24,6 +24,7 @@ struct clk_omap_divider {
u8 flags;
s8 latch;
const struct clk_div_table *table;
+ u32 context;
};
#define to_clk_omap_divider(_hw) container_of(_hw, struct clk_omap_divider, hw)
@@ -36,6 +37,7 @@ struct clk_omap_mux {
u8 shift;
s8 latch;
u8 flags;
+ u8 saved_parent;
};
#define to_clk_omap_mux(_hw) container_of(_hw, struct clk_omap_mux, hw)
@@ -184,9 +186,16 @@ struct omap_clkctrl_data {
extern const struct omap_clkctrl_data omap4_clkctrl_data[];
extern const struct omap_clkctrl_data omap5_clkctrl_data[];
extern const struct omap_clkctrl_data dra7_clkctrl_data[];
+extern const struct omap_clkctrl_data dra7_clkctrl_compat_data[];
+extern struct ti_dt_clk dra7xx_compat_clks[];
extern const struct omap_clkctrl_data am3_clkctrl_data[];
+extern const struct omap_clkctrl_data am3_clkctrl_compat_data[];
+extern struct ti_dt_clk am33xx_compat_clks[];
extern const struct omap_clkctrl_data am4_clkctrl_data[];
+extern const struct omap_clkctrl_data am4_clkctrl_compat_data[];
+extern struct ti_dt_clk am43xx_compat_clks[];
extern const struct omap_clkctrl_data am438x_clkctrl_data[];
+extern const struct omap_clkctrl_data am438x_clkctrl_compat_data[];
extern const struct omap_clkctrl_data dm814_clkctrl_data[];
extern const struct omap_clkctrl_data dm816_clkctrl_data[];
@@ -233,6 +242,8 @@ extern const struct clk_ops ti_clk_divider_ops;
extern const struct clk_ops ti_clk_mux_ops;
extern const struct clk_ops omap_gate_clk_ops;
+extern struct ti_clk_features ti_clk_features;
+
void omap2_init_clk_clkdm(struct clk_hw *hw);
int omap2_clkops_enable_clkdm(struct clk_hw *hw);
void omap2_clkops_disable_clkdm(struct clk_hw *hw);
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 030e8b2c1050..6a89936ba03a 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -135,8 +135,8 @@ static void __init _register_composite(void *user,
comp = _lookup_component(cclk->comp_nodes[i]);
if (!comp) {
- pr_debug("component %s not ready for %s, retry\n",
- cclk->comp_nodes[i]->name, node->name);
+ pr_debug("component %s not ready for %pOFn, retry\n",
+ cclk->comp_nodes[i]->name, node);
if (!ti_clk_retry_init(node, hw,
_register_composite))
return;
@@ -144,8 +144,8 @@ static void __init _register_composite(void *user,
goto cleanup;
}
if (cclk->comp_clks[comp->type] != NULL) {
- pr_err("duplicate component types for %s (%s)!\n",
- node->name, component_clk_types[comp->type]);
+ pr_err("duplicate component types for %pOFn (%s)!\n",
+ node, component_clk_types[comp->type]);
goto cleanup;
}
@@ -168,7 +168,7 @@ static void __init _register_composite(void *user,
}
if (!num_parents) {
- pr_err("%s: no parents found for %s!\n", __func__, node->name);
+ pr_err("%s: no parents found for %pOFn!\n", __func__, node);
goto cleanup;
}
@@ -212,7 +212,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node)
num_clks = of_clk_get_parent_count(node);
if (!num_clks) {
- pr_err("composite clk %s must have component(s)\n", node->name);
+ pr_err("composite clk %pOFn must have component(s)\n", node);
return;
}
@@ -248,7 +248,7 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
num_parents = of_clk_get_parent_count(node);
if (!num_parents) {
- pr_err("component-clock %s must have parent(s)\n", node->name);
+ pr_err("component-clock %pOFn must have parent(s)\n", node);
return -EINVAL;
}
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ccfb4d9a152a..8d77090ad94a 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -268,10 +268,46 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+/**
+ * clk_divider_save_context - Save the divider value
+ * @hw: pointer struct clk_hw
+ *
+ * Save the divider value
+ */
+static int clk_divider_save_context(struct clk_hw *hw)
+{
+ struct clk_omap_divider *divider = to_clk_omap_divider(hw);
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
+ divider->context = val & div_mask(divider);
+
+ return 0;
+}
+
+/**
+ * clk_divider_restore_context - restore the saved divider value
+ * @hw: pointer struct clk_hw
+ *
+ * Restore the saved divider value
+ */
+static void clk_divider_restore_context(struct clk_hw *hw)
+{
+ struct clk_omap_divider *divider = to_clk_omap_divider(hw);
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ val |= divider->context << divider->shift;
+ ti_clk_ll_ops->clk_writel(val, &divider->reg);
+}
+
const struct clk_ops ti_clk_divider_ops = {
.recalc_rate = ti_clk_divider_recalc_rate,
.round_rate = ti_clk_divider_round_rate,
.set_rate = ti_clk_divider_set_rate,
+ .save_context = clk_divider_save_context,
+ .restore_context = clk_divider_restore_context,
};
static struct clk *_register_divider(struct device *dev, const char *name,
@@ -492,7 +528,7 @@ __init ti_clk_get_div_table(struct device_node *node)
}
if (!valid_div) {
- pr_err("no valid dividers for %s table\n", node->name);
+ pr_err("no valid dividers for %pOFn table\n", node);
return ERR_PTR(-EINVAL);
}
@@ -530,7 +566,7 @@ static int _get_divider_width(struct device_node *node,
min_div = 1;
if (of_property_read_u32(node, "ti,max-div", &max_div)) {
- pr_err("no max-div for %s!\n", node->name);
+ pr_err("no max-div for %pOFn!\n", node);
return -EINVAL;
}
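
[Standalone model, not the kernel code: the new divider save/restore callbacks above use a plain read/modify/write on one bit field — shift-and-mask the current value out on save, then write it back into the same field on restore while leaving the other register bits untouched. The register value, shift and mask below are made up for illustration.]

    #include <stdint.h>
    #include <stdio.h>

    struct div_model {
            uint32_t reg;       /* stands in for the memory-mapped register */
            unsigned int shift;
            uint32_t mask;
            uint32_t context;   /* saved divider value */
    };

    static void div_save(struct div_model *d)
    {
            d->context = (d->reg >> d->shift) & d->mask;
    }

    static void div_restore(struct div_model *d)
    {
            uint32_t val = d->reg;

            val &= ~(d->mask << d->shift);      /* clear the divider field */
            val |= d->context << d->shift;      /* put the saved value back */
            d->reg = val;
    }

    int main(void)
    {
            struct div_model d = { .reg = 0x00000500, .shift = 8, .mask = 0xf };

            div_save(&d);                       /* context = 5 */
            d.reg = 0;                          /* pretend the register was lost */
            div_restore(&d);
            printf("restored reg = 0x%08x\n", d.reg);   /* 0x00000500 */
            return 0;
    }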
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index dc86d07d0921..92e28af7afba 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -39,6 +39,8 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap4_dpll_regm4xen_determine_rate,
.get_parent = &omap2_init_dpll_parent,
+ .save_context = &omap3_core_dpll_save_context,
+ .restore_context = &omap3_core_dpll_restore_context,
};
#else
static const struct clk_ops dpll_m4xen_ck_ops = {};
@@ -62,6 +64,8 @@ static const struct clk_ops dpll_ck_ops = {
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
.get_parent = &omap2_init_dpll_parent,
+ .save_context = &omap3_noncore_dpll_save_context,
+ .restore_context = &omap3_noncore_dpll_restore_context,
};
static const struct clk_ops dpll_no_gate_ck_ops = {
@@ -72,6 +76,8 @@ static const struct clk_ops dpll_no_gate_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
+ .save_context = &omap3_noncore_dpll_save_context,
+ .restore_context = &omap3_noncore_dpll_restore_context
};
#else
static const struct clk_ops dpll_core_ck_ops = {};
@@ -162,8 +168,8 @@ static void __init _register_dpll(void *user,
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- pr_debug("clk-ref missing for %s, retry later\n",
- node->name);
+ pr_debug("clk-ref missing for %pOFn, retry later\n",
+ node);
if (!ti_clk_retry_init(node, hw, _register_dpll))
return;
@@ -175,8 +181,8 @@ static void __init _register_dpll(void *user,
clk = of_clk_get(node, 1);
if (IS_ERR(clk)) {
- pr_debug("clk-bypass missing for %s, retry later\n",
- node->name);
+ pr_debug("clk-bypass missing for %pOFn, retry later\n",
+ node);
if (!ti_clk_retry_init(node, hw, _register_dpll))
return;
@@ -226,7 +232,7 @@ static void _register_dpll_x2(struct device_node *node,
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
- pr_err("%s must have parent\n", node->name);
+ pr_err("%pOFn must have parent\n", node);
return;
}
@@ -305,7 +311,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
init->num_parents = of_clk_get_parent_count(node);
if (!init->num_parents) {
- pr_err("%s must have parent(s)\n", node->name);
+ pr_err("%pOFn must have parent(s)\n", node);
goto cleanup;
}
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 4534de2ef455..44b6b6403753 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -782,6 +782,130 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
return rate;
}
+/**
+ * omap3_core_dpll_save_context - Save the m and n values of the divider
+ * @hw: pointer struct clk_hw
+ *
+ * Before the dpll registers are lost, save the last rounded rate m and n
+ * and the enable mask.
+ */
+int omap3_core_dpll_save_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
+
+ if (clk->context == DPLL_LOCKED) {
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ dd->last_rounded_m = (v & dd->mult_mask) >>
+ __ffs(dd->mult_mask);
+ dd->last_rounded_n = ((v & dd->div1_mask) >>
+ __ffs(dd->div1_mask)) + 1;
+ }
+
+ return 0;
+}
+
+/**
+ * omap3_core_dpll_restore_context - restore the m and n values of the divider
+ * @hw: pointer struct clk_hw
+ *
+ * Restore the last rounded rate m and n
+ * and the enable mask.
+ */
+void omap3_core_dpll_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ const struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ if (clk->context == DPLL_LOCKED) {
+ _omap3_dpll_write_clken(clk, 0x4);
+ _omap3_wait_dpll_status(clk, 0);
+
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ v &= ~(dd->mult_mask | dd->div1_mask);
+ v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+ v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+ _omap3_wait_dpll_status(clk, 1);
+ } else {
+ _omap3_dpll_write_clken(clk, clk->context);
+ }
+}
+
+/**
+ * omap3_noncore_dpll_save_context - Save the m and n values of the divider
+ * @hw: pointer struct clk_hw
+ *
+ * Before the dpll registers are lost, save the last rounded rate m and n
+ * and the enable mask.
+ */
+int omap3_noncore_dpll_save_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
+
+ if (clk->context == DPLL_LOCKED) {
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ dd->last_rounded_m = (v & dd->mult_mask) >>
+ __ffs(dd->mult_mask);
+ dd->last_rounded_n = ((v & dd->div1_mask) >>
+ __ffs(dd->div1_mask)) + 1;
+ }
+
+ return 0;
+}
+
+/**
+ * omap3_noncore_dpll_restore_context - restore the m and n values of the divider
+ * @hw: pointer struct clk_hw
+ *
+ * Restore the last rounded rate m and n
+ * and the enable mask.
+ */
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ const struct dpll_data *dd;
+ u32 ctrl, mult_div1;
+
+ dd = clk->dpll_data;
+
+ ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+
+ if (clk->context == ((ctrl & dd->enable_mask) >>
+ __ffs(dd->enable_mask)) &&
+ dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
+ __ffs(dd->mult_mask)) &&
+ dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
+ __ffs(dd->div1_mask)) + 1) {
+ /* nothing to be done */
+ return;
+ }
+
+ if (clk->context == DPLL_LOCKED)
+ omap3_noncore_dpll_program(clk, 0);
+ else
+ _omap3_dpll_write_clken(clk, clk->context);
+}
+
/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
.allow_idle = omap3_dpll_allow_idle,
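
[Standalone sketch of the field extraction used by the DPLL save paths above: each value is isolated with its mask and shifted down by the position of the mask's lowest set bit (what the kernel does with __ffs()); N is stored as "divider minus one" in hardware, hence the +1. The masks and register value below are invented for the example, and the GCC/Clang __builtin_ctz() stands in for __ffs().]

    #include <stdint.h>
    #include <stdio.h>

    /* Extract a bit field given its mask. */
    static unsigned int field(uint32_t reg, uint32_t mask)
    {
            return (reg & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
            uint32_t mult_div1 = (250u << 8) | 4u;      /* pretend M=250, N-1=4 */
            uint32_t mult_mask = 0x7ff00;
            uint32_t div1_mask = 0x7f;

            unsigned int m = field(mult_div1, mult_mask);
            unsigned int n = field(mult_div1, div1_mask) + 1;

            printf("M=%u N=%u\n", m, n);                /* M=250 N=5 */
            return 0;
    }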
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 071af44b1ba8..ed24f20f63c7 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -555,7 +555,7 @@ static void __init ti_fapll_setup(struct device_node *node)
init->num_parents = of_clk_get_parent_count(node);
if (init->num_parents != 2) {
- pr_err("%s must have two parents\n", node->name);
+ pr_err("%pOFn must have two parents\n", node);
goto free;
}
@@ -564,19 +564,19 @@ static void __init ti_fapll_setup(struct device_node *node)
fd->clk_ref = of_clk_get(node, 0);
if (IS_ERR(fd->clk_ref)) {
- pr_err("%s could not get clk_ref\n", node->name);
+ pr_err("%pOFn could not get clk_ref\n", node);
goto free;
}
fd->clk_bypass = of_clk_get(node, 1);
if (IS_ERR(fd->clk_bypass)) {
- pr_err("%s could not get clk_bypass\n", node->name);
+ pr_err("%pOFn could not get clk_bypass\n", node);
goto free;
}
fd->base = of_iomap(node, 0);
if (!fd->base) {
- pr_err("%s could not get IO base\n", node->name);
+ pr_err("%pOFn could not get IO base\n", node);
goto free;
}
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
index 0174a51a4ba6..7cbe896db071 100644
--- a/drivers/clk/ti/fixed-factor.c
+++ b/drivers/clk/ti/fixed-factor.c
@@ -42,12 +42,12 @@ static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
u32 flags = 0;
if (of_property_read_u32(node, "ti,clock-div", &div)) {
- pr_err("%s must have a clock-div property\n", node->name);
+ pr_err("%pOFn must have a clock-div property\n", node);
return;
}
if (of_property_read_u32(node, "ti,clock-mult", &mult)) {
- pr_err("%s must have a clock-mult property\n", node->name);
+ pr_err("%pOFn must have a clock-mult property\n", node);
return;
}
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 935b2de5fb88..1c78fff5513c 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -33,6 +33,7 @@ static const struct clk_ops omap_gate_clkdm_clk_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_clkops_enable_clkdm,
.disable = &omap2_clkops_disable_clkdm,
+ .restore_context = clk_gate_restore_context,
};
const struct clk_ops omap_gate_clk_ops = {
@@ -40,6 +41,7 @@ const struct clk_ops omap_gate_clk_ops = {
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
+ .restore_context = clk_gate_restore_context,
};
static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
@@ -47,6 +49,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
.enable = &omap36xx_gate_clk_enable_with_hsdiv_restore,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
+ .restore_context = clk_gate_restore_context,
};
/**
@@ -179,7 +182,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
}
if (of_clk_get_parent_count(node) != 1) {
- pr_err("%s must have 1 parent\n", node->name);
+ pr_err("%pOFn must have 1 parent\n", node);
return;
}
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 41ae7021670e..87e00c2ee957 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -84,7 +84,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
- pr_err("%s must have a parent\n", node->name);
+ pr_err("%pOFn must have a parent\n", node);
return;
}
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 69a4308a5a98..883bdde94d04 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -91,10 +91,39 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
+/**
+ * clk_mux_save_context - Save the parent selected in the mux
+ * @hw: pointer struct clk_hw
+ *
+ * Save the parent mux value.
+ */
+static int clk_mux_save_context(struct clk_hw *hw)
+{
+ struct clk_omap_mux *mux = to_clk_omap_mux(hw);
+
+ mux->saved_parent = ti_clk_mux_get_parent(hw);
+ return 0;
+}
+
+/**
+ * clk_mux_restore_context - Restore the parent in the mux
+ * @hw: pointer struct clk_hw
+ *
+ * Restore the saved parent mux value.
+ */
+static void clk_mux_restore_context(struct clk_hw *hw)
+{
+ struct clk_omap_mux *mux = to_clk_omap_mux(hw);
+
+ ti_clk_mux_set_parent(hw, mux->saved_parent);
+}
+
const struct clk_ops ti_clk_mux_ops = {
.get_parent = ti_clk_mux_get_parent,
.set_parent = ti_clk_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
+ .save_context = clk_mux_save_context,
+ .restore_context = clk_mux_restore_context,
};
static struct clk *_register_mux(struct device *dev, const char *name,
@@ -186,7 +215,7 @@ static void of_mux_clk_setup(struct device_node *node)
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
- pr_err("mux-clock %s must have parents\n", node->name);
+ pr_err("mux-clock %pOFn must have parents\n", node);
return;
}
parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
@@ -278,7 +307,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
- pr_err("%s must have parents\n", node->name);
+ pr_err("%pOFn must have parents\n", node);
goto cleanup;
}
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 88a2cab37f62..d7b53ac8ad11 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -602,7 +602,7 @@ void __init zynq_clock_init(void)
}
if (of_address_to_resource(np, 0, &res)) {
- pr_err("%s: failed to get resource\n", np->name);
+ pr_err("%pOFn: failed to get resource\n", np);
goto np_err;
}
@@ -611,7 +611,7 @@ void __init zynq_clock_init(void)
if (slcr->data) {
zynq_clkc_base = (__force void __iomem *)slcr->data + res.start;
} else {
- pr_err("%s: Unable to get I/O memory\n", np->name);
+ pr_err("%pOFn: Unable to get I/O memory\n", np);
of_node_put(slcr);
goto np_err;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0cd8eb76ad59..4e1131ef85ae 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -28,20 +28,13 @@ config ARM_ARMADA_37XX_CPUFREQ
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK
+ depends on ARM_CPU_TOPOLOGY && HAVE_CLK
# if CPU_THERMAL is on and THERMAL=m, ARM_BIG_LITTLE_CPUFREQ cannot be =y
depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
-config ARM_DT_BL_CPUFREQ
- tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && OF
- help
- This enables probing via DT for Generic CPUfreq driver for ARM
- big.LITTLE platform. This gets frequency tables from DT.
-
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index c1ffeabe4ecf..d5ee4562ed06 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -48,9 +48,6 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
-# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
-# LITTLE drivers, so that it is probed last.
-obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
deleted file mode 100644
index b944f290c8a4..000000000000
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Generic big.LITTLE CPUFreq Interface driver
- *
- * It provides necessary ops to arm_big_little cpufreq driver and gets
- * Frequency information from Device Tree. Freq table in DT must be in KHz.
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include "arm_big_little.h"
-
-/* get cpu node with valid operating-points */
-static struct device_node *get_cpu_node_with_valid_op(int cpu)
-{
- struct device_node *np = of_cpu_device_node_get(cpu);
-
- if (!of_get_property(np, "operating-points", NULL)) {
- of_node_put(np);
- np = NULL;
- }
-
- return np;
-}
-
-static int dt_get_transition_latency(struct device *cpu_dev)
-{
- struct device_node *np;
- u32 transition_latency = CPUFREQ_ETERNAL;
-
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- pr_info("Failed to find cpu node. Use CPUFREQ_ETERNAL transition latency\n");
- return CPUFREQ_ETERNAL;
- }
-
- of_property_read_u32(np, "clock-latency", &transition_latency);
- of_node_put(np);
-
- pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
- return transition_latency;
-}
-
-static const struct cpufreq_arm_bL_ops dt_bL_ops = {
- .name = "dt-bl",
- .get_transition_latency = dt_get_transition_latency,
- .init_opp_table = dev_pm_opp_of_cpumask_add_table,
- .free_opp_table = dev_pm_opp_of_cpumask_remove_table,
-};
-
-static int generic_bL_probe(struct platform_device *pdev)
-{
- struct device_node *np;
-
- np = get_cpu_node_with_valid_op(0);
- if (!np)
- return -ENODEV;
-
- of_node_put(np);
- return bL_cpufreq_register(&dt_bL_ops);
-}
-
-static int generic_bL_remove(struct platform_device *pdev)
-{
- bL_cpufreq_unregister(&dt_bL_ops);
- return 0;
-}
-
-static struct platform_driver generic_bL_platdrv = {
- .driver = {
- .name = "arm-bL-cpufreq-dt",
- },
- .probe = generic_bL_probe,
- .remove = generic_bL_remove,
-};
-module_platform_driver(generic_bL_platdrv);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 49c0abf2d48f..9578312e43f2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -386,16 +386,11 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
return cppc_perf.guaranteed_perf;
}
-#else
+#else /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_set_itmt_prio(int cpu)
{
}
-
-static int intel_pstate_get_cppc_guranteed(int cpu)
-{
- return -ENOTSUPP;
-}
-#endif
+#endif /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -477,7 +472,7 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
acpi_processor_unregister_performance(policy->cpu);
}
-#else
+#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}
@@ -490,7 +485,14 @@ static inline bool intel_pstate_acpi_pm_profile_server(void)
{
return false;
}
-#endif
+#endif /* CONFIG_ACPI */
+
+#ifndef CONFIG_ACPI_CPPC_LIB
+static int intel_pstate_get_cppc_guranteed(int cpu)
+{
+ return -ENOTSUPP;
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
static inline void update_turbo_state(void)
{
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 71979605246e..61316fc51548 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -130,11 +130,6 @@ struct menu_device {
int interval_ptr;
};
-static inline int get_loadavg(unsigned long load)
-{
- return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
-}
-
static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
int bucket = 0;
@@ -168,18 +163,10 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters
* to be, the higher this multiplier, and thus the higher
* the barrier to go to an expensive C state.
*/
-static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
+static inline int performance_multiplier(unsigned long nr_iowaiters)
{
- int mult = 1;
-
- /* for higher loadavg, we are more reluctant */
-
- mult += 2 * get_loadavg(load);
-
- /* for IO wait tasks (per cpu!) we add 5x each */
- mult += 10 * nr_iowaiters;
-
- return mult;
+ /* for IO wait tasks (per cpu!) we add 10x each */
+ return 1 + 10 * nr_iowaiters;
}
static DEFINE_PER_CPU(struct menu_device, menu_devices);
@@ -297,7 +284,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int idx;
unsigned int interactivity_req;
unsigned int predicted_us;
- unsigned long nr_iowaiters, cpu_load;
+ unsigned long nr_iowaiters;
ktime_t delta_next;
if (data->needs_update) {
@@ -308,7 +295,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/* determine the expected residency time, round up */
data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
- get_iowait_load(&nr_iowaiters, &cpu_load);
+ nr_iowaiters = nr_iowait_cpu(dev->cpu);
data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
@@ -352,7 +339,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Use the performance multiplier and the user-configurable
* latency_req to determine the maximum exit latency.
*/
- interactivity_req = predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+ interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
if (latency_req > interactivity_req)
latency_req = interactivity_req;
}
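
[Quick standalone illustration of the simplified heuristic above: the multiplier no longer factors in load average, only the number of tasks waiting on I/O for this CPU, so the exit-latency cap becomes predicted_us / (1 + 10 * nr_iowaiters). The numbers below are arbitrary.]

    #include <stdio.h>

    static unsigned int performance_multiplier(unsigned long nr_iowaiters)
    {
            return 1 + 10 * nr_iowaiters;
    }

    int main(void)
    {
            unsigned int predicted_us = 600;
            unsigned long iowaiters;

            for (iowaiters = 0; iowaiters <= 2; iowaiters++)
                    printf("nr_iowait=%lu -> latency cap %u us\n", iowaiters,
                           predicted_us / performance_multiplier(iowaiters));
            return 0;
    }

With any I/O waiters present the cap drops sharply, steering the governor away from deep, high-exit-latency idle states.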
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index df9467eef32a..41c9ccdd20d6 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -234,6 +234,7 @@ config EDAC_SKX
depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG
depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_SKX can't be y
select DMI
+ select ACPI_ADXL if ACPI
help
Support for error detection and correction on the Intel
Skylake server Integrated Memory Controllers. If your
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index dd209e0dd9ab..a99ea61dad32 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -26,6 +26,7 @@
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
+#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -35,6 +36,7 @@
#include "edac_module.h"
#define EDAC_MOD_STR "skx_edac"
+#define MSG_SIZE 1024
/*
* Debug macros
@@ -54,6 +56,29 @@
static LIST_HEAD(skx_edac_list);
static u64 skx_tolm, skx_tohm;
+static char *skx_msg;
+static unsigned int nvdimm_count;
+
+enum {
+ INDEX_SOCKET,
+ INDEX_MEMCTRL,
+ INDEX_CHANNEL,
+ INDEX_DIMM,
+ INDEX_MAX
+};
+
+static const char * const component_names[] = {
+ [INDEX_SOCKET] = "ProcessorSocketId",
+ [INDEX_MEMCTRL] = "MemoryControllerId",
+ [INDEX_CHANNEL] = "ChannelId",
+ [INDEX_DIMM] = "DimmSlotId",
+};
+
+static int component_indices[ARRAY_SIZE(component_names)];
+static int adxl_component_count;
+static const char * const *adxl_component_names;
+static u64 *adxl_values;
+static char *adxl_msg;
#define NUM_IMC 2 /* memory controllers per socket */
#define NUM_CHANNELS 3 /* channels per memory controller */
@@ -393,6 +418,8 @@ static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
u16 flags;
u64 size = 0;
+ nvdimm_count++;
+
dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc,
imc->src_id, 0);
@@ -941,12 +968,46 @@ static void teardown_skx_debug(void)
}
#endif /*CONFIG_EDAC_DEBUG*/
+static bool skx_adxl_decode(struct decoded_addr *res)
+
+{
+ int i, len = 0;
+
+ if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
+ res->addr < BIT_ULL(32))) {
+ edac_dbg(0, "Address 0x%llx out of range\n", res->addr);
+ return false;
+ }
+
+ if (adxl_decode(res->addr, adxl_values)) {
+ edac_dbg(0, "Failed to decode 0x%llx\n", res->addr);
+ return false;
+ }
+
+ res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
+ res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
+ res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
+ res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+
+ for (i = 0; i < adxl_component_count; i++) {
+ if (adxl_values[i] == ~0x0ull)
+ continue;
+
+ len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx",
+ adxl_component_names[i], adxl_values[i]);
+ if (MSG_SIZE - len <= 0)
+ break;
+ }
+
+ return true;
+}
+
static void skx_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m,
struct decoded_addr *res)
{
enum hw_event_mc_err_type tp_event;
- char *type, *optype, msg[256];
+ char *type, *optype;
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1007,22 +1068,47 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
break;
}
}
+ if (adxl_component_count) {
+ snprintf(skx_msg, MSG_SIZE, "%s%s err_code:%04x:%04x %s",
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ mscod, errcode, adxl_msg);
+ } else {
+ snprintf(skx_msg, MSG_SIZE,
+ "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ mscod, errcode,
+ res->socket, res->imc, res->rank,
+ res->bank_group, res->bank_address, res->row, res->column);
+ }
- snprintf(msg, sizeof(msg),
- "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
- overflow ? " OVERFLOW" : "",
- (uncorrected_error && recoverable) ? " recoverable" : "",
- mscod, errcode,
- res->socket, res->imc, res->rank,
- res->bank_group, res->bank_address, res->row, res->column);
-
- edac_dbg(0, "%s\n", msg);
+ edac_dbg(0, "%s\n", skx_msg);
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
res->channel, res->dimm, -1,
- optype, msg);
+ optype, skx_msg);
+}
+
+static struct mem_ctl_info *get_mci(int src_id, int lmc)
+{
+ struct skx_dev *d;
+
+ if (lmc > NUM_IMC - 1) {
+ skx_printk(KERN_ERR, "Bad lmc %d\n", lmc);
+ return NULL;
+ }
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->imc[0].src_id == src_id)
+ return d->imc[lmc].mci;
+ }
+
+ skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc);
+
+ return NULL;
}
static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
@@ -1040,10 +1126,23 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
+ memset(&res, 0, sizeof(res));
res.addr = mce->addr;
- if (!skx_decode(&res))
+
+ if (adxl_component_count) {
+ if (!skx_adxl_decode(&res))
+ return NOTIFY_DONE;
+
+ mci = get_mci(res.socket, res.imc);
+ } else {
+ if (!skx_decode(&res))
+ return NOTIFY_DONE;
+
+ mci = res.dev->imc[res.imc].mci;
+ }
+
+ if (!mci)
return NOTIFY_DONE;
- mci = res.dev->imc[res.imc].mci;
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
@@ -1094,6 +1193,62 @@ static void skx_remove(void)
}
}
+static void __init skx_adxl_get(void)
+{
+ const char * const *names;
+ int i, j;
+
+ names = adxl_get_component_names();
+ if (!names) {
+ skx_printk(KERN_NOTICE, "No firmware support for address translation.");
+ skx_printk(KERN_CONT, " Only decoding DDR4 address!\n");
+ return;
+ }
+
+ for (i = 0; i < INDEX_MAX; i++) {
+ for (j = 0; names[j]; j++) {
+ if (!strcmp(component_names[i], names[j])) {
+ component_indices[i] = j;
+ break;
+ }
+ }
+
+ if (!names[j])
+ goto err;
+ }
+
+ adxl_component_names = names;
+ while (*names++)
+ adxl_component_count++;
+
+ adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values),
+ GFP_KERNEL);
+ if (!adxl_values) {
+ adxl_component_count = 0;
+ return;
+ }
+
+ adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
+ if (!adxl_msg) {
+ adxl_component_count = 0;
+ kfree(adxl_values);
+ }
+
+ return;
+err:
+ skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ",
+ component_names[i]);
+ for (j = 0; names[j]; j++)
+ skx_printk(KERN_CONT, "%s ", names[j]);
+ skx_printk(KERN_CONT, "\n");
+}
+
+static void __exit skx_adxl_put(void)
+{
+ kfree(adxl_values);
+ kfree(adxl_msg);
+}
+
/*
* skx_init:
* make sure we are running on the correct cpu model
@@ -1158,6 +1313,15 @@ static int __init skx_init(void)
}
}
+ skx_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
+ if (!skx_msg) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ if (nvdimm_count)
+ skx_adxl_get();
+
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1176,6 +1340,9 @@ static void __exit skx_exit(void)
edac_dbg(2, "\n");
mce_unregister_decode_chain(&skx_mce_dec);
skx_remove();
+ if (nvdimm_count)
+ skx_adxl_put();
+ kfree(skx_msg);
teardown_skx_debug();
}
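
[Standalone model of how skx_adxl_decode() above assembles the decoded-address string: components whose value is all-ones are treated as "not reported" and skipped, and the running length is checked so the message never overruns the buffer. The component names are the ones listed in the patch; the values are made up.]

    #include <stdio.h>
    #include <stdint.h>

    #define MSG_SIZE 1024

    int main(void)
    {
            static const char * const names[] = {
                    "ProcessorSocketId", "MemoryControllerId",
                    "ChannelId", "DimmSlotId",
            };
            uint64_t values[] = { 0, 1, ~0ull, 2 };     /* ChannelId not reported */
            char msg[MSG_SIZE] = "";
            int i, len = 0;

            for (i = 0; i < 4; i++) {
                    if (values[i] == ~0ull)             /* skip unreported fields */
                            continue;

                    len += snprintf(msg + len, MSG_SIZE - len, " %s:0x%llx",
                                    names[i], (unsigned long long)values[i]);
                    if (MSG_SIZE - len <= 0)            /* buffer exhausted */
                            break;
            }

            printf("decoded:%s\n", msg);
            return 0;
    }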
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 7670e8dda829..7273e5082b41 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -145,34 +145,6 @@ config EFI_PCDP
See DIG64_HCDPv20_042804.pdf available from
<http://www.dig64.org/specifications/>
-config DELL_RBU
- tristate "BIOS update support for DELL systems via sysfs"
- depends on X86
- select FW_LOADER
- select FW_LOADER_USER_HELPER
- help
- Say m if you want to have the option of updating the BIOS for your
- DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
- supporting application to communicate with the BIOS regarding the new
- image for the image update to take effect.
- See <file:Documentation/dell_rbu.txt> for more details on the driver.
-
-config DCDBAS
- tristate "Dell Systems Management Base Driver"
- depends on X86
- help
- The Dell Systems Management Base Driver provides a sysfs interface
- for systems management software to perform System Management
- Interrupts (SMIs) and Host Control Actions (system power cycle or
- power off after OS shutdown) on certain Dell systems.
-
- See <file:Documentation/dcdbas.txt> for more details on the driver
- and the Dell systems on which Dell systems management software makes
- use of this driver.
-
- Say Y or M here to enable the driver for use by Dell systems
- management software such as Dell OpenManage.
-
config DMIID
bool "Export DMI identification via sysfs to userspace"
depends on DMI
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 13660a951437..3158dffd9914 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -11,8 +11,6 @@ obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
obj-$(CONFIG_EFI_PCDP) += pcdp.o
-obj-$(CONFIG_DELL_RBU) += dell_rbu.o
-obj-$(CONFIG_DCDBAS) += dcdbas.o
obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f2483548cde9..099d83e4e910 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -5,7 +5,7 @@
#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/dmi.h>
#include <asm/unaligned.h>
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 60a95719ecb8..ac1654f74dc7 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) "apple-properties: " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/platform_data/x86/apple.h>
@@ -235,7 +235,7 @@ static int __init map_properties(void)
*/
data->len = 0;
memunmap(data);
- free_bootmem_late(pa_data + sizeof(*data), data_len);
+ memblock_free_late(pa_data + sizeof(*data), data_len);
return ret;
}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 5fc70520e04c..fa2904fb841f 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,7 +15,7 @@
static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
- return memblock_alloc(size, 0);
+ return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}
static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 2224f1dc074b..72d9ea18270b 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -18,7 +18,7 @@
* GNU General Public License for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device.h>
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 5de3ed29282c..d168c87c7d30 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -19,7 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -333,7 +333,8 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0);
+ entry = memblock_alloc(sizeof(struct firmware_map_entry),
+ SMP_CACHE_BYTES);
if (WARN_ON(!entry))
return -ENOMEM;
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index ae861342626e..d92f5b87c251 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -638,7 +638,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
}
ffdc_iov.iov_base = ffdc;
ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
- iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+ iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
cmd[0] = cpu_to_be32(2);
cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
@@ -735,7 +735,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
rbytes = (*resp_len) * sizeof(__be32);
resp_iov.iov_base = response;
resp_iov.iov_len = rbytes;
- iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes);
+ iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes);
/* Perform the command */
mutex_lock(&sbefifo->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 297a5490ad8c..0a4fba196b84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -135,7 +135,8 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
}
return 0;
@@ -517,7 +518,8 @@ static int acp_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = state == AMD_PG_STATE_GATE ? true : false;
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e4dd09a5072..30bc345d6fdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1493,8 +1493,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
- if (amdgpu_sriov_vf(adev))
- adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1600,7 +1598,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
}
}
- if (adev->powerplay.pp_funcs->load_firmware) {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
if (r) {
pr_err("firmware loading failed\n");
@@ -3341,7 +3339,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
kthread_park(ring->sched.thread);
- if (job && job->base.sched == &ring->sched)
+ if (job && job->base.sched != &ring->sched)
continue;
drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 28781414d71c..943dbf3c5da1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -114,8 +114,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 790fd5408ddf..1a656b8657f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -392,7 +392,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
return;
- if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 94055a485e01..59cc678de8c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -704,7 +704,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret;
if (adev->powerplay.pp_funcs->force_clock_level)
- amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+
+ if (ret)
+ return -EINVAL;
return count;
}
@@ -737,7 +740,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return ret;
if (adev->powerplay.pp_funcs->force_clock_level)
- amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+
+ if (ret)
+ return -EINVAL;
return count;
}
@@ -770,7 +776,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
return ret;
if (adev->powerplay.pp_funcs->force_clock_level)
- amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+
+ if (ret)
+ return -EINVAL;
return count;
}
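
The three sysfs store hunks above now propagate a force_clock_level failure as -EINVAL instead of always returning the byte count. A hedged sketch of that store-callback convention, with made-up helper names (not the amdgpu functions):

```c
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Pretend backend: rejects an empty clock mask. */
static int apply_clock_mask(unsigned int mask)
{
	return mask ? 0 : -EINVAL;
}

/* On success return the bytes consumed; on failure a negative errno. */
static long store_clock_mask(const char *buf, size_t count)
{
	unsigned int mask = 0;

	if (sscanf(buf, "%x", &mask) != 1)
		return -EINVAL;

	if (apply_clock_mask(mask))
		return -EINVAL;	/* propagate the failure instead of count */

	return (long)count;
}

int main(void)
{
	printf("%ld\n", store_clock_mask("3\n", 2));	/* 2: accepted */
	printf("%ld\n", store_clock_mask("0\n", 2));	/* -22: rejected */
	return 0;
}
```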
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6904d794d60a..352b30409060 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -542,7 +542,8 @@ static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor *cursor)
{
amdgpu_vm_pt_next(adev, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ if (cursor->pfn != ~0ll)
+ while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
@@ -3234,8 +3235,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
rbtree_postorder_for_each_entry_safe(mapping, tmp,
&vm->va.rb_root, rb) {
+ /* Don't remove the mapping here, we don't want to trigger a
+ * rebalance and the tree is about to be destroyed anyway.
+ */
list_del(&mapping->list);
- amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 3d0f277a6523..617b0c8908a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4815,8 +4815,10 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
if (r)
goto done;
- /* Test KCQs */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ /* Test KCQs - reversing the order of rings seems to fix ring test failure
+ * after GPU reset
+ */
+ for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
ring = &adev->gfx.compute_ring[i];
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 14649f8475f3..fd23ba1226a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -280,7 +280,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
return;
if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 04fa3d972636..7a8c9172d30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1366,7 +1366,8 @@ static int sdma_v4_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
sdma_v4_0_init_golden_registers(adev);
@@ -1386,7 +1387,8 @@ static int sdma_v4_0_hw_fini(void *handle)
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
- if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
+ && adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e224f23e2215..b0df6dc9a775 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1524,6 +1524,13 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+ /*
+ * PWM interprets 0 as 100% rather than 0% because of a HW
+ * limitation for level 0. So limit the minimum brightness level
+ * to 1.
+ */
+ if (bd->props.brightness < 1)
+ return 1;
if (dc_link_set_backlight_level(dm->backlight_link,
bd->props.brightness, 0, 0))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 0fab64a2a915..12001a006b2d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -101,7 +101,7 @@ bool dm_pp_apply_display_requirements(
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
}
- if (adev->powerplay.pp_funcs->display_configuration_change)
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
@@ -304,7 +304,7 @@ bool dm_pp_get_clock_levels_by_type(
struct amd_pp_simple_clock_info validation_clks = { 0 };
uint32_t i;
- if (adev->powerplay.pp_funcs->get_clock_by_type) {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
@@ -315,7 +315,7 @@ bool dm_pp_get_clock_levels_by_type(
pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
- if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
pp_handle, &validation_clks)) {
/* Error in pplib. Provide default values. */
@@ -398,6 +398,9 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
struct pp_clock_levels_with_voltage pp_clk_info = {0};
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
+ return false;
+
if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
dc_to_pp_clock_type(clk_type),
&pp_clk_info))
@@ -438,7 +441,7 @@ bool dm_pp_apply_clock_for_voltage_request(
if (!pp_clock_request.clock_type)
return false;
- if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
@@ -455,7 +458,7 @@ bool dm_pp_get_static_clocks(
struct amd_pp_clock_info pp_clk_info = {0};
int ret = 0;
- if (adev->powerplay.pp_funcs->get_current_clocks)
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
@@ -505,6 +508,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+ if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
+ return;
+
for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
if (ranges->reader_wm_sets[i].wm_inst > 3)
wm_dce_clocks[i].wm_set_id = WM_SET_A;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index de190935f0a4..e3624ca24574 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -568,7 +568,7 @@ static struct input_pixel_processor *dce110_ipp_create(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
- .max_hdmi_pixel_clock = 594000,
+ .max_hdmi_pixel_clock = 300000,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true
};
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a407892905af..c0d9f332baed 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -40,8 +40,6 @@
#define LITTLEENDIAN_CPU
#endif
-#undef READ
-#undef WRITE
#undef FRAME_SIZE
#define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index e8964cae6b93..d6aa1d414320 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -723,11 +723,14 @@ static int pp_dpm_force_clock_level(void *handle,
pr_info("%s was not implemented.\n", __func__);
return 0;
}
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_info("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
mutex_lock(&hwmgr->smu_lock);
- if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
- ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
- else
- ret = -EINVAL;
+ ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -963,6 +966,7 @@ static int pp_dpm_switch_power_profile(void *handle,
static int pp_set_power_limit(void *handle, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
+ uint32_t max_power_limit;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -975,7 +979,13 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
if (limit == 0)
limit = hwmgr->default_power_limit;
- if (limit > hwmgr->default_power_limit)
+ max_power_limit = hwmgr->default_power_limit;
+ if (hwmgr->od_enabled) {
+ max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
+ max_power_limit /= 100;
+ }
+
+ if (limit > max_power_limit)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -994,8 +1004,13 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
mutex_lock(&hwmgr->smu_lock);
- if (default_limit)
+ if (default_limit) {
*limit = hwmgr->default_power_limit;
+ if (hwmgr->od_enabled) {
+ *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
+ *limit /= 100;
+ }
+ }
else
*limit = hwmgr->power_limit;
@@ -1303,12 +1318,12 @@ static int pp_enable_mgpu_fan_boost(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en)
+ if (!hwmgr)
return -EINVAL;
- if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) {
+ if (!hwmgr->pm_en ||
+ hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
return 0;
- }
mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
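
With overdrive enabled, pp_set_power_limit now accepts values up to the default limit scaled by the board's TDP overdrive percentage, and pp_get_power_limit reports the same scaled ceiling. A small worked example of that integer arithmetic (illustrative values, not taken from a real board):

```c
#include <assert.h>
#include <stdint.h>

/* Same integer math as the patch: scale by (100 + TDPODLimit) / 100. */
static uint32_t max_power_limit(uint32_t default_limit,
				uint32_t tdp_od_percent, int od_enabled)
{
	uint32_t max = default_limit;

	if (od_enabled) {
		max *= (100 + tdp_od_percent);
		max /= 100;
	}
	return max;
}

int main(void)
{
	/* e.g. a 220 W default limit with 20% overdrive headroom */
	assert(max_power_limit(220, 20, 1) == 264);
	assert(max_power_limit(220, 20, 0) == 220);
	return 0;
}
```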
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 6c99cbf51c08..ed35ec0341e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3588,9 +3588,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
break;
}
- if (i >= sclk_table->count)
+ if (i >= sclk_table->count) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
+ sclk_table->dpm_levels[i-1].value = sclk;
+ } else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
*/
@@ -3605,9 +3606,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
break;
}
- if (i >= mclk_table->count)
+ if (i >= mclk_table->count) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 4714b5b59825..99a33c33a32c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -718,7 +718,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
- 100);
+ 1000);
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
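
The watermark fix changes the divisor because the incoming clock ranges are expressed in kHz while the 16-bit watermark table fields presumably hold MHz; dividing by 100 would overstate the value tenfold. A hedged sketch of the conversion (names are illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Convert a clock given in kHz to a 16-bit MHz table field. */
static uint16_t khz_to_mhz_u16(uint32_t clk_khz)
{
	return (uint16_t)(clk_khz / 1000);
}

int main(void)
{
	assert(khz_to_mhz_u16(1200000) == 1200);	/* 1.2 GHz DCEF clock */
	/* the old "/ 100" would have produced 12000 here, 10x too large */
	return 0;
}
```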
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 419a1d77d661..8c4db86bb4b7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -1333,7 +1333,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
dpm_table->dpm_levels[dpm_table->count-1].value;
-
vega10_init_dpm_state(&(dpm_table->dpm_state));
data->dpm_table.eclk_table.count = 0;
@@ -3249,6 +3248,37 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
struct vega10_hwmgr *data = hwmgr->backend;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct vega10_power_state *vega10_ps =
+ cast_const_phw_vega10_power_state(states->pnew_state);
+ struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+ uint32_t sclk = vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].gfx_clock;
+ struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+ uint32_t mclk = vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].mem_clock;
+ uint32_t i;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
@@ -4529,11 +4559,13 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
if (vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock >
- hwmgr->platform_descriptor.overdriveLimit.engineClock)
+ hwmgr->platform_descriptor.overdriveLimit.engineClock) {
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock =
hwmgr->platform_descriptor.overdriveLimit.engineClock;
-
+ pr_warn("max sclk supported by vbios is %d\n",
+ hwmgr->platform_descriptor.overdriveLimit.engineClock);
+ }
return 0;
}
@@ -4581,10 +4613,13 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
if (vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock >
- hwmgr->platform_descriptor.overdriveLimit.memoryClock)
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock =
hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+ pr_warn("max mclk supported by vbios is %d\n",
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 9600e2f226e9..74bc37308dc0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2356,6 +2356,13 @@ static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
return vega12_disable_gfx_off(hwmgr);
}
+static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ PHM_PerformanceLevelDesignation designation, uint32_t index,
+ PHM_PerformanceLevel *level)
+{
+ return 0;
+}
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2406,6 +2413,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
+ .get_performance_level = vega12_get_performance_level,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index b4dbbb7c334c..57143d51e3ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -1875,38 +1875,20 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
return ret;
}
-static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
+static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
+ PPCLK_e clk_id, uint32_t *clk_freq)
{
- uint32_t gfx_clk = 0;
int ret = 0;
- *gfx_freq = 0;
+ *clk_freq = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
- "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
+ PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+ "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
return ret);
- gfx_clk = smum_get_argument(hwmgr);
+ *clk_freq = smum_get_argument(hwmgr);
- *gfx_freq = gfx_clk * 100;
-
- return 0;
-}
-
-static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
-{
- uint32_t mem_clk = 0;
- int ret = 0;
-
- *mclk_freq = 0;
-
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
- "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
- return ret);
- mem_clk = smum_get_argument(hwmgr);
-
- *mclk_freq = mem_clk * 100;
+ *clk_freq = *clk_freq * 100;
return 0;
}
@@ -1937,12 +1919,16 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
+ ret = vega20_get_current_clk_freq(hwmgr,
+ PPCLK_GFXCLK,
+ (uint32_t *)value);
if (!ret)
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value);
+ ret = vega20_get_current_clk_freq(hwmgr,
+ PPCLK_UCLK,
+ (uint32_t *)value);
if (!ret)
*size = 4;
break;
@@ -2012,7 +1998,6 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
switch (clk_type) {
case amd_pp_dcef_clock:
- clk_freq = clock_req->clock_freq_in_khz / 100;
clk_select = PPCLK_DCEFCLK;
break;
case amd_pp_disp_clock:
@@ -2041,11 +2026,20 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
return result;
}
+static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ PHM_PerformanceLevelDesignation designation, uint32_t index,
+ PHM_PerformanceLevel *level)
+{
+ return 0;
+}
+
static int vega20_notify_smc_display_config_after_ps_adjustment(
struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
+ struct vega20_single_dpm_table *dpm_table =
+ &data->dpm_table.mem_table;
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
int ret = 0;
@@ -2063,7 +2057,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+ clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
@@ -2076,6 +2070,15 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
}
}
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ "[SetHardMinFreq] Set hard min uclk failed!",
+ return ret);
+ }
+
return 0;
}
@@ -2353,7 +2356,7 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
for (i = 0; i < count; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -2383,7 +2386,7 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
for (i = 0; i < count; i++) {
clocks->data[i].clocks_in_khz =
data->mclk_latency_table.entries[i].frequency =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us =
data->mclk_latency_table.entries[i].latency =
vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -2408,7 +2411,7 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
for (i = 0; i < count; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -2431,7 +2434,7 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
for (i = 0; i < count; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -2582,11 +2585,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- if (input_clk < clocks.data[0].clocks_in_khz / 100 ||
+ if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
pr_info("clock freq %d is not within allowed range [%d - %d]\n",
input_clk,
- clocks.data[0].clocks_in_khz / 100,
+ clocks.data[0].clocks_in_khz / 1000,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
return -EINVAL;
}
@@ -2726,7 +2729,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- ret = vega20_get_current_gfx_clk_freq(hwmgr, &now);
+ ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
PP_ASSERT_WITH_CODE(!ret,
"Attempt to get current gfx clk Failed!",
return ret);
@@ -2738,12 +2741,12 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
+ i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now) ? "*" : "");
break;
case PP_MCLK:
- ret = vega20_get_current_mclk_freq(hwmgr, &now);
+ ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
PP_ASSERT_WITH_CODE(!ret,
"Attempt to get current mclk freq Failed!",
return ret);
@@ -2755,7 +2758,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
+ i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now) ? "*" : "");
break;
@@ -2820,7 +2823,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
- clocks.data[0].clocks_in_khz / 100,
+ clocks.data[0].clocks_in_khz / 1000,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@@ -3476,6 +3479,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
vega20_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request =
vega20_display_clock_voltage_request,
+ .get_performance_level =
+ vega20_get_performance_level,
/* UMD pstate, profile related */
.force_dpm_level =
vega20_dpm_force_dpm_level,
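
Two conventions recur throughout the vega20 hunks above: DPM level values are stored in MHz (hence the `* 1000` when reporting `clocks_in_khz` and the `/ 1000` when printing MHz), and SMU clock messages pack the clock ID into the upper 16 bits of the parameter with the level or frequency in the lower 16 bits. A small illustrative sketch of both, with made-up names:

```c
#include <assert.h>
#include <stdint.h>

/* DPM table entries hold MHz; userspace-facing reports use kHz. */
static uint32_t dpm_level_to_khz(uint32_t level_mhz)
{
	return level_mhz * 1000;
}

/* SMU parameter layout: clock ID in bits 31:16, value in bits 15:0. */
static uint32_t smu_clk_param(uint16_t clk_id, uint16_t value)
{
	return ((uint32_t)clk_id << 16) | value;
}

int main(void)
{
	assert(dpm_level_to_khz(1750) == 1750000);	/* 1.75 GHz */
	assert(smu_clk_param(0x2, 850) == 0x00020352);	/* e.g. UCLK @ 850 MHz */
	return 0;
}
```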
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
index e5f7f8230065..97f8a1a970c3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
@@ -642,8 +642,14 @@ static int check_powerplay_tables(
"Unsupported PPTable format!", return -1);
PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
"Invalid PowerPlay Table!", return -1);
- PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION,
- "Unmatch PPTable version, vbios update may be needed!", return -1);
+
+ if (powerplay_table->smcPPTable.Version != PPTABLE_V20_SMU_VERSION) {
+ pr_info("Unmatch PPTable version: "
+ "pptable from VBIOS is V%d while driver supported is V%d!",
+ powerplay_table->smcPPTable.Version,
+ PPTABLE_V20_SMU_VERSION);
+ return -EINVAL;
+ }
//dump_pptable(&powerplay_table->smcPPTable);
@@ -716,10 +722,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
"[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
return -1);
- memset(ppsmc_pptable->Padding32,
- 0,
- sizeof(struct atom_smc_dpm_info_v4_4) -
- sizeof(struct atom_common_table_header));
ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
@@ -778,22 +780,19 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
- if ((smc_dpm_table->table_header.format_revision == 4) &&
- (smc_dpm_table->table_header.content_revision == 4)) {
- for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
- ppsmc_pptable->I2cControllers[i].Enabled =
- smc_dpm_table->i2ccontrollers[i].enabled;
- ppsmc_pptable->I2cControllers[i].SlaveAddress =
- smc_dpm_table->i2ccontrollers[i].slaveaddress;
- ppsmc_pptable->I2cControllers[i].ControllerPort =
- smc_dpm_table->i2ccontrollers[i].controllerport;
- ppsmc_pptable->I2cControllers[i].ThermalThrottler =
- smc_dpm_table->i2ccontrollers[i].thermalthrottler;
- ppsmc_pptable->I2cControllers[i].I2cProtocol =
- smc_dpm_table->i2ccontrollers[i].i2cprotocol;
- ppsmc_pptable->I2cControllers[i].I2cSpeed =
- smc_dpm_table->i2ccontrollers[i].i2cspeed;
- }
+ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+ ppsmc_pptable->I2cControllers[i].Enabled =
+ smc_dpm_table->i2ccontrollers[i].enabled;
+ ppsmc_pptable->I2cControllers[i].SlaveAddress =
+ smc_dpm_table->i2ccontrollers[i].slaveaddress;
+ ppsmc_pptable->I2cControllers[i].ControllerPort =
+ smc_dpm_table->i2ccontrollers[i].controllerport;
+ ppsmc_pptable->I2cControllers[i].ThermalThrottler =
+ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+ ppsmc_pptable->I2cControllers[i].I2cProtocol =
+ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+ ppsmc_pptable->I2cControllers[i].I2cSpeed =
+ smc_dpm_table->i2ccontrollers[i].i2cspeed;
}
return 0;
@@ -882,15 +881,10 @@ static int init_powerplay_table_information(
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
- if (powerplay_table->smcPPTable.Version <= 2)
- memcpy(pptable_information->smc_pptable,
- &(powerplay_table->smcPPTable),
- sizeof(PPTable_t) -
- sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT);
- else
- memcpy(pptable_information->smc_pptable,
- &(powerplay_table->smcPPTable),
- sizeof(PPTable_t));
+ memcpy(pptable_information->smc_pptable,
+ &(powerplay_table->smcPPTable),
+ sizeof(PPTable_t));
+
result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 2998a49960ed..63d5cf691549 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -29,7 +29,7 @@
// any structure is changed in this file
#define SMU11_DRIVER_IF_VERSION 0x12
-#define PPTABLE_V20_SMU_VERSION 2
+#define PPTABLE_V20_SMU_VERSION 3
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_VCLK_DPM_LEVELS 8
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index f836d30fdd44..09b844ec3eab 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -71,7 +71,11 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
+ /* Read the last message to SMU, to report actual cause */
+ uint32_t val = cgs_read_register(hwmgr->device,
+ mmSMU_MP1_SRBM2P_MSG_0);
pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
+ pr_err("SMU still servicing msg (0x%04x)\n", val);
return result;
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index f8a931cf3665..680566d97adc 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -458,18 +458,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
unsigned int val;
int ret;
- /*
- * FIXME:
- * This 70ms was found necessary by experimentation. If it's not
- * present, link training fails. It seems like it can go anywhere from
- * pre_enable() up to semi-auto link training initiation below.
- *
- * Neither the datasheet for the bridge nor the panel tested mention a
- * delay of this magnitude in the timing requirements. So for now, add
- * the mystery delay until someone figures out a better fix.
- */
- msleep(70);
-
/* DSI_A lane config */
val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
@@ -536,7 +524,22 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
/* configure bridge ref_clk */
ti_sn_bridge_set_refclk_freq(pdata);
- /* in case drm_panel is connected then HPD is not supported */
+ /*
+ * HPD on this bridge chip is a bit useless. This is an eDP bridge
+ * so the HPD is an internal signal that's only there to signal that
+ * the panel is done powering up. ...but the bridge chip debounces
+ * this signal by between 100 ms and 400 ms (depending on process,
+ * voltage, and temperature--I measured it at about 200 ms). One
+ * particular panel asserted HPD 84 ms after it was powered on meaning
+ * that we saw HPD 284 ms after power on. ...but the same panel said
+ * that instead of looking at HPD you could just hardcode a delay of
+ * 200 ms. We'll assume that the panel driver will have the hardcoded
+ * delay in its prepare and always disable HPD.
+ *
+ * If HPD somehow makes sense on some future panel we'll have to
+ * change this to be conditional on someone specifying that HPD should
+ * be used.
+ */
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
HPD_DISABLE);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 701cb334e1ea..d8b526b7932c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -308,6 +308,26 @@ update_connector_routing(struct drm_atomic_state *state,
return 0;
}
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_connector_state->crtc);
+ /*
+ * For compatibility with legacy users, we want to make sure that
+ * we allow DPMS On->Off modesets on unregistered connectors. Modesets
+ * which would result in anything else must be considered invalid, to
+ * avoid turning on new displays on dead connectors.
+ *
+ * Since the connector can be unregistered at any point during an
+ * atomic check or commit, this is racy. But that's OK: all we care
+ * about is ensuring that userspace can't do anything but shut off the
+ * display on a connector that was destroyed after its been notified,
+ * not before.
+ */
+ if (drm_connector_is_unregistered(connector) && crtc_state->active) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
funcs = connector->helper_private;
if (funcs->atomic_best_encoder)
@@ -352,7 +372,6 @@ update_connector_routing(struct drm_atomic_state *state,
set_best_encoder(state, new_connector_state, new_encoder);
- crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
crtc_state->connectors_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 1e40e5decbe9..4943cef178be 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -379,7 +379,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
/* The connector should have been removed from userspace long before
* it is finally destroyed.
*/
- if (WARN_ON(connector->registered))
+ if (WARN_ON(connector->registration_state ==
+ DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
if (connector->tile_group) {
@@ -436,7 +437,7 @@ int drm_connector_register(struct drm_connector *connector)
return 0;
mutex_lock(&connector->mutex);
- if (connector->registered)
+ if (connector->registration_state != DRM_CONNECTOR_INITIALIZING)
goto unlock;
ret = drm_sysfs_connector_add(connector);
@@ -456,7 +457,7 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
- connector->registered = true;
+ connector->registration_state = DRM_CONNECTOR_REGISTERED;
goto unlock;
err_debugfs:
@@ -478,7 +479,7 @@ EXPORT_SYMBOL(drm_connector_register);
void drm_connector_unregister(struct drm_connector *connector)
{
mutex_lock(&connector->mutex);
- if (!connector->registered) {
+ if (connector->registration_state != DRM_CONNECTOR_REGISTERED) {
mutex_unlock(&connector->mutex);
return;
}
@@ -489,7 +490,7 @@ void drm_connector_unregister(struct drm_connector *connector)
drm_sysfs_connector_remove(connector);
drm_debugfs_connector_remove(connector);
- connector->registered = false;
+ connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
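
The boolean `registered` flag becomes a three-state field so a connector can be distinguished as never registered, currently registered, or already torn down; the i915 hunks below then use a helper instead of peeking at the raw flag. A hedged sketch of what such a tri-state and helper look like (the names mirror the patch, but these definitions are illustrative, not the drm header):

```c
#include <assert.h>
#include <stdbool.h>

enum connector_registration_state {
	CONNECTOR_INITIALIZING = 0,	/* not yet exposed to userspace */
	CONNECTOR_REGISTERED,		/* visible via sysfs/debugfs */
	CONNECTOR_UNREGISTERED,		/* removed; only DPMS-off is allowed */
};

struct connector {
	enum connector_registration_state registration_state;
};

static bool connector_is_unregistered(const struct connector *c)
{
	return c->registration_state == CONNECTOR_UNREGISTERED;
}

int main(void)
{
	struct connector c = { .registration_state = CONNECTOR_INITIALIZING };

	assert(!connector_is_unregistered(&c));	/* registration may still happen */
	c.registration_state = CONNECTOR_UNREGISTERED;
	assert(connector_is_unregistered(&c));
	return 0;
}
```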
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ff0bfc65a8c1..b506e3622b08 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -122,6 +122,9 @@ static const struct edid_quirk {
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
{ "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+ /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
+ { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3fae4dab295f..13f9b56a9ce7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5102,19 +5102,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
*/
status = connector_status_disconnected;
goto out;
- } else {
- /*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
+ }
+
+ /*
+ * Some external monitors do not signal loss of link synchronization
+ * with an IRQ_HPD, so force a link status check.
+ */
+ if (!intel_dp_is_edp(intel_dp)) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp_retrain_link(encoder, ctx);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7f155b4f1a7d..1b00f8ea145b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -77,7 +77,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->pbn = mst_pbn;
/* Zombie connectors can't have VCPI slots */
- if (READ_ONCE(connector->registered)) {
+ if (!drm_connector_is_unregistered(connector)) {
slots = drm_dp_atomic_find_vcpi_slots(state,
&intel_dp->mst_mgr,
port,
@@ -313,7 +313,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!READ_ONCE(connector->registered))
+ if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -329,7 +329,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!READ_ONCE(connector->registered))
+ if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
intel_connector->port);
@@ -372,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
- if (!READ_ONCE(connector->registered))
+ if (drm_connector_is_unregistered(connector))
return MODE_ERROR;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6bb78076b5b5..6cbbae3f438b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -881,22 +881,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
{
struct nv50_head *head = nv50_head(connector_state->crtc);
struct nv50_mstc *mstc = nv50_mstc(connector);
- if (mstc->port) {
- struct nv50_mstm *mstm = mstc->mstm;
- return &mstm->msto[head->base.index]->encoder;
- }
- return NULL;
+
+ return &mstc->mstm->msto[head->base.index]->encoder;
}
static struct drm_encoder *
nv50_mstc_best_encoder(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
- if (mstc->port) {
- struct nv50_mstm *mstm = mstc->mstm;
- return &mstm->msto[0]->encoder;
- }
- return NULL;
+
+ return &mstc->mstm->msto[0]->encoder;
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 97964f7f2ace..a04ffb3b2174 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -56,6 +56,8 @@ struct panel_desc {
/**
* @prepare: the time (in milliseconds) that it takes for the panel to
* become ready and start receiving video data
+ * @hpd_absent_delay: Add this to the prepare delay if we know Hot
+ * Plug Detect isn't used.
* @enable: the time (in milliseconds) that it takes for the panel to
* display the first valid frame after starting to receive
* video data
@@ -66,6 +68,7 @@ struct panel_desc {
*/
struct {
unsigned int prepare;
+ unsigned int hpd_absent_delay;
unsigned int enable;
unsigned int disable;
unsigned int unprepare;
@@ -79,6 +82,7 @@ struct panel_simple {
struct drm_panel base;
bool prepared;
bool enabled;
+ bool no_hpd;
const struct panel_desc *desc;
@@ -202,6 +206,7 @@ static int panel_simple_unprepare(struct drm_panel *panel)
static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
+ unsigned int delay;
int err;
if (p->prepared)
@@ -215,8 +220,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
gpiod_set_value_cansleep(p->enable_gpio, 1);
- if (p->desc->delay.prepare)
- msleep(p->desc->delay.prepare);
+ delay = p->desc->delay.prepare;
+ if (p->no_hpd)
+ delay += p->desc->delay.hpd_absent_delay;
+ if (delay)
+ msleep(delay);
p->prepared = true;
@@ -305,6 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->prepared = false;
panel->desc = desc;
+ panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
@@ -1363,7 +1373,7 @@ static const struct panel_desc innolux_n156bge_l21 = {
},
};
-static const struct drm_display_mode innolux_tv123wam_mode = {
+static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
.clock = 206016,
.hdisplay = 2160,
.hsync_start = 2160 + 48,
@@ -1377,15 +1387,16 @@ static const struct drm_display_mode innolux_tv123wam_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
-static const struct panel_desc innolux_tv123wam = {
- .modes = &innolux_tv123wam_mode,
+static const struct panel_desc innolux_p120zdg_bf1 = {
+ .modes = &innolux_p120zdg_bf1_mode,
.num_modes = 1,
.bpc = 8,
.size = {
- .width = 259,
- .height = 173,
+ .width = 254,
+ .height = 169,
},
.delay = {
+ .hpd_absent_delay = 200,
.unprepare = 500,
},
};
@@ -2445,8 +2456,8 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
- .compatible = "innolux,tv123wam",
- .data = &innolux_tv123wam,
+ .compatible = "innolux,p120zdg-bf1",
+ .data = &innolux_p120zdg_bf1,
}, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 5ed319e3b084..41e9935fc584 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -149,6 +149,7 @@ config HID_APPLEIR
config HID_ASUS
tristate "Asus"
depends on LEDS_CLASS
+ depends on ASUS_WMI || ASUS_WMI=n
---help---
Support for Asus notebook built-in keyboard and touchpad via i2c, and
the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 88a5672f42cd..dc6d6477e961 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -29,6 +29,7 @@
#include <linux/dmi.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/asus-wmi.h>
#include <linux/input/mt.h>
#include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */
@@ -349,6 +350,24 @@ static void asus_kbd_backlight_work(struct work_struct *work)
hid_err(led->hdev, "Asus failed to set keyboard backlight: %d\n", ret);
}
+/* WMI-based keyboard backlight LED control (via asus-wmi driver) takes
+ * precedence. We only activate HID-based backlight control when the
+ * WMI control is not available.
+ */
+static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
+{
+ u32 value;
+ int ret;
+
+ ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
+ ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
+ hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
+ if (ret)
+ return false;
+
+ return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT);
+}
+
static int asus_kbd_register_leds(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
@@ -436,7 +455,9 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
drvdata->input = input;
- if (drvdata->enable_backlight && asus_kbd_register_leds(hdev))
+ if (drvdata->enable_backlight &&
+ !asus_kbd_wmi_led_control_present(hdev) &&
+ asus_kbd_register_leds(hdev))
hid_warn(hdev, "Failed to initialize backlight.\n");
return 0;
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 9ca1feaba98f..33da07d64494 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -3,141 +3,15 @@
* Intel BayTrail PMIC I2C bus semaphore implementation
* Copyright (c) 2014, Intel Corporation.
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/pm_qos.h>
#include <asm/iosf_mbi.h>
#include "i2c-designware-core.h"
-#define SEMAPHORE_TIMEOUT 500
-#define PUNIT_SEMAPHORE 0x7
-#define PUNIT_SEMAPHORE_CHT 0x10e
-#define PUNIT_SEMAPHORE_BIT BIT(0)
-#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
-
-static unsigned long acquired;
-
-static u32 get_sem_addr(struct dw_i2c_dev *dev)
-{
- if (dev->flags & MODEL_CHERRYTRAIL)
- return PUNIT_SEMAPHORE_CHT;
- else
- return PUNIT_SEMAPHORE;
-}
-
-static int get_sem(struct dw_i2c_dev *dev, u32 *sem)
-{
- u32 addr = get_sem_addr(dev);
- u32 data;
- int ret;
-
- ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, addr, &data);
- if (ret) {
- dev_err(dev->dev, "iosf failed to read punit semaphore\n");
- return ret;
- }
-
- *sem = data & PUNIT_SEMAPHORE_BIT;
-
- return 0;
-}
-
-static void reset_semaphore(struct dw_i2c_dev *dev)
-{
- if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, get_sem_addr(dev),
- 0, PUNIT_SEMAPHORE_BIT))
- dev_err(dev->dev, "iosf failed to reset punit semaphore during write\n");
-
- pm_qos_update_request(&dev->pm_qos, PM_QOS_DEFAULT_VALUE);
-
- iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_END,
- NULL);
- iosf_mbi_punit_release();
-}
-
-static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
-{
- u32 addr;
- u32 sem = PUNIT_SEMAPHORE_ACQUIRE;
- int ret;
- unsigned long start, end;
-
- might_sleep();
-
- if (!dev || !dev->dev)
- return -ENODEV;
-
- if (!dev->release_lock)
- return 0;
-
- iosf_mbi_punit_acquire();
- iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_BEGIN,
- NULL);
-
- /*
- * Disallow the CPU to enter C6 or C7 state, entering these states
- * requires the punit to talk to the pmic and if this happens while
- * we're holding the semaphore, the SoC hangs.
- */
- pm_qos_update_request(&dev->pm_qos, 0);
-
- addr = get_sem_addr(dev);
-
- /* host driver writes to side band semaphore register */
- ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, addr, sem);
- if (ret) {
- dev_err(dev->dev, "iosf punit semaphore request failed\n");
- goto out;
- }
-
- /* host driver waits for bit 0 to be set in semaphore register */
- start = jiffies;
- end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
- do {
- ret = get_sem(dev, &sem);
- if (!ret && sem) {
- acquired = jiffies;
- dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
- jiffies_to_msecs(jiffies - start));
- return 0;
- }
-
- usleep_range(1000, 2000);
- } while (time_before(jiffies, end));
-
- dev_err(dev->dev, "punit semaphore timed out, resetting\n");
-out:
- reset_semaphore(dev);
-
- ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, addr, &sem);
- if (ret)
- dev_err(dev->dev, "iosf failed to read punit semaphore\n");
- else
- dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
-
- WARN_ON(1);
-
- return -ETIMEDOUT;
-}
-
-static void baytrail_i2c_release(struct dw_i2c_dev *dev)
-{
- if (!dev || !dev->dev)
- return;
-
- if (!dev->acquire_lock)
- return;
-
- reset_semaphore(dev);
- dev_dbg(dev->dev, "punit semaphore held for %ums\n",
- jiffies_to_msecs(jiffies - acquired));
-}
-
int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
{
acpi_status status;
@@ -162,18 +36,9 @@ int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
return -EPROBE_DEFER;
dev_info(dev->dev, "I2C bus managed by PUNIT\n");
- dev->acquire_lock = baytrail_i2c_acquire;
- dev->release_lock = baytrail_i2c_release;
+ dev->acquire_lock = iosf_mbi_block_punit_i2c_access;
+ dev->release_lock = iosf_mbi_unblock_punit_i2c_access;
dev->shared_with_punit = true;
- pm_qos_add_request(&dev->pm_qos, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
return 0;
}
-
-void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
-{
- if (dev->acquire_lock)
- pm_qos_remove_request(&dev->pm_qos);
-}
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 36271cd75342..a4730111d290 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -269,7 +269,7 @@ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev)
if (!dev->acquire_lock)
return 0;
- ret = dev->acquire_lock(dev);
+ ret = dev->acquire_lock();
if (!ret)
return 0;
@@ -281,7 +281,7 @@ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev)
void i2c_dw_release_lock(struct dw_i2c_dev *dev)
{
if (dev->release_lock)
- dev->release_lock(dev);
+ dev->release_lock();
}
/*
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9ec8394f4787..b4a0b2b99a78 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -10,7 +10,6 @@
*/
#include <linux/i2c.h>
-#include <linux/pm_qos.h>
#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
I2C_FUNC_SMBUS_BYTE | \
@@ -209,7 +208,6 @@
* @fp_lcnt: fast plus LCNT value
* @hs_hcnt: high speed HCNT value
* @hs_lcnt: high speed LCNT value
- * @pm_qos: pm_qos_request used while holding a hardware lock on the bus
* @acquire_lock: function to acquire a hardware lock on the bus
* @release_lock: function to release a hardware lock on the bus
* @shared_with_punit: true if this bus is shared with the SoCs PUNIT
@@ -263,9 +261,8 @@ struct dw_i2c_dev {
u16 fp_lcnt;
u16 hs_hcnt;
u16 hs_lcnt;
- struct pm_qos_request pm_qos;
- int (*acquire_lock)(struct dw_i2c_dev *dev);
- void (*release_lock)(struct dw_i2c_dev *dev);
+ int (*acquire_lock)(void);
+ void (*release_lock)(void);
bool shared_with_punit;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
@@ -322,8 +319,6 @@ static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; }
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev);
-extern void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev);
#else
static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; }
-static inline void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) {}
#endif
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 997bbb3d925f..9eaac3be1f63 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -418,8 +418,6 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(dev->rst))
reset_control_assert(dev->rst);
- i2c_dw_remove_lock_support(dev);
-
return 0;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index dc78aa7369de..28460f6a60cc 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -306,10 +306,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
if (client->flags & I2C_CLIENT_TEN)
return -EINVAL;
- irq = irq_find_mapping(adap->host_notify_domain, client->addr);
- if (!irq)
- irq = irq_create_mapping(adap->host_notify_domain,
- client->addr);
+ irq = irq_create_mapping(adap->host_notify_domain, client->addr);
return irq > 0 ? irq : -ENXIO;
}
@@ -433,6 +430,8 @@ static int i2c_device_remove(struct device *dev)
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
+ client->irq = 0;
+
return status;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index f9f69f7111a9..44bd5b9166bb 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 676c029494e4..0e780848f59b 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -13,7 +13,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index b05022f94f18..072bb5e36c18 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -718,8 +718,7 @@ l1oip_socket_thread(void *data)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
- recvbuf_size);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size);
recvlen = sock_recvmsg(socket, &msg, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index e8ae2e54151c..0a0b8e1f4236 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dmapool.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/jiffies.h>
@@ -38,7 +38,6 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <linux/memblock.h>
#include <linux/sched/signal.h>
#include <asm/byteorder.h>
@@ -493,7 +492,7 @@ int __init smu_init (void)
goto fail_np;
}
- smu = alloc_bootmem(sizeof(struct smu_device));
+ smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
spin_lock_init(&smu->lock);
INIT_LIST_HEAD(&smu->cmd_list);
@@ -569,7 +568,7 @@ fail_msg_node:
fail_db_node:
of_node_put(smu->db_node);
fail_bootmem:
- free_bootmem(__pa(smu), sizeof(struct smu_device));
+ memblock_free(__pa(smu), sizeof(struct smu_device));
smu = NULL;
fail_np:
of_node_put(np);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f3fb5bb8c82a..ac1cffd2a09b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
!discard_bio)
continue;
bio_chain(discard_bio, bio);
- bio_clone_blkg_association(discard_bio, bio);
+ bio_clone_blkcg_association(discard_bio, bio);
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 594b462ddf0e..985d35ec6b29 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -3,7 +3,8 @@
# Makefile for the kernel multimedia device drivers.
#
-media-objs := media-device.o media-devnode.o media-entity.o
+media-objs := media-device.o media-devnode.o media-entity.o \
+ media-request.o
#
# I2C drivers should come before other drivers, otherwise they'll fail
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index d6d22cf77066..975ff5669f72 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -356,6 +356,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
}
+ call_void_bufop(q, init_buffer, vb);
+
q->bufs[vb->index] = vb;
/* Allocate video buffer memory for the MMAP type */
@@ -497,8 +499,9 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
vb->cnt_buf_init, vb->cnt_buf_cleanup,
vb->cnt_buf_prepare, vb->cnt_buf_finish);
- pr_info(" buf_queue: %u buf_done: %u\n",
- vb->cnt_buf_queue, vb->cnt_buf_done);
+ pr_info(" buf_queue: %u buf_done: %u buf_request_complete: %u\n",
+ vb->cnt_buf_queue, vb->cnt_buf_done,
+ vb->cnt_buf_request_complete);
pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
vb->cnt_mem_alloc, vb->cnt_mem_put,
vb->cnt_mem_prepare, vb->cnt_mem_finish,
@@ -683,7 +686,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
/*
- * Call queue_cancel to clean up any buffers in the PREPARED or
+ * Call queue_cancel to clean up any buffers in the
* QUEUED state which is possible if buffers were prepared or
* queued without ever calling STREAMON.
*/
@@ -930,6 +933,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+ vb->synced = false;
}
spin_lock_irqsave(&q->done_lock, flags);
@@ -942,6 +946,14 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
vb->state = state;
}
atomic_dec(&q->owned_by_drv_count);
+
+ if (vb->req_obj.req) {
+ /* This is not supported at the moment */
+ WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+
spin_unlock_irqrestore(&q->done_lock, flags);
trace_vb2_buf_done(q, vb);
@@ -976,20 +988,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
/*
* __prepare_mmap() - prepare an MMAP buffer
*/
-static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
+static int __prepare_mmap(struct vb2_buffer *vb)
{
int ret = 0;
- if (pb)
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, vb->planes);
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, vb->planes);
return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}
/*
* __prepare_userptr() - prepare a USERPTR buffer
*/
-static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
+static int __prepare_userptr(struct vb2_buffer *vb)
{
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -1000,12 +1011,10 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- if (pb) {
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, planes);
- if (ret)
- return ret;
- }
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, planes);
+ if (ret)
+ return ret;
for (plane = 0; plane < vb->num_planes; ++plane) {
/* Skip the plane if already verified */
@@ -1105,7 +1114,7 @@ err:
/*
* __prepare_dmabuf() - prepare a DMABUF buffer
*/
-static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
+static int __prepare_dmabuf(struct vb2_buffer *vb)
{
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -1116,12 +1125,10 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- if (pb) {
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, planes);
- if (ret)
- return ret;
- }
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, planes);
+ if (ret)
+ return ret;
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
@@ -1250,9 +1257,10 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
call_void_vb_qop(vb, buf_queue, vb);
}
-static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
+static int __buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ enum vb2_buffer_state orig_state = vb->state;
unsigned int plane;
int ret;
@@ -1261,26 +1269,31 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
return -EIO;
}
+ if (vb->prepared)
+ return 0;
+ WARN_ON(vb->synced);
+
vb->state = VB2_BUF_STATE_PREPARING;
switch (q->memory) {
case VB2_MEMORY_MMAP:
- ret = __prepare_mmap(vb, pb);
+ ret = __prepare_mmap(vb);
break;
case VB2_MEMORY_USERPTR:
- ret = __prepare_userptr(vb, pb);
+ ret = __prepare_userptr(vb);
break;
case VB2_MEMORY_DMABUF:
- ret = __prepare_dmabuf(vb, pb);
+ ret = __prepare_dmabuf(vb);
break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
+ break;
}
if (ret) {
dprintk(1, "buffer preparation failed: %d\n", ret);
- vb->state = VB2_BUF_STATE_DEQUEUED;
+ vb->state = orig_state;
return ret;
}
@@ -1288,11 +1301,98 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
- vb->state = VB2_BUF_STATE_PREPARED;
+ vb->synced = true;
+ vb->prepared = true;
+ vb->state = orig_state;
return 0;
}
+static int vb2_req_prepare(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int ret;
+
+ if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
+ return -EINVAL;
+
+ mutex_lock(vb->vb2_queue->lock);
+ ret = __buf_prepare(vb);
+ mutex_unlock(vb->vb2_queue->lock);
+ return ret;
+}
+
+static void __vb2_dqbuf(struct vb2_buffer *vb);
+
+static void vb2_req_unprepare(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ mutex_lock(vb->vb2_queue->lock);
+ __vb2_dqbuf(vb);
+ vb->state = VB2_BUF_STATE_IN_REQUEST;
+ mutex_unlock(vb->vb2_queue->lock);
+ WARN_ON(!vb->req_obj.req);
+}
+
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req);
+
+static void vb2_req_queue(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ mutex_lock(vb->vb2_queue->lock);
+ vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ mutex_unlock(vb->vb2_queue->lock);
+}
+
+static void vb2_req_unbind(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ call_void_bufop(vb->vb2_queue, init_buffer, vb);
+}
+
+static void vb2_req_release(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+}
+
+static const struct media_request_object_ops vb2_core_req_ops = {
+ .prepare = vb2_req_prepare,
+ .unprepare = vb2_req_unprepare,
+ .queue = vb2_req_queue,
+ .unbind = vb2_req_unbind,
+ .release = vb2_req_release,
+};
+
+bool vb2_request_object_is_buffer(struct media_request_object *obj)
+{
+ return obj->ops == &vb2_core_req_ops;
+}
+EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);
+
+unsigned int vb2_request_buffer_cnt(struct media_request *req)
+{
+ struct media_request_object *obj;
+ unsigned long flags;
+ unsigned int buffer_cnt = 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_for_each_entry(obj, &req->objects, list)
+ if (vb2_request_object_is_buffer(obj))
+ buffer_cnt++;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return buffer_cnt;
+}
+EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);
+
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
struct vb2_buffer *vb;
@@ -1304,8 +1404,12 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
vb->state);
return -EINVAL;
}
+ if (vb->prepared) {
+ dprintk(1, "buffer already prepared\n");
+ return -EINVAL;
+ }
- ret = __buf_prepare(vb, pb);
+ ret = __buf_prepare(vb);
if (ret)
return ret;
@@ -1314,7 +1418,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
dprintk(2, "prepare of buffer %d succeeded\n", vb->index);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
@@ -1381,7 +1485,8 @@ static int vb2_start_streaming(struct vb2_queue *q)
return ret;
}
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req)
{
struct vb2_buffer *vb;
int ret;
@@ -1393,13 +1498,57 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
vb = q->bufs[index];
- switch (vb->state) {
- case VB2_BUF_STATE_DEQUEUED:
- ret = __buf_prepare(vb, pb);
+ if ((req && q->uses_qbuf) ||
+ (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
+ q->uses_requests)) {
+ dprintk(1, "queue in wrong mode (qbuf vs requests)\n");
+ return -EBUSY;
+ }
+
+ if (req) {
+ int ret;
+
+ q->uses_requests = 1;
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "buffer %d not in dequeued state\n",
+ vb->index);
+ return -EINVAL;
+ }
+
+ media_request_object_init(&vb->req_obj);
+
+ /* Make sure the request is in a safe state for updating. */
+ ret = media_request_lock_for_update(req);
if (ret)
return ret;
- break;
- case VB2_BUF_STATE_PREPARED:
+ ret = media_request_object_bind(req, &vb2_core_req_ops,
+ q, true, &vb->req_obj);
+ media_request_unlock_for_update(req);
+ if (ret)
+ return ret;
+
+ vb->state = VB2_BUF_STATE_IN_REQUEST;
+ /* Fill buffer information for the userspace */
+ if (pb) {
+ call_void_bufop(q, copy_timestamp, vb, pb);
+ call_void_bufop(q, fill_user_buffer, vb, pb);
+ }
+
+ dprintk(2, "qbuf of buffer %d succeeded\n", vb->index);
+ return 0;
+ }
+
+ if (vb->state != VB2_BUF_STATE_IN_REQUEST)
+ q->uses_qbuf = 1;
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_IN_REQUEST:
+ if (!vb->prepared) {
+ ret = __buf_prepare(vb);
+ if (ret)
+ return ret;
+ }
break;
case VB2_BUF_STATE_PREPARING:
dprintk(1, "buffer still being prepared\n");
@@ -1600,6 +1749,11 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
+ if (vb->req_obj.req) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ call_void_bufop(q, init_buffer, vb);
}
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
@@ -1625,6 +1779,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
}
call_void_vb_qop(vb, buf_finish, vb);
+ vb->prepared = false;
if (pindex)
*pindex = vb->index;
@@ -1688,6 +1843,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
q->start_streaming_called = 0;
q->queued_count = 0;
q->error = 0;
+ q->uses_requests = 0;
+ q->uses_qbuf = 0;
/*
* Remove all buffers from videobuf's list...
@@ -1712,19 +1869,38 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
*/
for (i = 0; i < q->num_buffers; ++i) {
struct vb2_buffer *vb = q->bufs[i];
+ struct media_request *req = vb->req_obj.req;
+
+ /*
+ * If a request is associated with this buffer, then
+ * call buf_request_complete() to give the driver a chance to
+ * complete() any related request objects. Otherwise those objects
+ * would never complete.
+ */
+ if (req) {
+ enum media_request_state state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ state = req->state;
+ spin_unlock_irqrestore(&req->lock, flags);
- if (vb->state == VB2_BUF_STATE_PREPARED ||
- vb->state == VB2_BUF_STATE_QUEUED) {
+ if (state == MEDIA_REQUEST_STATE_QUEUED)
+ call_void_vb_qop(vb, buf_request_complete, vb);
+ }
+
+ if (vb->synced) {
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish,
vb->planes[plane].mem_priv);
+ vb->synced = false;
}
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- vb->state = VB2_BUF_STATE_PREPARED;
+ if (vb->prepared) {
call_void_vb_qop(vb, buf_finish, vb);
+ vb->prepared = false;
}
__vb2_dqbuf(vb);
}
@@ -2281,7 +2457,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* Queue all buffers.
*/
for (i = 0; i < q->num_buffers; i++) {
- ret = vb2_core_qbuf(q, i, NULL);
+ ret = vb2_core_qbuf(q, i, NULL, NULL);
if (ret)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
@@ -2460,7 +2636,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (copy_timestamp)
b->timestamp = ktime_get_ns();
- ret = vb2_core_qbuf(q, index, NULL);
+ ret = vb2_core_qbuf(q, index, NULL, NULL);
dprintk(5, "vb2_dbuf result: %d\n", ret);
if (ret)
return ret;
@@ -2563,7 +2739,7 @@ static int vb2_thread(void *data)
if (copy_timestamp)
vb->timestamp = ktime_get_ns();
if (!threadio->stop)
- ret = vb2_core_qbuf(q, vb->index, NULL);
+ ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
call_void_qop(q, wait_prepare, q);
if (ret || threadio->stop)
break;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 886a2d8d5c6c..a17033ab2c22 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
@@ -40,10 +41,12 @@ module_param(debug, int, 0644);
pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \
} while (0)
-/* Flags that are set by the vb2 core */
+/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_IN_REQUEST | \
+ V4L2_BUF_FLAG_REQUEST_FD | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
@@ -118,6 +121,16 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return 0;
}
+/*
+ * __init_v4l2_vb2_buffer() - initialize the v4l2_vb2_buffer struct
+ */
+static void __init_v4l2_vb2_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->request_fd = -1;
+}
+
static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
const struct v4l2_buffer *b = pb;
@@ -154,9 +167,181 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
pr_warn("use the actual size instead.\n");
}
-static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- const char *opname)
+static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_plane *planes = vbuf->planes;
+ unsigned int plane;
+ int ret;
+
+ ret = __verify_length(vb, b);
+ if (ret < 0) {
+ dprintk(1, "plane parameters verification failed: %d\n", ret);
+ return ret;
+ }
+ if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
+ /*
+ * If the format's field is ALTERNATE, then the buffer's field
+ * should be either TOP or BOTTOM, not ALTERNATE since that
+ * makes no sense. The driver has to know whether the
+ * buffer represents a top or a bottom field in order to
+ * program any DMA correctly. Using ALTERNATE is wrong, since
+ * that just says that it is either a top or a bottom field,
+ * but not which of the two it is.
+ */
+ dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
+ return -EINVAL;
+ }
+ vbuf->sequence = 0;
+ vbuf->request_fd = -1;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ switch (b->memory) {
+ case VB2_MEMORY_USERPTR:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.userptr =
+ b->m.planes[plane].m.userptr;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ break;
+ case VB2_MEMORY_DMABUF:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ break;
+ default:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.offset =
+ vb->planes[plane].m.offset;
+ planes[plane].length =
+ vb->planes[plane].length;
+ }
+ break;
+ }
+
+ /* Fill in driver-provided information for OUTPUT types */
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Will have to go up to b->length when API starts
+ * accepting variable number of planes.
+ *
+ * If bytesused == 0 for the output buffer, then fall
+ * back to the full buffer size. In that case
+ * userspace clearly never bothered to set it and
+ * it's a safe assumption that they really meant to
+ * use the full plane sizes.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0
+ * as a way to indicate that streaming is finished.
+ * In that case, the driver should use the
+ * allow_zero_bytesused flag to keep old userspace
+ * applications working.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct vb2_plane *pdst = &planes[plane];
+ struct v4l2_plane *psrc = &b->m.planes[plane];
+
+ if (psrc->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pdst->bytesused = psrc->bytesused;
+ else
+ pdst->bytesused = psrc->bytesused ?
+ psrc->bytesused : pdst->length;
+ pdst->data_offset = psrc->data_offset;
+ }
+ }
+ } else {
+ /*
+ * Single-planar buffers do not use planes array,
+ * so fill in relevant v4l2_buffer struct fields instead.
+ * In videobuf we use our internal V4l2_planes struct for
+ * single-planar buffers as well, for simplicity.
+ *
+ * If bytesused == 0 for the output buffer, then fall back
+ * to the full buffer size as that's a sensible default.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+ * a way to indicate that streaming is finished. In that case,
+ * the driver should use the allow_zero_bytesused flag to keep
+ * old userspace applications working.
+ */
+ switch (b->memory) {
+ case VB2_MEMORY_USERPTR:
+ planes[0].m.userptr = b->m.userptr;
+ planes[0].length = b->length;
+ break;
+ case VB2_MEMORY_DMABUF:
+ planes[0].m.fd = b->m.fd;
+ planes[0].length = b->length;
+ break;
+ default:
+ planes[0].m.offset = vb->planes[0].m.offset;
+ planes[0].length = vb->planes[0].length;
+ break;
+ }
+
+ planes[0].data_offset = 0;
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (b->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ planes[0].bytesused = b->bytesused;
+ else
+ planes[0].bytesused = b->bytesused ?
+ b->bytesused : planes[0].length;
+ } else
+ planes[0].bytesused = 0;
+
+ }
+
+ /* Zero flags that we handle */
+ vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vbuf->field = b->field;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
+ /* Zero last flag, this is a signal from driver to userspace */
+ vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
+ }
+
+ return 0;
+}
+
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b,
+ const char *opname,
+ struct media_request **p_req)
+{
+ struct media_request *req;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ int ret;
+
if (b->type != q->type) {
dprintk(1, "%s: invalid buffer type\n", opname);
return -EINVAL;
@@ -178,7 +363,82 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
return -EINVAL;
}
- return __verify_planes_array(q->bufs[b->index], b);
+ vb = q->bufs[b->index];
+ vbuf = to_vb2_v4l2_buffer(vb);
+ ret = __verify_planes_array(vb, b);
+ if (ret)
+ return ret;
+
+ if (!vb->prepared) {
+ /* Copy relevant information provided by the userspace */
+ memset(vbuf->planes, 0,
+ sizeof(vbuf->planes[0]) * vb->num_planes);
+ ret = vb2_fill_vb2_v4l2_buffer(vb, b);
+ if (ret)
+ return ret;
+ }
+
+ if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ if (q->uses_requests) {
+ dprintk(1, "%s: queue uses requests\n", opname);
+ return -EBUSY;
+ }
+ return 0;
+ } else if (!q->supports_requests) {
+ dprintk(1, "%s: queue does not support requests\n", opname);
+ return -EACCES;
+ } else if (q->uses_qbuf) {
+ dprintk(1, "%s: queue does not use requests\n", opname);
+ return -EBUSY;
+ }
+
+ /*
+ * For proper locking when queueing a request you need to be able
+ * to lock access to the vb2 queue, so check that there is a lock
+ * that we can use. In addition p_req must be non-NULL.
+ */
+ if (WARN_ON(!q->lock || !p_req))
+ return -EINVAL;
+
+ /*
+ * Make sure this op is implemented by the driver. It's easy to forget
+ * this callback, but it is important when canceling a buffer in a
+ * queued request.
+ */
+ if (WARN_ON(!q->ops->buf_request_complete))
+ return -EINVAL;
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s: buffer is not in dequeued state\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->request_fd < 0) {
+ dprintk(1, "%s: request_fd < 0\n", opname);
+ return -EINVAL;
+ }
+
+ req = media_request_get_by_fd(mdev, b->request_fd);
+ if (IS_ERR(req)) {
+ dprintk(1, "%s: invalid request_fd\n", opname);
+ return PTR_ERR(req);
+ }
+
+ /*
+ * Early sanity check. This is checked again when the buffer
+ * is bound to the request in vb2_core_qbuf().
+ */
+ if (req->state != MEDIA_REQUEST_STATE_IDLE &&
+ req->state != MEDIA_REQUEST_STATE_UPDATING) {
+ dprintk(1, "%s: request is not idle\n", opname);
+ media_request_put(req);
+ return -EBUSY;
+ }
+
+ *p_req = req;
+ vbuf->request_fd = b->request_fd;
+
+ return 0;
}
/*
@@ -204,7 +464,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
b->timecode = vbuf->timecode;
b->sequence = vbuf->sequence;
b->reserved2 = 0;
- b->reserved = 0;
+ b->request_fd = 0;
if (q->is_multiplanar) {
/*
@@ -261,15 +521,15 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
case VB2_BUF_STATE_ACTIVE:
b->flags |= V4L2_BUF_FLAG_QUEUED;
break;
+ case VB2_BUF_STATE_IN_REQUEST:
+ b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
+ break;
case VB2_BUF_STATE_ERROR:
b->flags |= V4L2_BUF_FLAG_ERROR;
/* fall through */
case VB2_BUF_STATE_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
- case VB2_BUF_STATE_PREPARED:
- b->flags |= V4L2_BUF_FLAG_PREPARED;
- break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
case VB2_BUF_STATE_REQUEUEING:
@@ -277,8 +537,17 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
break;
}
+ if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
+ vb->state == VB2_BUF_STATE_IN_REQUEST) &&
+ vb->synced && vb->prepared)
+ b->flags |= V4L2_BUF_FLAG_PREPARED;
+
if (vb2_buffer_in_use(q, vb))
b->flags |= V4L2_BUF_FLAG_MAPPED;
+ if (vbuf->request_fd >= 0) {
+ b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
+ b->request_fd = vbuf->request_fd;
+ }
if (!q->is_output &&
b->flags & V4L2_BUF_FLAG_DONE &&
@@ -291,158 +560,28 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
* v4l2_buffer by the userspace. It also verifies that struct
* v4l2_buffer has a valid number of planes.
*/
-static int __fill_vb2_buffer(struct vb2_buffer *vb,
- const void *pb, struct vb2_plane *planes)
+static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
- struct vb2_queue *q = vb->vb2_queue;
- const struct v4l2_buffer *b = pb;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
unsigned int plane;
- int ret;
-
- ret = __verify_length(vb, b);
- if (ret < 0) {
- dprintk(1, "plane parameters verification failed: %d\n", ret);
- return ret;
- }
- if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
- /*
- * If the format's field is ALTERNATE, then the buffer's field
- * should be either TOP or BOTTOM, not ALTERNATE since that
- * makes no sense. The driver has to know whether the
- * buffer represents a top or a bottom field in order to
- * program any DMA correctly. Using ALTERNATE is wrong, since
- * that just says that it is either a top or a bottom field,
- * but not which of the two it is.
- */
- dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
- return -EINVAL;
- }
- vb->timestamp = 0;
- vbuf->sequence = 0;
- if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
- if (b->memory == VB2_MEMORY_USERPTR) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- planes[plane].m.userptr =
- b->m.planes[plane].m.userptr;
- planes[plane].length =
- b->m.planes[plane].length;
- }
- }
- if (b->memory == VB2_MEMORY_DMABUF) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- planes[plane].m.fd =
- b->m.planes[plane].m.fd;
- planes[plane].length =
- b->m.planes[plane].length;
- }
- }
-
- /* Fill in driver-provided information for OUTPUT types */
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Will have to go up to b->length when API starts
- * accepting variable number of planes.
- *
- * If bytesused == 0 for the output buffer, then fall
- * back to the full buffer size. In that case
- * userspace clearly never bothered to set it and
- * it's a safe assumption that they really meant to
- * use the full plane sizes.
- *
- * Some drivers, e.g. old codec drivers, use bytesused == 0
- * as a way to indicate that streaming is finished.
- * In that case, the driver should use the
- * allow_zero_bytesused flag to keep old userspace
- * applications working.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- struct vb2_plane *pdst = &planes[plane];
- struct v4l2_plane *psrc = &b->m.planes[plane];
-
- if (psrc->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
- if (vb->vb2_queue->allow_zero_bytesused)
- pdst->bytesused = psrc->bytesused;
- else
- pdst->bytesused = psrc->bytesused ?
- psrc->bytesused : pdst->length;
- pdst->data_offset = psrc->data_offset;
- }
- }
- } else {
- /*
- * Single-planar buffers do not use planes array,
- * so fill in relevant v4l2_buffer struct fields instead.
- * In videobuf we use our internal V4l2_planes struct for
- * single-planar buffers as well, for simplicity.
- *
- * If bytesused == 0 for the output buffer, then fall back
- * to the full buffer size as that's a sensible default.
- *
- * Some drivers, e.g. old codec drivers, use bytesused == 0 as
- * a way to indicate that streaming is finished. In that case,
- * the driver should use the allow_zero_bytesused flag to keep
- * old userspace applications working.
- */
- if (b->memory == VB2_MEMORY_USERPTR) {
- planes[0].m.userptr = b->m.userptr;
- planes[0].length = b->length;
- }
+ if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp)
+ vb->timestamp = 0;
- if (b->memory == VB2_MEMORY_DMABUF) {
- planes[0].m.fd = b->m.fd;
- planes[0].length = b->length;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
+ planes[plane].m = vbuf->planes[plane].m;
+ planes[plane].length = vbuf->planes[plane].length;
}
-
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- if (b->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
- if (vb->vb2_queue->allow_zero_bytesused)
- planes[0].bytesused = b->bytesused;
- else
- planes[0].bytesused = b->bytesused ?
- b->bytesused : planes[0].length;
- } else
- planes[0].bytesused = 0;
-
- }
-
- /* Zero flags that the vb2 core handles */
- vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
- if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Non-COPY timestamps and non-OUTPUT queues will get
- * their timestamp and timestamp source flags from the
- * queue.
- */
- vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ planes[plane].bytesused = vbuf->planes[plane].bytesused;
+ planes[plane].data_offset = vbuf->planes[plane].data_offset;
}
-
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * For output buffers mask out the timecode flag:
- * this will be handled later in vb2_qbuf().
- * The 'field' is valid metadata for this output buffer
- * and so that needs to be copied here.
- */
- vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
- vbuf->field = b->field;
- } else {
- /* Zero any output buffer flags as this is a capture buffer */
- vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
- /* Zero last flag, this is a signal from driver to userspace */
- vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
- }
-
return 0;
}
static const struct vb2_buf_ops v4l2_buf_ops = {
.verify_planes_array = __verify_planes_array_core,
+ .init_buffer = __init_v4l2_vb2_buffer,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
@@ -483,15 +622,30 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL(vb2_querybuf);
+static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+{
+ *caps = 0;
+ if (q->io_modes & VB2_MMAP)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
+ if (q->io_modes & VB2_USERPTR)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
+ if (q->io_modes & VB2_DMABUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->supports_requests)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+}
+
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
int ret = vb2_verify_memory_type(q, req->memory, req->type);
+ fill_buf_caps(q, &req->capabilities);
return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b)
{
int ret;
@@ -500,7 +654,10 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+ if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
+ return -EINVAL;
+
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
@@ -514,6 +671,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
int ret = vb2_verify_memory_type(q, create->memory, f->type);
unsigned i;
+ fill_buf_caps(q, &create->capabilities);
create->index = q->num_buffers;
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -560,8 +718,10 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b)
{
+ struct media_request *req = NULL;
int ret;
if (vb2_fileio_is_active(q)) {
@@ -569,8 +729,13 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
- return ret ? ret : vb2_core_qbuf(q, b->index, b);
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+ if (ret)
+ return ret;
+ ret = vb2_core_qbuf(q, b->index, b, req);
+ if (req)
+ media_request_put(req);
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
@@ -714,6 +879,7 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+ fill_buf_caps(vdev->queue, &p->capabilities);
if (res)
return res;
if (vb2_queue_is_busy(vdev, file))
@@ -735,6 +901,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
p->format.type);
p->index = vdev->queue->num_buffers;
+ fill_buf_caps(vdev->queue, &p->capabilities);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
@@ -760,7 +927,7 @@ int vb2_ioctl_prepare_buf(struct file *file, void *priv,
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- return vb2_prepare_buf(vdev->queue, p);
+ return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
@@ -779,7 +946,7 @@ int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- return vb2_qbuf(vdev->queue, p);
+ return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
@@ -961,6 +1128,57 @@ void vb2_ops_wait_finish(struct vb2_queue *vq)
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
+/*
+ * Note that this function is called during validation time and
+ * thus the req_queue_mutex is held to ensure no request objects
+ * can be added or deleted while validating. So there is no need
+ * to protect the objects list.
+ */
+int vb2_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ int ret = 0;
+
+ if (!vb2_request_buffer_cnt(req))
+ return -ENOENT;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ if (!obj->ops->prepare)
+ continue;
+
+ ret = obj->ops->prepare(obj);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ list_for_each_entry_continue_reverse(obj, &req->objects, list)
+ if (obj->ops->unprepare)
+ obj->ops->unprepare(obj);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_request_validate);
+
+void vb2_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
+ if (obj->ops->queue)
+ obj->ops->queue(obj);
+}
+EXPORT_SYMBOL_GPL(vb2_request_queue);
+
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index c90b1fd94735..6974f1731529 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -146,8 +146,7 @@ static void _fill_dmx_buffer(struct vb2_buffer *vb, void *pb)
dprintk(3, "[%s]\n", ctx->name);
}
-static int _fill_vb2_buffer(struct vb2_buffer *vb,
- const void *pb, struct vb2_plane *planes)
+static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
@@ -385,7 +384,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
int ret;
- ret = vb2_core_qbuf(&ctx->vb_q, b->index, b);
+ ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
b->index, ret);
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 8ef91b1598e3..d6673f4fb47b 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1394,7 +1394,8 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
case RTL2832_SDR_TUNER_E4000:
v4l2_ctrl_handler_init(&dev->hdl, 9);
if (subdev)
- v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, NULL);
+ v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler,
+ NULL, true);
break;
case RTL2832_SDR_TUNER_R820T:
case RTL2832_SDR_TUNER_R828D:
@@ -1423,7 +1424,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
v4l2_ctrl_handler_init(&dev->hdl, 2);
if (subdev)
v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler,
- NULL);
+ NULL, true);
break;
default:
v4l2_ctrl_handler_init(&dev->hdl, 0);
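
Editor's note: this and the following hunks pass a new boolean to every v4l2_ctrl_add_handler() call. Assuming, as the call sites suggest, that the flag indicates whether the added controls come from a different device (true for an external sub-device's handler, false when merging a handler owned by the same driver), the calling convention looks like the sketch below. The bridge_dev structure and names are hypothetical.

#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

struct bridge_dev {				/* illustrative bridge driver */
	struct v4l2_ctrl_handler ctrl_handler;	/* main handler */
	struct v4l2_ctrl_handler audio_hdl;	/* driver-owned extras */
	struct v4l2_subdev *sensor_sd;		/* external sub-device */
};

static int bridge_merge_handlers(struct bridge_dev *bridge)
{
	int ret;

	/* Same driver, same device: the new flag is false. */
	ret = v4l2_ctrl_add_handler(&bridge->ctrl_handler,
				    &bridge->audio_hdl, NULL, false);
	if (ret)
		return ret;

	/* Controls inherited from another device (sensor/tuner): true. */
	return v4l2_ctrl_add_handler(&bridge->ctrl_handler,
				     bridge->sensor_sd->ctrl_handler,
				     NULL, true);
}
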
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 4c7190db420e..bed24372e61f 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -30,6 +30,7 @@
#include <media/media-device.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
+#include <media/media-request.h>
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -377,10 +378,19 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
return ret;
}
+static long media_device_request_alloc(struct media_device *mdev,
+ int *alloc_fd)
+{
+ if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
+ return -ENOTTY;
+
+ return media_request_alloc(mdev, alloc_fd);
+}
+
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
{
- /* All media IOCTLs are _IOWR() */
- if (copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
+ if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
+ copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
@@ -388,8 +398,8 @@ static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
{
- /* All media IOCTLs are _IOWR() */
- if (copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
+ if ((_IOC_DIR(cmd) & _IOC_READ) &&
+ copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
@@ -425,6 +435,7 @@ static const struct media_ioctl_info ioctl_info[] = {
MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(REQUEST_ALLOC, media_device_request_alloc, 0),
};
static long media_device_ioctl(struct file *filp, unsigned int cmd,
@@ -691,9 +702,13 @@ void media_device_init(struct media_device *mdev)
INIT_LIST_HEAD(&mdev->pads);
INIT_LIST_HEAD(&mdev->links);
INIT_LIST_HEAD(&mdev->entity_notify);
+
+ mutex_init(&mdev->req_queue_mutex);
mutex_init(&mdev->graph_mutex);
ida_init(&mdev->entity_internal_idx);
+ atomic_set(&mdev->request_id, 0);
+
dev_dbg(mdev->dev, "Media device initialized\n");
}
EXPORT_SYMBOL_GPL(media_device_init);
@@ -704,6 +719,7 @@ void media_device_cleanup(struct media_device *mdev)
mdev->entity_internal_idx_max = 0;
media_graph_walk_cleanup(&mdev->pm_count_walk);
mutex_destroy(&mdev->graph_mutex);
+ mutex_destroy(&mdev->req_queue_mutex);
}
EXPORT_SYMBOL_GPL(media_device_cleanup);
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c
new file mode 100644
index 000000000000..4e9db1fed697
--- /dev/null
+++ b/drivers/media/media-request.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Media device request objects
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/refcount.h>
+
+#include <media/media-device.h>
+#include <media/media-request.h>
+
+static const char * const request_state[] = {
+ [MEDIA_REQUEST_STATE_IDLE] = "idle",
+ [MEDIA_REQUEST_STATE_VALIDATING] = "validating",
+ [MEDIA_REQUEST_STATE_QUEUED] = "queued",
+ [MEDIA_REQUEST_STATE_COMPLETE] = "complete",
+ [MEDIA_REQUEST_STATE_CLEANING] = "cleaning",
+ [MEDIA_REQUEST_STATE_UPDATING] = "updating",
+};
+
+static const char *
+media_request_state_str(enum media_request_state state)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
+
+ if (WARN_ON(state >= ARRAY_SIZE(request_state)))
+ return "invalid";
+ return request_state[state];
+}
+
+static void media_request_clean(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+
+ /* Just a sanity check. No other code path is allowed to change this. */
+ WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
+ WARN_ON(req->updating_count);
+ WARN_ON(req->access_count);
+
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ media_request_object_unbind(obj);
+ media_request_object_put(obj);
+ }
+
+ req->updating_count = 0;
+ req->access_count = 0;
+ WARN_ON(req->num_incomplete_objects);
+ req->num_incomplete_objects = 0;
+ wake_up_interruptible_all(&req->poll_wait);
+}
+
+static void media_request_release(struct kref *kref)
+{
+ struct media_request *req =
+ container_of(kref, struct media_request, kref);
+ struct media_device *mdev = req->mdev;
+
+ dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
+
+ /* No other users, no need for a spinlock */
+ req->state = MEDIA_REQUEST_STATE_CLEANING;
+
+ media_request_clean(req);
+
+ if (mdev->ops->req_free)
+ mdev->ops->req_free(req);
+ else
+ kfree(req);
+}
+
+void media_request_put(struct media_request *req)
+{
+ kref_put(&req->kref, media_request_release);
+}
+EXPORT_SYMBOL_GPL(media_request_put);
+
+static int media_request_close(struct inode *inode, struct file *filp)
+{
+ struct media_request *req = filp->private_data;
+
+ media_request_put(req);
+ return 0;
+}
+
+static __poll_t media_request_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct media_request *req = filp->private_data;
+ unsigned long flags;
+ __poll_t ret = 0;
+
+ if (!(poll_requested_events(wait) & EPOLLPRI))
+ return 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
+ ret = EPOLLPRI;
+ goto unlock;
+ }
+ if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
+ ret = EPOLLERR;
+ goto unlock;
+ }
+
+ poll_wait(filp, &req->poll_wait, wait);
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ return ret;
+}
+
+static long media_request_ioctl_queue(struct media_request *req)
+{
+ struct media_device *mdev = req->mdev;
+ enum media_request_state state;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);
+
+ /*
+ * Ensure the request that is validated will be the one that gets queued
+ * next by serialising the queueing process. This mutex is also used
+ * to serialize with canceling a vb2 queue and with setting values such
+ * as controls in a request.
+ */
+ mutex_lock(&mdev->req_queue_mutex);
+
+ media_request_get(req);
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_IDLE)
+ req->state = MEDIA_REQUEST_STATE_VALIDATING;
+ state = req->state;
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (state != MEDIA_REQUEST_STATE_VALIDATING) {
+ dev_dbg(mdev->dev,
+ "request: unable to queue %s, request in state %s\n",
+ req->debug_str, media_request_state_str(state));
+ media_request_put(req);
+ mutex_unlock(&mdev->req_queue_mutex);
+ return -EBUSY;
+ }
+
+ ret = mdev->ops->req_validate(req);
+
+ /*
+ * If the req_validate was successful, then we mark the state as QUEUED
+ * and call req_queue. The reason we set the state first is that this
+ * allows req_queue to unbind or complete the queued objects in case
+ * they are immediately 'consumed'. State changes from QUEUED to another
+ * state can only happen if either the driver changes the state or if
+ * the user cancels the vb2 queue. The driver can only change the state
+ * after each object is queued through the req_queue op (and note that
+ * that op cannot fail), so setting the state to QUEUED up front is
+ * safe.
+ *
+ * The other reason for changing the state is if the vb2 queue is
+ * canceled, and that uses the req_queue_mutex which is still locked
+ * while req_queue is called, so that's safe as well.
+ */
+ spin_lock_irqsave(&req->lock, flags);
+ req->state = ret ? MEDIA_REQUEST_STATE_IDLE
+ : MEDIA_REQUEST_STATE_QUEUED;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ if (!ret)
+ mdev->ops->req_queue(req);
+
+ mutex_unlock(&mdev->req_queue_mutex);
+
+ if (ret) {
+ dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
+ req->debug_str, ret);
+ media_request_put(req);
+ }
+
+ return ret;
+}
+
+static long media_request_ioctl_reinit(struct media_request *req)
+{
+ struct media_device *mdev = req->mdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state != MEDIA_REQUEST_STATE_IDLE &&
+ req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ dev_dbg(mdev->dev,
+ "request: %s not in idle or complete state, cannot reinit\n",
+ req->debug_str);
+ spin_unlock_irqrestore(&req->lock, flags);
+ return -EBUSY;
+ }
+ if (req->access_count) {
+ dev_dbg(mdev->dev,
+ "request: %s is being accessed, cannot reinit\n",
+ req->debug_str);
+ spin_unlock_irqrestore(&req->lock, flags);
+ return -EBUSY;
+ }
+ req->state = MEDIA_REQUEST_STATE_CLEANING;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ media_request_clean(req);
+
+ spin_lock_irqsave(&req->lock, flags);
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return 0;
+}
+
+static long media_request_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct media_request *req = filp->private_data;
+
+ switch (cmd) {
+ case MEDIA_REQUEST_IOC_QUEUE:
+ return media_request_ioctl_queue(req);
+ case MEDIA_REQUEST_IOC_REINIT:
+ return media_request_ioctl_reinit(req);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations request_fops = {
+ .owner = THIS_MODULE,
+ .poll = media_request_poll,
+ .unlocked_ioctl = media_request_ioctl,
+ .release = media_request_close,
+};
+
+struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd)
+{
+ struct file *filp;
+ struct media_request *req;
+
+ if (!mdev || !mdev->ops ||
+ !mdev->ops->req_validate || !mdev->ops->req_queue)
+ return ERR_PTR(-EACCES);
+
+ filp = fget(request_fd);
+ if (!filp)
+ goto err_no_req_fd;
+
+ if (filp->f_op != &request_fops)
+ goto err_fput;
+ req = filp->private_data;
+ if (req->mdev != mdev)
+ goto err_fput;
+
+ /*
+ * Note: as long as someone has an open filehandle of the request,
+ * the request can never be released. The fget() above ensures that
+ * even if userspace closes the request filehandle, the release()
+ * fop won't be called, so the media_request_get() always succeeds
+ * and there is no race condition where the request was released
+ * before media_request_get() is called.
+ */
+ media_request_get(req);
+ fput(filp);
+
+ return req;
+
+err_fput:
+ fput(filp);
+
+err_no_req_fd:
+ dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(media_request_get_by_fd);
+
+int media_request_alloc(struct media_device *mdev, int *alloc_fd)
+{
+ struct media_request *req;
+ struct file *filp;
+ int fd;
+ int ret;
+
+ /* Either both are NULL or both are non-NULL */
+ if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
+ return -ENOMEM;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
+ if (IS_ERR(filp)) {
+ ret = PTR_ERR(filp);
+ goto err_put_fd;
+ }
+
+ if (mdev->ops->req_alloc)
+ req = mdev->ops->req_alloc(mdev);
+ else
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_fput;
+ }
+
+ filp->private_data = req;
+ req->mdev = mdev;
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ req->num_incomplete_objects = 0;
+ kref_init(&req->kref);
+ INIT_LIST_HEAD(&req->objects);
+ spin_lock_init(&req->lock);
+ init_waitqueue_head(&req->poll_wait);
+ req->updating_count = 0;
+ req->access_count = 0;
+
+ *alloc_fd = fd;
+
+ snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
+ atomic_inc_return(&mdev->request_id), fd);
+ dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
+
+ fd_install(fd, filp);
+
+ return 0;
+
+err_fput:
+ fput(filp);
+
+err_put_fd:
+ put_unused_fd(fd);
+
+ return ret;
+}
+
+static void media_request_object_release(struct kref *kref)
+{
+ struct media_request_object *obj =
+ container_of(kref, struct media_request_object, kref);
+ struct media_request *req = obj->req;
+
+ if (WARN_ON(req))
+ media_request_object_unbind(obj);
+ obj->ops->release(obj);
+}
+
+struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv)
+{
+ struct media_request_object *obj;
+ struct media_request_object *found = NULL;
+ unsigned long flags;
+
+ if (WARN_ON(!ops || !priv))
+ return NULL;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_for_each_entry(obj, &req->objects, list) {
+ if (obj->ops == ops && obj->priv == priv) {
+ media_request_object_get(obj);
+ found = obj;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+ return found;
+}
+EXPORT_SYMBOL_GPL(media_request_object_find);
+
+void media_request_object_put(struct media_request_object *obj)
+{
+ kref_put(&obj->kref, media_request_object_release);
+}
+EXPORT_SYMBOL_GPL(media_request_object_put);
+
+void media_request_object_init(struct media_request_object *obj)
+{
+ obj->ops = NULL;
+ obj->req = NULL;
+ obj->priv = NULL;
+ obj->completed = false;
+ INIT_LIST_HEAD(&obj->list);
+ kref_init(&obj->kref);
+}
+EXPORT_SYMBOL_GPL(media_request_object_init);
+
+int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj)
+{
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ if (WARN_ON(!ops->release))
+ return -EACCES;
+
+ spin_lock_irqsave(&req->lock, flags);
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ goto unlock;
+
+ obj->req = req;
+ obj->ops = ops;
+ obj->priv = priv;
+
+ if (is_buffer)
+ list_add_tail(&obj->list, &req->objects);
+ else
+ list_add(&obj->list, &req->objects);
+ req->num_incomplete_objects++;
+ ret = 0;
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_request_object_bind);
+
+void media_request_object_unbind(struct media_request_object *obj)
+{
+ struct media_request *req = obj->req;
+ unsigned long flags;
+ bool completed = false;
+
+ if (WARN_ON(!req))
+ return;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_del(&obj->list);
+ obj->req = NULL;
+
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
+ goto unlock;
+
+ if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
+ goto unlock;
+
+ if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
+ if (!obj->completed)
+ req->num_incomplete_objects--;
+ goto unlock;
+ }
+
+ if (WARN_ON(!req->num_incomplete_objects))
+ goto unlock;
+
+ req->num_incomplete_objects--;
+ if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
+ !req->num_incomplete_objects) {
+ req->state = MEDIA_REQUEST_STATE_COMPLETE;
+ completed = true;
+ wake_up_interruptible_all(&req->poll_wait);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (obj->ops->unbind)
+ obj->ops->unbind(obj);
+ if (completed)
+ media_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_request_object_unbind);
+
+void media_request_object_complete(struct media_request_object *obj)
+{
+ struct media_request *req = obj->req;
+ unsigned long flags;
+ bool completed = false;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (obj->completed)
+ goto unlock;
+ obj->completed = true;
+ if (WARN_ON(!req->num_incomplete_objects) ||
+ WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ goto unlock;
+
+ if (!--req->num_incomplete_objects) {
+ req->state = MEDIA_REQUEST_STATE_COMPLETE;
+ wake_up_interruptible_all(&req->poll_wait);
+ completed = true;
+ }
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (completed)
+ media_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_request_object_complete);
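
Editor's note: from userspace the request objects implemented above are driven through the new ioctls and poll() semantics. The hedged sketch below walks one pass through that lifecycle for an output queue; error handling is trimmed and the buffer type/memory values are examples, but the ioctl names, the V4L2_BUF_FLAG_REQUEST_FD flag and the POLLPRI completion signal are the ones introduced by this series (uapi headers from a kernel containing it are assumed).

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/media.h>
#include <linux/videodev2.h>

int queue_one_request(int media_fd, int video_fd, unsigned int index)
{
	struct v4l2_buffer buf;
	struct pollfd pfd;
	int req_fd;

	/* 1. Allocate a request on the media device. */
	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
		return -1;

	/* 2. Queue a buffer into the request instead of directly. */
	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.flags = V4L2_BUF_FLAG_REQUEST_FD;
	buf.request_fd = req_fd;
	if (ioctl(video_fd, VIDIOC_QBUF, &buf))
		return -1;

	/* 3. Validate and queue the request itself. */
	if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE))
		return -1;

	/* 4. Wait for completion: a completed request signals POLLPRI. */
	pfd.fd = req_fd;
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);

	/* 5. Recycle the request for the next frame, or close(req_fd). */
	return ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
}
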
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b2cfcbb0008e..d4906c04dc6e 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4210,7 +4210,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
/* register video4linux + input */
if (!bttv_tvcards[btv->c.type].no_video) {
v4l2_ctrl_add_handler(&btv->radio_ctrl_handler, hdl,
- v4l2_ctrl_radio_filter);
+ v4l2_ctrl_radio_filter, false);
if (btv->radio_ctrl_handler.error) {
result = btv->radio_ctrl_handler.error;
goto fail2;
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 3083434bb636..a00b77d80ed9 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1527,7 +1527,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
dev->cxhdl.priv = dev;
dev->cxhdl.func = cx23885_api_func;
cx2341x_handler_set_50hz(&dev->cxhdl, tsport->height == 576);
- v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL);
+ v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL, false);
/* Allocate and initialize V4L video device */
dev->v4l_device = cx23885_video_dev_alloc(tsport,
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 199756547f03..6c0bb9fe4a31 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1183,7 +1183,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
err = cx2341x_handler_init(&dev->cxhdl, 36);
if (err)
goto fail_core;
- v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL);
+ v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL, false);
/* blackbird stuff */
pr_info("cx23416 based mpeg encoder (blackbird reference design)\n");
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index df4e7a0686e0..e1549d352f70 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1378,7 +1378,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
if (vc->id == V4L2_CID_CHROMA_AGC)
core->chroma_agc = vc;
}
- v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL);
+ v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL, false);
/* load and configure helper modules */
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 747a082229dc..9735bbb908e3 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -265,9 +265,9 @@ static int empress_init(struct saa7134_dev *dev)
"%s empress (%s)", dev->name,
saa7134_boards[dev->board].name);
v4l2_ctrl_handler_init(hdl, 21);
- v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter);
+ v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter, false);
if (dev->empress_sd)
- v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL);
+ v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL, true);
if (hdl->error) {
video_device_release(dev->empress_dev);
return hdl->error;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 1a22ae7cbdd9..8f28741ebb35 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2137,7 +2137,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
hdl = &dev->radio_ctrl_handler;
v4l2_ctrl_handler_init(hdl, 2);
v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler,
- v4l2_ctrl_radio_filter);
+ v4l2_ctrl_radio_filter, false);
if (hdl->error)
return hdl->error;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index f56220e549bb..3e9fcf4f8a13 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -1424,7 +1424,7 @@ static int fimc_link_setup(struct media_entity *entity,
return 0;
return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler,
- sensor->ctrl_handler, NULL);
+ sensor->ctrl_handler, NULL, true);
}
static const struct media_entity_operations fimc_sd_media_ops = {
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 5658f6a326f7..078d64114b24 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -940,7 +940,7 @@ isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
int ret;
mutex_lock(&video->queue_lock);
- ret = vb2_qbuf(&vfh->queue, b);
+ ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
mutex_unlock(&video->queue_lock);
return ret;
@@ -1028,7 +1028,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
ctrls.count = 1;
ctrls.controls = &ctrl;
- ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
+ ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, NULL, &ctrls);
if (ret < 0) {
dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
pipe->external->name);
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index a3f135364474..f476b2f1eb35 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -475,7 +475,7 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
return ret;
ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
- NULL);
+ NULL, true);
if (ret < 0) {
v4l2_ctrl_handler_free(&vin->ctrl_handler);
return ret;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 8483dc36715d..c417ff8f6fe5 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1164,7 +1164,7 @@ static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
}
ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
- sdr->ep.subdev->ctrl_handler, NULL);
+ sdr->ep.subdev->ctrl_handler, NULL, true);
if (ret) {
rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
goto error;
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 23b008d1a47b..c3fc94ef251e 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -943,7 +943,7 @@ static int s3c_camif_qbuf(struct file *file, void *priv,
if (vp->owner && vp->owner != priv)
return -EBUSY;
- return vb2_qbuf(&vp->vb_queue, buf);
+ return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf);
}
static int s3c_camif_dqbuf(struct file *file, void *priv,
@@ -981,7 +981,7 @@ static int s3c_camif_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct camif_vp *vp = video_drvdata(file);
- return vb2_prepare_buf(&vp->vb_queue, b);
+ return vb2_prepare_buf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, b);
}
static int s3c_camif_g_selection(struct file *file, void *priv,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 670ca869babb..ece59ce1b149 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -632,9 +632,9 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EIO;
}
if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return vb2_qbuf(&ctx->vq_src, buf);
+ return vb2_qbuf(&ctx->vq_src, NULL, buf);
else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return vb2_qbuf(&ctx->vq_dst, buf);
+ return vb2_qbuf(&ctx->vq_dst, NULL, buf);
return -EINVAL;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 7037d48bdc2c..8fcf627dedfb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1621,9 +1621,9 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
mfc_err("Call on QBUF after EOS command\n");
return -EIO;
}
- return vb2_qbuf(&ctx->vq_src, buf);
+ return vb2_qbuf(&ctx->vq_src, NULL, buf);
} else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- return vb2_qbuf(&ctx->vq_dst, buf);
+ return vb2_qbuf(&ctx->vq_dst, NULL, buf);
}
return -EINVAL;
}
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 0a70fb67c401..21034339cdcb 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -394,7 +394,7 @@ static int soc_camera_qbuf(struct file *file, void *priv,
if (icd->streamer != file)
return -EBUSY;
- return vb2_qbuf(&icd->vb2_vidq, p);
+ return vb2_qbuf(&icd->vb2_vidq, NULL, p);
}
static int soc_camera_dqbuf(struct file *file, void *priv,
@@ -430,7 +430,7 @@ static int soc_camera_prepare_buf(struct file *file, void *priv,
{
struct soc_camera_device *icd = file->private_data;
- return vb2_prepare_buf(&icd->vb2_vidq, b);
+ return vb2_prepare_buf(&icd->vb2_vidq, NULL, b);
}
static int soc_camera_expbuf(struct file *file, void *priv,
@@ -1181,7 +1181,8 @@ static int soc_camera_probe_finish(struct soc_camera_device *icd)
v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms);
- ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL);
+ ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 60c522ee2e03..af150a0395df 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -3,7 +3,8 @@
*
* This is a virtual device driver for testing mem-to-mem videobuf framework.
* It simulates a device that uses memory buffers for both source and
- * destination, processes the data and issues an "irq" (simulated by a timer).
+ * destination, processes the data and issues an "irq" (simulated by a delayed
+ * workqueue).
* The device is capable of multi-instance, multi-buffer-per-transaction
* operation (via the mem2mem framework).
*
@@ -19,7 +20,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -148,7 +148,7 @@ struct vim2m_dev {
struct mutex dev_mutex;
spinlock_t irqlock;
- struct timer_list timer;
+ struct delayed_work work_run;
struct v4l2_m2m_dev *m2m_dev;
};
@@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx,
return 0;
}
-static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
-{
- dprintk(dev, "Scheduling a simulated irq\n");
- mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
-}
-
/*
* mem2mem callbacks
*/
@@ -385,15 +379,24 @@ static void device_run(void *priv)
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ /* Apply request controls if any */
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+
device_process(ctx, src_buf, dst_buf);
- /* Run a timer, which simulates a hardware irq */
- schedule_irq(dev, ctx->transtime);
+ /* Complete request controls if any */
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+
+ /* Run delayed work, which simulates a hardware irq */
+ schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
}
-static void device_isr(struct timer_list *t)
+static void device_work(struct work_struct *w)
{
- struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
+ struct vim2m_dev *vim2m_dev =
+ container_of(w, struct vim2m_dev, work_run.work);
struct vim2m_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
@@ -805,6 +808,7 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
+ flush_scheduled_work();
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
@@ -812,12 +816,21 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (vbuf == NULL)
return;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->hdl);
spin_lock_irqsave(&ctx->dev->irqlock, flags);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
}
}
+static void vim2m_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
static const struct vb2_ops vim2m_qops = {
.queue_setup = vim2m_queue_setup,
.buf_prepare = vim2m_buf_prepare,
@@ -826,6 +839,7 @@ static const struct vb2_ops vim2m_qops = {
.stop_streaming = vim2m_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
+ .buf_request_complete = vim2m_buf_request_complete,
};
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
@@ -841,6 +855,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->supports_requests = true;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -992,6 +1007,11 @@ static const struct v4l2_m2m_ops m2m_ops = {
.job_abort = job_abort,
};
+static const struct media_device_ops m2m_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = vb2_m2m_request_queue,
+};
+
static int vim2m_probe(struct platform_device *pdev)
{
struct vim2m_dev *dev;
@@ -1015,6 +1035,7 @@ static int vim2m_probe(struct platform_device *pdev)
vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
+ INIT_DELAYED_WORK(&dev->work_run, device_work);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
@@ -1026,7 +1047,6 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
- timer_setup(&dev->timer, device_isr, 0);
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
@@ -1040,6 +1060,7 @@ static int vim2m_probe(struct platform_device *pdev)
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
media_device_init(&dev->mdev);
+ dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
ret = v4l2_m2m_register_media_controller(dev->m2m_dev,
@@ -1083,7 +1104,6 @@ static int vim2m_remove(struct platform_device *pdev)
media_device_cleanup(&dev->mdev);
#endif
v4l2_m2m_release(dev->m2m_dev);
- del_timer_sync(&dev->timer);
video_unregister_device(&dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
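
A minimal sketch, not part of this patch, condensing the pattern the vim2m changes above introduce: apply the request's controls before processing a source buffer, complete them afterwards, and use a delayed work item instead of a timer to simulate the hardware interrupt. The my_* names (my_dev, my_ctx, my_process) are hypothetical.

#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

/* Hypothetical driver state, trimmed to what the sketch needs. */
struct my_dev {
	struct delayed_work work_run;
};

struct my_ctx {
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler hdl;
	struct my_dev *dev;
	unsigned int transtime;		/* simulated processing time, in ms */
};

static void my_process(struct my_ctx *ctx, struct vb2_v4l2_buffer *src,
		       struct vb2_v4l2_buffer *dst);

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct my_dev *dev = ctx->dev;
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Apply the controls carried by the request, if any */
	v4l2_ctrl_request_setup(src->vb2_buf.req_obj.req, &ctx->hdl);

	my_process(ctx, src, dst);

	/* Mark the request's control state as final */
	v4l2_ctrl_request_complete(src->vb2_buf.req_obj.req, &ctx->hdl);

	/* Delayed work stands in for the hardware "irq" */
	schedule_delayed_work(&dev->work_run,
			      msecs_to_jiffies(ctx->transtime));
}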
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 06961e7d8036..626e2b24a403 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -627,6 +627,13 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
kfree(dev);
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+static const struct media_device_ops vivid_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = vb2_request_queue,
+};
+#endif
+
static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
@@ -657,6 +664,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->inst = inst;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ /* Initialize media device */
+ strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
+ dev->mdev.dev = &pdev->dev;
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &vivid_media_ops;
+#endif
+
/* register v4l2_device */
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
"%s-%03d", VIVID_MODULE_NAME, inst);
@@ -1060,6 +1077,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1080,6 +1098,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1100,6 +1119,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1120,6 +1140,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1139,6 +1160,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 8;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1174,6 +1196,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
#ifdef CONFIG_VIDEO_VIVID_CEC
if (in_type_counter[HDMI]) {
struct cec_adapter *adap;
@@ -1226,6 +1255,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
#ifdef CONFIG_VIDEO_VIVID_CEC
for (i = 0; i < dev->num_outputs; i++) {
struct cec_adapter *adap;
@@ -1275,6 +1311,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->tvnorms = tvnorms_cap;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1300,6 +1343,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->tvnorms = tvnorms_out;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1323,6 +1373,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1369,12 +1426,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+ /* Register the media device */
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ dev_err(dev->mdev.dev,
+ "media device register failed (err=%d)\n", ret);
+ goto unreg_dev;
+ }
+#endif
+
/* Now that everything is fine, let's add it to device list */
vivid_devs[inst] = dev;
return 0;
unreg_dev:
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+#endif
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1445,6 +1515,10 @@ static int vivid_remove(struct platform_device *pdev)
if (!dev)
continue;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+#endif
+
if (dev->has_vid_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->vid_cap_dev));
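
A minimal sketch, not part of this patch, of the registration boilerplate the vivid changes above add before userspace can use requests: a media_device wired to the v4l2_device, request validate/queue ops, and vb2 queues that opt in via supports_requests. The my_* names are hypothetical; vb2_request_validate() and vb2_request_queue() are the generic helpers used by vivid (an m2m driver uses vb2_m2m_request_queue() instead, as vim2m does above).

#include <linux/platform_device.h>
#include <linux/string.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical driver state, for illustration only. */
struct my_dev {
	struct media_device mdev;
	struct v4l2_device v4l2_dev;
	struct vb2_queue queue;
};

static const struct media_device_ops my_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = vb2_request_queue,
};

static int my_register_media(struct my_dev *dev, struct platform_device *pdev)
{
	dev->mdev.dev = &pdev->dev;
	strscpy(dev->mdev.model, "my-driver", sizeof(dev->mdev.model));
	media_device_init(&dev->mdev);
	dev->mdev.ops = &my_media_ops;
	dev->v4l2_dev.mdev = &dev->mdev;

	/* Each vb2 queue that should accept requests must opt in */
	dev->queue.supports_requests = true;

	return media_device_register(&dev->mdev);
}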
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index cd4c8230563c..1891254c8f0b 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -136,6 +136,14 @@ struct vivid_cec_work {
struct vivid_dev {
unsigned inst;
struct v4l2_device v4l2_dev;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+ struct media_pad vid_cap_pad;
+ struct media_pad vid_out_pad;
+ struct media_pad vbi_cap_pad;
+ struct media_pad vbi_out_pad;
+ struct media_pad sdr_cap_pad;
+#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
struct v4l2_ctrl_handler ctrl_hdl_user_aud;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 999aa101b150..bfffeda12f14 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1662,59 +1662,59 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
if (dev->has_vid_cap) {
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL, false);
if (hdl_vid_cap->error)
return hdl_vid_cap->error;
dev->vid_cap_dev.ctrl_handler = hdl_vid_cap;
}
if (dev->has_vid_out) {
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL);
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL);
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL, false);
if (hdl_vid_out->error)
return hdl_vid_out->error;
dev->vid_out_dev.ctrl_handler = hdl_vid_out;
}
if (dev->has_vbi_cap) {
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL, false);
if (hdl_vbi_cap->error)
return hdl_vbi_cap->error;
dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap;
}
if (dev->has_vbi_out) {
- v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL, false);
if (hdl_vbi_out->error)
return hdl_vbi_out->error;
dev->vbi_out_dev.ctrl_handler = hdl_vbi_out;
}
if (dev->has_radio_rx) {
- v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL, false);
if (hdl_radio_rx->error)
return hdl_radio_rx->error;
dev->radio_rx_dev.ctrl_handler = hdl_radio_rx;
}
if (dev->has_radio_tx) {
- v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL, false);
if (hdl_radio_tx->error)
return hdl_radio_tx->error;
dev->radio_tx_dev.ctrl_handler = hdl_radio_tx;
}
if (dev->has_sdr_cap) {
- v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL, false);
if (hdl_sdr_cap->error)
return hdl_sdr_cap->error;
dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index f06003bb8e42..eebfff2126be 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -703,6 +703,8 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
goto update_mv;
if (vid_cap_buf) {
+ v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
/* Fill buffer */
vivid_fillbuff(dev, vid_cap_buf);
dprintk(dev, 1, "filled buffer %d\n",
@@ -713,6 +715,8 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
vivid_overlay(dev, vid_cap_buf);
+ v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vid_cap buffer %d done\n",
@@ -720,10 +724,14 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
}
if (vbi_cap_buf) {
+ v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
if (dev->stream_sliced_vbi_cap)
vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
else
vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
+ v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vbi_cap %d done\n",
@@ -891,6 +899,8 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vid_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vid_cap buffer %d done\n",
buf->vb.vb2_buf.index);
@@ -904,6 +914,8 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vbi_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vbi_cap buffer %d done\n",
buf->vb.vb2_buf.index);
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index 9981e7548019..5a14810eeb69 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -75,6 +75,10 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
return;
if (vid_out_buf) {
+ v4l2_ctrl_request_setup(vid_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_request_complete(vid_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
vid_out_buf->vb.sequence = dev->vid_out_seq_count;
if (dev->field_out == V4L2_FIELD_ALTERNATE) {
/*
@@ -92,6 +96,10 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
}
if (vbi_out_buf) {
+ v4l2_ctrl_request_setup(vbi_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_request_complete(vbi_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
if (dev->stream_sliced_vbi_out)
vivid_sliced_vbi_out_process(dev, vbi_out_buf);
@@ -262,6 +270,8 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vid_out_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vid_out buffer %d done\n",
buf->vb.vb2_buf.index);
@@ -275,6 +285,8 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vbi_out_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vbi_out buffer %d done\n",
buf->vb.vb2_buf.index);
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 200b789a3f21..dcdc80e272c2 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -102,6 +102,10 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
if (sdr_cap_buf) {
sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
+ v4l2_ctrl_request_setup(sdr_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_request_complete(sdr_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
vivid_sdr_cap_process(dev, sdr_cap_buf);
sdr_cap_buf->vb.vb2_buf.timestamp =
ktime_get_ns() + dev->time_wrap_offset;
@@ -272,6 +276,8 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -293,6 +299,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
buf = list_entry(dev->sdr_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
@@ -303,12 +311,20 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
mutex_lock(&dev->mutex);
}
+static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_sdr_cap);
+}
+
const struct vb2_ops vivid_sdr_cap_qops = {
.queue_setup = sdr_cap_queue_setup,
.buf_prepare = sdr_cap_buf_prepare,
.buf_queue = sdr_cap_buf_queue,
.start_streaming = sdr_cap_start_streaming,
.stop_streaming = sdr_cap_stop_streaming,
+ .buf_request_complete = sdr_cap_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 92a852955173..903cebeb5ce5 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -204,6 +204,8 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -220,12 +222,20 @@ static void vbi_cap_stop_streaming(struct vb2_queue *vq)
vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming);
}
+static void vbi_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_cap);
+}
+
const struct vb2_ops vivid_vbi_cap_qops = {
.queue_setup = vbi_cap_queue_setup,
.buf_prepare = vbi_cap_buf_prepare,
.buf_queue = vbi_cap_buf_queue,
.start_streaming = vbi_cap_start_streaming,
.stop_streaming = vbi_cap_stop_streaming,
+ .buf_request_complete = vbi_cap_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 69486c130a7e..9357c07e30d6 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -96,6 +96,8 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -115,12 +117,20 @@ static void vbi_out_stop_streaming(struct vb2_queue *vq)
dev->vbi_out_have_cc[1] = false;
}
+static void vbi_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_out);
+}
+
const struct vb2_ops vivid_vbi_out_qops = {
.queue_setup = vbi_out_queue_setup,
.buf_prepare = vbi_out_buf_prepare,
.buf_queue = vbi_out_buf_queue,
.start_streaming = vbi_out_start_streaming,
.stop_streaming = vbi_out_stop_streaming,
+ .buf_request_complete = vbi_out_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 6cf910a60ecf..9c8e8be81ce3 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -243,6 +243,8 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -260,6 +262,13 @@ static void vid_cap_stop_streaming(struct vb2_queue *vq)
dev->can_loop_video = false;
}
+static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
+}
+
const struct vb2_ops vivid_vid_cap_qops = {
.queue_setup = vid_cap_queue_setup,
.buf_prepare = vid_cap_buf_prepare,
@@ -267,6 +276,7 @@ const struct vb2_ops vivid_vid_cap_qops = {
.buf_queue = vid_cap_buf_queue,
.start_streaming = vid_cap_start_streaming,
.stop_streaming = vid_cap_stop_streaming,
+ .buf_request_complete = vid_cap_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 50248e2176a0..aaf13f03d5d4 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -162,6 +162,8 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -179,12 +181,20 @@ static void vid_out_stop_streaming(struct vb2_queue *vq)
dev->can_loop_video = false;
}
+static void vid_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_out);
+}
+
const struct vb2_ops vivid_vid_out_qops = {
.queue_setup = vid_out_queue_setup,
.buf_prepare = vid_out_buf_prepare,
.buf_queue = vid_out_buf_queue,
.start_streaming = vid_out_start_streaming,
.stop_streaming = vid_out_stop_streaming,
+ .buf_request_complete = vid_out_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
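
A minimal sketch, not part of this patch, of the new buf_request_complete vb2 op that the vivid and vim2m queues above now implement: it is the driver's last chance to complete the request's control handler for a buffer that is returned (for example at streamoff) without ever having been processed. The my_* names and the context layout are hypothetical.

#include <media/v4l2-ctrls.h>
#include <media/videobuf2-v4l2.h>

/* Hypothetical per-queue context, for illustration only. */
struct my_ctx {
	struct v4l2_ctrl_handler hdl;
};

static void my_buf_request_complete(struct vb2_buffer *vb)
{
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
}

/* Wired into the driver's vb2_ops as .buf_request_complete */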
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index aa7f3c307b22..3f401fbd0ecc 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -949,7 +949,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
buf->length = cam->frame_size;
buf->reserved2 = 0;
- buf->reserved = 0;
+ buf->request_fd = 0;
memset(&buf->timecode, 0, sizeof(buf->timecode));
DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index,
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index f700ec35b7f3..2641e23d946b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1992,7 +1992,7 @@ int cx231xx_417_register(struct cx231xx *dev)
dev->mpeg_ctrl_handler.ops = &cx231xx_ops;
if (dev->sd_cx25840)
v4l2_ctrl_add_handler(&dev->mpeg_ctrl_handler.hdl,
- dev->sd_cx25840->ctrl_handler, NULL);
+ dev->sd_cx25840->ctrl_handler, NULL, false);
if (dev->mpeg_ctrl_handler.hdl.error) {
err = dev->mpeg_ctrl_handler.hdl.error;
dprintk(3, "%s: can't add cx25840 controls\n", dev->name);
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index f2f034c5cd62..c990f70c0ea6 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2204,10 +2204,10 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
if (dev->sd_cx25840) {
v4l2_ctrl_add_handler(&dev->ctrl_handler,
- dev->sd_cx25840->ctrl_handler, NULL);
+ dev->sd_cx25840->ctrl_handler, NULL, true);
v4l2_ctrl_add_handler(&dev->radio_ctrl_handler,
dev->sd_cx25840->ctrl_handler,
- v4l2_ctrl_radio_filter);
+ v4l2_ctrl_radio_filter, true);
}
if (dev->ctrl_handler.error)
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 0fc4076c6d16..10b5b95bee34 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -1278,7 +1278,7 @@ static int msi2500_probe(struct usb_interface *intf,
}
/* currently all controls are from subdev */
- v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL);
+ v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL, true);
dev->v4l2_dev.ctrl_handler = &dev->hdl;
dev->vdev.v4l2_dev = &dev->v4l2_dev;
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 6c992f197255..ee7b5318b351 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1627,7 +1627,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
v4l2_ctrl_add_handler(&dev->ctrl_handler,
- &dev->radio_ctrl_handler, NULL);
+ &dev->radio_ctrl_handler, NULL, false);
if (dev->radio_ctrl_handler.error)
ret = dev->radio_ctrl_handler.error;
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index fecccb5e7628..8964e16f2b22 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -300,12 +300,13 @@ int uvc_create_buffers(struct uvc_video_queue *queue,
return ret;
}
-int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
+int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct media_device *mdev, struct v4l2_buffer *buf)
{
int ret;
mutex_lock(&queue->mutex);
- ret = vb2_qbuf(&queue->queue, buf);
+ ret = vb2_qbuf(&queue->queue, mdev, buf);
mutex_unlock(&queue->mutex);
return ret;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index b26182ce7462..84be596d3269 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -751,7 +751,8 @@ static int uvc_ioctl_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
if (!uvc_has_privileges(handle))
return -EBUSY;
- return uvc_queue_buffer(&stream->queue, buf);
+ return uvc_queue_buffer(&stream->queue,
+ stream->vdev.v4l2_dev->mdev, buf);
}
static int uvc_ioctl_expbuf(struct file *file, void *fh,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 591eae3d0b0d..c0cbd833d0a4 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -700,6 +700,7 @@ int uvc_query_buffer(struct uvc_video_queue *queue,
int uvc_create_buffers(struct uvc_video_queue *queue,
struct v4l2_create_buffers *v4l2_cb);
int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct media_device *mdev,
struct v4l2_buffer *v4l2_buf);
int uvc_export_buffer(struct uvc_video_queue *queue,
struct v4l2_exportbuffer *exp);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 6481212fda77..f4325329fbd6 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -244,6 +244,7 @@ struct v4l2_format32 {
* return: number of created buffers
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -251,7 +252,8 @@ struct v4l2_create_buffers32 {
__u32 count;
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
- __u32 reserved[8];
+ __u32 capabilities;
+ __u32 reserved[7];
};
static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size)
@@ -411,6 +413,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64,
if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
copy_in_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
+ assign_in_user(&p32->capabilities, &p64->capabilities) ||
copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return __put_v4l2_format32(&p64->format, &p32->format);
@@ -482,7 +485,7 @@ struct v4l2_buffer32 {
} m;
__u32 length;
__u32 reserved2;
- __u32 reserved;
+ __s32 request_fd;
};
static int get_v4l2_plane32(struct v4l2_plane __user *p64,
@@ -581,6 +584,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
{
u32 type;
u32 length;
+ s32 request_fd;
enum v4l2_memory memory;
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
@@ -595,7 +599,9 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
get_user(memory, &p32->memory) ||
put_user(memory, &p64->memory) ||
get_user(length, &p32->length) ||
- put_user(length, &p64->length))
+ put_user(length, &p64->length) ||
+ get_user(request_fd, &p32->request_fd) ||
+ put_user(request_fd, &p64->request_fd))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -699,7 +705,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) ||
assign_in_user(&p32->sequence, &p64->sequence) ||
assign_in_user(&p32->reserved2, &p64->reserved2) ||
- assign_in_user(&p32->reserved, &p64->reserved) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
get_user(length, &p64->length) ||
put_user(length, &p32->length))
return -EFAULT;
@@ -834,7 +840,8 @@ struct v4l2_ext_controls32 {
__u32 which;
__u32 count;
__u32 error_idx;
- __u32 reserved[2];
+ __s32 request_fd;
+ __u32 reserved[1];
compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};
@@ -909,6 +916,7 @@ static int get_v4l2_ext_controls32(struct file *file,
get_user(count, &p32->count) ||
put_user(count, &p64->count) ||
assign_in_user(&p64->error_idx, &p32->error_idx) ||
+ assign_in_user(&p64->request_fd, &p32->request_fd) ||
copy_in_user(p64->reserved, p32->reserved, sizeof(p64->reserved)))
return -EFAULT;
@@ -974,6 +982,7 @@ static int put_v4l2_ext_controls32(struct file *file,
get_user(count, &p64->count) ||
put_user(count, &p32->count) ||
assign_in_user(&p32->error_idx, &p64->error_idx) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)) ||
get_user(kcontrols, &p64->controls))
return -EFAULT;
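
A minimal userspace sketch, not part of this patch, showing how the renamed fields above are meant to be used once a request fd has been allocated with MEDIA_IOC_REQUEST_ALLOC: v4l2_ext_controls gains request_fd together with V4L2_CTRL_WHICH_REQUEST_VAL, and the former reserved word of v4l2_buffer becomes request_fd, valid when V4L2_BUF_FLAG_REQUEST_FD is set. The helper names and the output buffer type are assumptions.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_ctrl_in_request(int video_fd, int request_fd, __u32 id, __s32 val)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));
	ctrl.id = id;
	ctrl.value = val;
	ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
	ctrls.request_fd = request_fd;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	/* The control value is stored in the request, not applied yet */
	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}

static int queue_buf_in_request(int video_fd, int request_fd, __u32 index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.flags = V4L2_BUF_FLAG_REQUEST_FD;
	buf.request_fd = request_fd;

	/* The buffer stays bound to the request until it is queued */
	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}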
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 4c0ecf29d278..6e37950292cd 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -37,8 +37,8 @@
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
struct v4l2_ctrl_ref *mref;
- /* The control corresponding to the v4l2_ext_control ID field. */
- struct v4l2_ctrl *ctrl;
+ /* The control ref corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl_ref *ref;
/* v4l2_ext_control index of the next control belonging to the
same cluster, or 0 if there isn't any. */
u32 next;
@@ -844,6 +844,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices";
/* VPX controls */
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
@@ -1292,6 +1294,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_RDS_TX_ALT_FREQS:
*type = V4L2_CTRL_TYPE_U32;
break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION:
+ *type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1550,6 +1558,7 @@ static void std_log(const struct v4l2_ctrl *ctrl)
static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
size_t len;
u64 offset;
s64 val;
@@ -1612,6 +1621,54 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
return -ERANGE;
return 0;
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ p_mpeg2_slice_params = ptr.p;
+
+ switch (p_mpeg2_slice_params->sequence.chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_structure) {
+ case 1: /* interlaced top field */
+ case 2: /* interlaced bottom field */
+ case 3: /* progressive */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_I:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME ||
+ p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
+ return -EINVAL;
+
+ return 0;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ return 0;
+
default:
return -EINVAL;
}
@@ -1668,6 +1725,13 @@ static int new_to_user(struct v4l2_ext_control *c,
return ptr_to_user(c, ctrl, ctrl->p_new);
}
+/* Helper function: copy the request value back to the caller */
+static int req_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl_ref *ref)
+{
+ return ptr_to_user(c, ref->ctrl, ref->p_req);
+}
+
/* Helper function: copy the initial control value back to the caller */
static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
@@ -1787,6 +1851,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl)
ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
}
+/* Copy the new value to the request value */
+static void new_to_req(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
+ ref->req = ref;
+}
+
+/* Copy the request value to the new value */
+static void req_to_new(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ if (ref->req)
+ ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
+ else
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+}
+
/* Return non-zero if one or more of the controls in the cluster has a new
value that differs from the current value. */
static int cluster_changed(struct v4l2_ctrl *master)
@@ -1896,11 +1980,15 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
lockdep_set_class_and_name(hdl->lock, key, name);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
+ INIT_LIST_HEAD(&hdl->requests);
+ INIT_LIST_HEAD(&hdl->requests_queued);
+ hdl->request_is_queued = false;
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
sizeof(hdl->buckets[0]),
GFP_KERNEL | __GFP_ZERO);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ media_request_object_init(&hdl->req_obj);
return hdl->error;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
@@ -1915,6 +2003,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL || hdl->buckets == NULL)
return;
+ if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
+ struct v4l2_ctrl_handler *req, *next_req;
+
+ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+ media_request_object_unbind(&req->req_obj);
+ media_request_object_put(&req->req_obj);
+ }
+ }
mutex_lock(hdl->lock);
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
@@ -2016,13 +2112,19 @@ EXPORT_SYMBOL(v4l2_ctrl_find);
/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl)
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req)
{
struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl_ref *new_ref;
u32 id = ctrl->id;
u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+ unsigned int size_extra_req = 0;
+
+ if (ctrl_ref)
+ *ctrl_ref = NULL;
/*
* Automatically add the control class if it is not yet present and
@@ -2036,10 +2138,16 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (hdl->error)
return hdl->error;
- new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
+ if (allocate_req)
+ size_extra_req = ctrl->elems * ctrl->elem_size;
+ new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
if (!new_ref)
return handler_set_err(hdl, -ENOMEM);
new_ref->ctrl = ctrl;
+ new_ref->from_other_dev = from_other_dev;
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
if (ctrl->handler == hdl) {
/* By default each control starts in a cluster of its own.
new_ref->ctrl is basically a cluster array with one
@@ -2079,6 +2187,8 @@ insert_in_hash:
/* Insert the control node in the hash */
new_ref->next = hdl->buckets[bucket];
hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
unlock:
mutex_unlock(hdl->lock);
@@ -2133,6 +2243,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_U32:
elem_size = sizeof(u32);
break;
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2220,7 +2336,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
}
- if (handler_new_ref(hdl, ctrl)) {
+ if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
kvfree(ctrl);
return NULL;
}
@@ -2389,7 +2505,8 @@ EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
/* Add the controls from another handler to our own. */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
- bool (*filter)(const struct v4l2_ctrl *ctrl))
+ bool (*filter)(const struct v4l2_ctrl *ctrl),
+ bool from_other_dev)
{
struct v4l2_ctrl_ref *ref;
int ret = 0;
@@ -2412,7 +2529,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
/* Filter any unwanted controls */
if (filter && !filter(ctrl))
continue;
- ret = handler_new_ref(hdl, ctrl);
+ ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
if (ret)
break;
}
@@ -2815,6 +2932,148 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
}
EXPORT_SYMBOL(v4l2_querymenu);
+static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_handler *from)
+{
+ struct v4l2_ctrl_ref *ref;
+ int err = 0;
+
+ if (WARN_ON(!hdl || hdl == from))
+ return -EINVAL;
+
+ if (hdl->error)
+ return hdl->error;
+
+ WARN_ON(hdl->lock != &hdl->_lock);
+
+ mutex_lock(from->lock);
+ list_for_each_entry(ref, &from->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl_ref *new_ref;
+
+ /* Skip refs inherited from other devices */
+ if (ref->from_other_dev)
+ continue;
+ /* And buttons */
+ if (ctrl->type == V4L2_CTRL_TYPE_BUTTON)
+ continue;
+ err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
+ if (err)
+ break;
+ }
+ mutex_unlock(from->lock);
+ return err;
+}
+
+static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+ struct v4l2_ctrl_handler *prev_hdl = NULL;
+ struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+
+ if (list_empty(&main_hdl->requests_queued))
+ goto queue;
+
+ prev_hdl = list_last_entry(&main_hdl->requests_queued,
+ struct v4l2_ctrl_handler, requests_queued);
+ /*
+ * Note: prev_hdl and hdl must contain the same list of control
+ * references, so if any differences are detected then that is a
+ * driver bug and the WARN_ON is triggered.
+ */
+ mutex_lock(prev_hdl->lock);
+ ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
+ struct v4l2_ctrl_ref, node);
+ list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
+ if (ref_ctrl->req)
+ continue;
+ while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
+ /* Should never happen, but just in case... */
+ if (list_is_last(&ref_ctrl_prev->node,
+ &prev_hdl->ctrl_refs))
+ break;
+ ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
+ }
+ if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
+ break;
+ ref_ctrl->req = ref_ctrl_prev->req;
+ }
+ mutex_unlock(prev_hdl->lock);
+queue:
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
+}
+
+static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_del_init(&hdl->requests);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
+}
+
+static void v4l2_ctrl_request_release(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+}
+
+static const struct media_request_object_ops req_ops = {
+ .queue = v4l2_ctrl_request_queue,
+ .unbind = v4l2_ctrl_request_unbind,
+ .release = v4l2_ctrl_request_release,
+};
+
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent)
+{
+ struct media_request_object *obj;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return NULL;
+
+ obj = media_request_object_find(req, &req_ops, parent);
+ if (obj)
+ return container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
+
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return (ref && ref->req == ref) ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
+
+static int v4l2_ctrl_request_bind(struct media_request *req,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *from)
+{
+ int ret;
+
+ ret = v4l2_ctrl_request_clone(hdl, from);
+
+ if (!ret) {
+ ret = media_request_object_bind(req, &req_ops,
+ from, false, &hdl->req_obj);
+ if (!ret)
+ list_add_tail(&hdl->requests, &from->requests);
+ }
+ return ret;
+}
/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
@@ -2876,6 +3135,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
if (cs->which &&
cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
+ cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
V4L2_CTRL_ID2WHICH(id) != cs->which)
return -EINVAL;
@@ -2886,6 +3146,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
ref = find_ref_lock(hdl, id);
if (ref == NULL)
return -EINVAL;
+ h->ref = ref;
ctrl = ref->ctrl;
if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -2908,7 +3169,6 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
}
/* Store the ref to the master control of the cluster */
h->mref = ref;
- h->ctrl = ctrl;
/* Initially set next to 0, meaning that there is no other
control in this helper array belonging to the same
cluster */
@@ -2955,15 +3215,15 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
whether there are any controls at all. */
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
- if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL)
+ if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
+ which == V4L2_CTRL_WHICH_REQUEST_VAL)
return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
-
-
/* Get extended controls. Allocates the helpers array if needed. */
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -2993,7 +3253,7 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
- if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
@@ -3027,8 +3287,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
u32 idx = i;
do {
- ret = ctrl_to_user(cs->controls + idx,
- helpers[idx].ctrl);
+ if (helpers[idx].ref->req)
+ ret = req_to_user(cs->controls + idx,
+ helpers[idx].ref->req);
+ else
+ ret = ctrl_to_user(cs->controls + idx,
+ helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
@@ -3039,6 +3303,91 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
kvfree(helpers);
return ret;
}
+
+static struct media_request_object *
+v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
+ struct media_request *req, bool set)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *new_hdl;
+ int ret;
+
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ return ERR_PTR(-EBUSY);
+
+ obj = media_request_object_find(req, &req_ops, hdl);
+ if (obj)
+ return obj;
+ if (!set)
+ return ERR_PTR(-ENOENT);
+
+ new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
+ if (!new_hdl)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &new_hdl->req_obj;
+ ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
+ if (ret) {
+ kfree(new_hdl);
+
+ return ERR_PTR(ret);
+ }
+
+ media_request_object_get(obj);
+ return obj;
+}
+
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ media_request_put(req);
+ return -EACCES;
+ }
+
+ ret = media_request_lock_for_access(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, false);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_access(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ }
+
+ ret = v4l2_g_ext_ctrls_common(hdl, cs);
+
+ if (obj) {
+ media_request_unlock_for_access(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ }
+ return ret;
+}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
/* Helper function to get a single control */
@@ -3180,7 +3529,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
union v4l2_ctrl_ptr p_new;
cs->error_idx = i;
@@ -3227,9 +3576,9 @@ static void update_from_auto_cluster(struct v4l2_ctrl *master)
}
/* Try or try-and-set controls */
-static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- bool set)
+static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs, bool set)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -3292,7 +3641,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
do {
/* Check if the auto control is part of the
list, and remember the new value. */
- if (helpers[tmp_idx].ctrl == master)
+ if (helpers[tmp_idx].ref->ctrl == master)
new_auto_val = cs->controls[tmp_idx].value;
tmp_idx = helpers[tmp_idx].next;
} while (tmp_idx);
@@ -3305,7 +3654,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
/* Copy the new caller-supplied control values.
user_to_new() sets 'is_new' to 1. */
do {
- struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
+ struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
ret = user_to_new(cs->controls + idx, ctrl);
if (!ret && ctrl->is_ptr)
@@ -3314,14 +3663,23 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
} while (!ret && idx);
if (!ret)
- ret = try_or_set_cluster(fh, master, set, 0);
+ ret = try_or_set_cluster(fh, master,
+ !hdl->req_obj.req && set, 0);
+ if (!ret && hdl->req_obj.req && set) {
+ for (j = 0; j < master->ncontrols; j++) {
+ struct v4l2_ctrl_ref *ref =
+ find_ref(hdl, master->cluster[j]->id);
+
+ new_to_req(ref);
+ }
+ }
/* Copy the new values back to userspace. */
if (!ret) {
idx = i;
do {
ret = new_to_user(cs->controls + idx,
- helpers[idx].ctrl);
+ helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
@@ -3333,16 +3691,60 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return ret;
}
-int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+static int try_set_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = media_request_lock_for_update(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, set);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_update(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ }
+
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, set);
+
+ if (obj) {
+ media_request_unlock_for_update(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ }
+
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(NULL, hdl, cs, false);
+ return try_set_ext_ctrls(NULL, hdl, mdev, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs)
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(fh, hdl, cs, true);
+ return try_set_ext_ctrls(fh, hdl, mdev, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
@@ -3441,6 +3843,162 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return;
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ unsigned int i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ref->req = ref;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ call_op(master, g_volatile_ctrl);
+ new_to_req(ref);
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+ if (ref->req == ref)
+ continue;
+
+ v4l2_ctrl_lock(ctrl);
+ if (ref->req)
+ ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
+ else
+ ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
+ v4l2_ctrl_unlock(ctrl);
+ }
+
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_complete);
+
+void v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return;
+ if (obj->completed) {
+ media_request_object_put(obj);
+ return;
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ ref->req_done = false;
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ bool have_new_data = false;
+ int i;
+
+ /*
+ * Skip if this control was already handled by a cluster.
+ * Skip button controls and read-only controls.
+ */
+ if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ v4l2_ctrl_lock(master);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ if (r->req && r == r->req) {
+ have_new_data = true;
+ break;
+ }
+ }
+ }
+ if (!have_new_data) {
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ req_to_new(r);
+ master->cluster[i]->is_new = 1;
+ r->req_done = true;
+ }
+ }
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ s32 new_auto_val = *master->p_new.p_s32;
+
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ try_or_set_cluster(NULL, master, true, 0);
+
+ v4l2_ctrl_unlock(master);
+ }
+
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_setup);
+
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
{
if (ctrl == NULL)
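
[Editor's sketch] The request-aware v4l2_g_ext_ctrls() path above only dereferences a request when cs->which is V4L2_CTRL_WHICH_REQUEST_VAL, and it returns -EACCES until the request reaches MEDIA_REQUEST_STATE_COMPLETE. A minimal userspace-side sketch of how this is meant to be exercised; video_fd and req_fd are assumed to come from the video node and MEDIA_IOC_REQUEST_ALLOC respectively, and V4L2_CID_BRIGHTNESS is just an example control.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hedged sketch: read a control value back out of a completed request. */
static int read_request_brightness(int video_fd, int req_fd)
{
        struct v4l2_ext_control ctrl = { .id = V4L2_CID_BRIGHTNESS };
        struct v4l2_ext_controls ctrls = {
                .which      = V4L2_CTRL_WHICH_REQUEST_VAL,
                .request_fd = req_fd,      /* from MEDIA_IOC_REQUEST_ALLOC */
                .count      = 1,
                .controls   = &ctrl,
        };

        /* Fails with EACCES until the request has completed, matching the
         * state check added in v4l2_g_ext_ctrls() above.
         */
        if (ioctl(video_fd, VIDIOC_G_EXT_CTRLS, &ctrls))
                return -1;
        printf("brightness recorded in the request: %d\n", ctrl.value);
        return 0;
}
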
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 69e775930fc4..feb749aaaa42 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -444,8 +444,22 @@ static int v4l2_release(struct inode *inode, struct file *filp)
struct video_device *vdev = video_devdata(filp);
int ret = 0;
- if (vdev->fops->release)
- ret = vdev->fops->release(filp);
+ /*
+ * We need to serialize the release() with queueing new requests.
+ * The release() may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (vdev->fops->release) {
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
+ ret = vdev->fops->release(filp);
+ }
+ }
+
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: release\n",
video_device_node_name(vdev));
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 098562901f25..df0ac38c4050 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -178,7 +178,8 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
sd->v4l2_dev = v4l2_dev;
/* This just returns 0 if either of the two args is NULL */
- err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
if (err)
goto error_module;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 7de041bae84f..c63746968fa3 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -474,13 +474,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
- prt_names(p->type, v4l2_type_names),
+ prt_names(p->type, v4l2_type_names), p->request_fd,
p->flags, prt_names(p->field, v4l2_field_names),
p->sequence, prt_names(p->memory, v4l2_memory_names));
@@ -590,8 +590,8 @@ static void v4l_print_ext_controls(const void *arg, bool write_only)
const struct v4l2_ext_controls *p = arg;
int i;
- pr_cont("which=0x%x, count=%d, error_idx=%d",
- p->which, p->count, p->error_idx);
+ pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d",
+ p->which, p->count, p->error_idx, p->request_fd);
for (i = 0; i < p->count; i++) {
if (!p->controls[i].size)
pr_cont(", id/val=0x%x/0x%x",
@@ -907,7 +907,7 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
__u32 i;
/* zero the reserved fields */
- c->reserved[0] = c->reserved[1] = 0;
+ c->reserved[0] = 0;
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
@@ -1309,6 +1309,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_H263: descr = "H.263"; break;
case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
+ case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 part 2 ES"; break;
case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
@@ -1336,6 +1337,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
if (fmt->description[0])
@@ -1877,7 +1879,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, memory);
+ CLEAR_AFTER_FIELD(p, capabilities);
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -1918,7 +1920,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(create, format);
+ CLEAR_AFTER_FIELD(create, capabilities);
v4l_sanitize_format(&create->format);
@@ -2109,9 +2111,9 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
@@ -2128,9 +2130,9 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
@@ -2147,9 +2149,9 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
@@ -2780,6 +2782,7 @@ static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct mutex *req_queue_lock = NULL;
struct mutex *lock; /* ioctl serialization mutex */
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool write_only = false;
@@ -2799,10 +2802,27 @@ static long __video_do_ioctl(struct file *file,
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
+ /*
+ * We need to serialize streamon/off with queueing new requests.
+ * These ioctls may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
+ (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
+ req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
+
+ if (mutex_lock_interruptible(req_queue_lock))
+ return -ERESTARTSYS;
+ }
+
lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
- if (lock && mutex_lock_interruptible(lock))
+ if (lock && mutex_lock_interruptible(lock)) {
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return -ERESTARTSYS;
+ }
if (!video_is_registered(vfd)) {
ret = -ENODEV;
@@ -2861,6 +2881,8 @@ done:
unlock:
if (lock)
mutex_unlock(lock);
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return ret;
}
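
[Editor's sketch] The __video_do_ioctl() hunk above introduces a second lock: for VIDIOC_STREAMON/VIDIOC_STREAMOFF on request-capable devices it takes the media device's req_queue_mutex before the usual per-ioctl lock, and releases the two in reverse order, including when the inner lock acquisition is interrupted. The ordering, reduced to a stand-alone sketch; the function and both locks are illustrative, not a real driver.

#include <linux/errno.h>
#include <linux/mutex.h>

/* Sketch: outer lock first, inner lock second, unwind in reverse on error. */
static long locked_op(struct mutex *outer, struct mutex *inner)
{
        long ret = 0;

        if (outer && mutex_lock_interruptible(outer))
                return -ERESTARTSYS;

        if (inner && mutex_lock_interruptible(inner)) {
                if (outer)
                        mutex_unlock(outer);
                return -ERESTARTSYS;
        }

        /* ... the actual ioctl work would run here ... */

        if (inner)
                mutex_unlock(inner);
        if (outer)
                mutex_unlock(outer);
        return ret;
}
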
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index ce9bd1b91210..d7806db222d8 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (m2m_dev->m2m_ops->job_abort)
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -473,12 +473,19 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_qbuf(vq, buf);
- if (!ret)
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ dprintk("%s: requests cannot be used with capture buffers\n",
+ __func__);
+ return -EPERM;
+ }
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+ if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
return ret;
@@ -498,15 +505,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
- int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_prepare_buf(vq, buf);
- if (!ret)
- v4l2_m2m_try_schedule(m2m_ctx);
-
- return ret;
+ return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
@@ -950,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+void vb2_m2m_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+ struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ struct v4l2_m2m_ctx *m2m_ctx_obj;
+ struct vb2_buffer *vb;
+
+ if (!obj->ops->queue)
+ continue;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ /* Sanity checks */
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+ m2m_ctx_obj = container_of(vb->vb2_queue,
+ struct v4l2_m2m_ctx,
+ out_q_ctx.q);
+ WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+ m2m_ctx = m2m_ctx_obj;
+ }
+
+ /*
+ * The buffer we queue here can in theory be immediately
+ * unbound, hence the use of list_for_each_entry_safe()
+ * above and why we call the queue op last.
+ */
+ obj->ops->queue(obj);
+ }
+
+ WARN_ON(!m2m_ctx);
+
+ if (m2m_ctx)
+ v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+
/* Videobuf2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
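
[Editor's sketch] vb2_m2m_request_queue() above has to walk the object list with list_for_each_entry_safe() because obj->ops->queue() may unbind and free the very entry being visited. Why the _safe variant survives that, shown on a hypothetical structure:

#include <linux/list.h>

/* 'item' is a made-up structure embedding a list_head; ->consume() may
 * list_del() and free the entry it is handed, just like a queued buffer
 * object can disappear inside the loop above.
 */
struct item {
        struct list_head node;
        void (*consume)(struct item *it);
};

static void drain(struct list_head *head)
{
        struct item *it, *tmp;

        /* 'tmp' caches the next entry before the body runs, so freeing
         * 'it' inside ->consume() cannot break the iteration.
         */
        list_for_each_entry_safe(it, tmp, head, node)
                it->consume(it);
}
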
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 792f41dffe23..f5f0d71ec745 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -222,17 +222,20 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
diff --git a/drivers/mfd/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
index 45e9453608c5..978d836a0248 100644
--- a/drivers/mfd/cros_ec_dev.h
+++ b/drivers/mfd/cros_ec_dev.h
@@ -26,12 +26,13 @@
#define CROS_EC_DEV_VERSION "1.0.0"
-/*
- * @offset: within EC_LPC_ADDR_MEMMAP region
- * @bytes: number of bytes to read. zero means "read a string" (including '\0')
- * (at most only EC_MEMMAP_SIZE bytes can be read)
- * @buffer: where to store the result
- * ioctl returns the number of bytes read, negative on error
+/**
+ * struct cros_ec_readmem - Struct used to read mapped memory.
+ * @offset: Within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including '\0')
+ * At most only EC_MEMMAP_SIZE bytes can be read.
+ * @buffer: Where to store the result. The ioctl returns the number of bytes
+ * read or negative on error.
*/
struct cros_ec_readmem {
uint32_t offset;
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 3370a4138e94..951c984de61a 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -8,7 +8,9 @@ lkdtm-$(CONFIG_LKDTM) += perms.o
lkdtm-$(CONFIG_LKDTM) += refcount.o
lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
+lkdtm-$(CONFIG_LKDTM) += stackleak.o
+KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_rodata.o := n
OBJCOPYFLAGS :=
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 5a755590d3dc..2837dc77478e 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -184,6 +184,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_KERNEL),
CRASHTYPE(USERCOPY_KERNEL_DS),
+ CRASHTYPE(STACKLEAK_ERASING),
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 07db641d71d0..3c6fd327e166 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -84,4 +84,7 @@ void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
void lkdtm_USERCOPY_KERNEL_DS(void);
+/* lkdtm_stackleak.c */
+void lkdtm_STACKLEAK_ERASING(void);
+
#endif
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
new file mode 100644
index 000000000000..d5a084475abc
--- /dev/null
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code tests that the current task stack is properly erased (filled
+ * with STACKLEAK_POISON).
+ *
+ * Authors:
+ * Alexander Popov <alex.popov@linux.com>
+ * Tycho Andersen <tycho@tycho.ws>
+ */
+
+#include "lkdtm.h"
+#include <linux/stackleak.h>
+
+void lkdtm_STACKLEAK_ERASING(void)
+{
+ unsigned long *sp, left, found, i;
+ const unsigned long check_depth =
+ STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+
+ /*
+ * For the details about the alignment of the poison values, see
+ * the comment in stackleak_track_stack().
+ */
+ sp = PTR_ALIGN(&i, sizeof(unsigned long));
+
+ left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long);
+ sp--;
+
+ /*
+ * One 'long int' at the bottom of the thread stack is reserved
+ * and not poisoned.
+ */
+ if (left > 1) {
+ left--;
+ } else {
+ pr_err("FAIL: not enough stack space for the test\n");
+ return;
+ }
+
+ pr_info("checking unused part of the thread stack (%lu bytes)...\n",
+ left * sizeof(unsigned long));
+
+ /*
+ * Search for 'check_depth' poison values in a row (just like
+ * stackleak_erase() does).
+ */
+ for (i = 0, found = 0; i < left && found <= check_depth; i++) {
+ if (*(sp - i) == STACKLEAK_POISON)
+ found++;
+ else
+ found = 0;
+ }
+
+ if (found <= check_depth) {
+ pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n",
+ i * sizeof(unsigned long));
+ return;
+ }
+
+ pr_info("first %lu bytes are unpoisoned\n",
+ (i - found) * sizeof(unsigned long));
+
+ /* The rest of thread stack should be erased */
+ for (; i < left; i++) {
+ if (*(sp - i) != STACKLEAK_POISON) {
+ pr_err("FAIL: thread stack is NOT properly erased\n");
+ return;
+ }
+ }
+
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ return;
+}
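
[Editor's sketch] The loop in lkdtm_STACKLEAK_ERASING() above declares the stack erased only after it sees more than check_depth poison words in a row; any non-poison word resets the run. The same search pulled out into a stand-alone helper, where the poison value and depth are parameters rather than the kernel's constants:

/* Sketch of the consecutive-sentinel scan used above. Returns non-zero once
 * more than 'depth' matching words are found back to back.
 */
static int poison_run_found(const unsigned long *words, unsigned long count,
                            unsigned long depth, unsigned long poison)
{
        unsigned long i, found = 0;

        for (i = 0; i < count && found <= depth; i++) {
                if (words[i] == poison)
                        found++;
                else
                        found = 0;      /* a gap resets the run */
        }
        return found > depth;
}
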
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index bd52f29b4a4e..264f4ed8eef2 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -3030,7 +3030,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
- iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+ iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
qp_lock(qpair);
@@ -3074,7 +3074,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
- iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+ iov_iter_kvec(&to, READ, &v, 1, buf_size);
qp_lock(qpair);
@@ -3119,7 +3119,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
- iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+ iov_iter_kvec(&to, READ, &v, 1, buf_size);
qp_lock(qpair);
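
[Editor's sketch] The vmci hunks track an iov_iter interface change: iov_iter_kvec() now takes a bare READ or WRITE direction, since the kvec iterator type is implied by the function itself rather than passed as an ITER_KVEC flag. The new calling convention in isolation; the buffer and length names are assumptions:

#include <linux/uio.h>

/* Sketch: build a kvec-backed iterator for reading 'len' bytes into 'buf'
 * and hand it to whatever consumes the iterator within the same scope.
 */
static size_t read_into(void *buf, size_t len)
{
        struct kvec v = { .iov_base = buf, .iov_len = len };
        struct iov_iter to;

        iov_iter_kvec(&to, READ, &v, 1, len);   /* previously: READ | ITER_KVEC */
        /* ... pass '&to' to the consumer here ... */
        return iov_iter_count(&to);
}
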
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index fc15ec58230a..0d33cf0842ad 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -25,7 +25,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <uapi/linux/magic.h>
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index a07e24970be4..11c5bad95226 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -33,7 +33,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 38fa60ddaf2e..28510e33924f 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -38,7 +38,7 @@
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/io.h>
#include "arcdevice.h"
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 4e56aaf2b984..2c546013a980 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -34,7 +34,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 9697977b80f0..6b9ad8673218 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
- sizeof(bond->params.ad_actor_system),
- &bond->params.ad_actor_system))
+ ETH_ALEN, &bond->params.ad_actor_system))
goto nla_put_failure;
}
if (!bond_3ad_get_active_agg_info(bond, &info)) {
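
[Editor's sketch] The bonding change above emits IFLA_BOND_AD_ACTOR_SYSTEM with an explicit ETH_ALEN length instead of sizeof() on the stored field, pinning the netlink attribute to the six bytes of a MAC address independent of how the value is declared internally. A minimal hedged sketch of putting a MAC attribute that way:

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <net/netlink.h>

/* Sketch: emit a MAC address attribute with an explicit ETH_ALEN length. */
static int put_actor_system(struct sk_buff *skb, const u8 *mac)
{
        return nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM, ETH_ALEN, mac);
}
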
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index e82e4ca20620..055b40606dbc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -316,8 +316,8 @@ struct hnae3_ae_ops {
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
- void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
- bool en_mc_pmc);
+ int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -391,7 +391,7 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
- void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
+ int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 32f3aca814e7..3f96aa30068e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -509,16 +509,18 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
h->netdev_flags = new_flags;
}
-void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
+int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
if (h->ae_algo->ops->set_promisc_mode) {
- h->ae_algo->ops->set_promisc_mode(h,
- promisc_flags & HNAE3_UPE,
- promisc_flags & HNAE3_MPE);
+ return h->ae_algo->ops->set_promisc_mode(h,
+ promisc_flags & HNAE3_UPE,
+ promisc_flags & HNAE3_MPE);
}
+
+ return 0;
}
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
@@ -1494,18 +1496,22 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
return ret;
}
-static void hns3_restore_vlan(struct net_device *netdev)
+static int hns3_restore_vlan(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret = 0;
u16 vid;
- int ret;
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
- if (ret)
- netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
- vid, ret);
+ if (ret) {
+ netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
+ vid, ret);
+ return ret;
+ }
}
+
+ return ret;
}
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
@@ -2727,7 +2733,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
GFP_KERNEL);
if (!chain)
- return -ENOMEM;
+ goto err_free_chain;
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
@@ -2757,7 +2763,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
while (rx_ring) {
chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
if (!chain)
- return -ENOMEM;
+ goto err_free_chain;
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
@@ -2772,6 +2778,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
}
return 0;
+
+err_free_chain:
+ cur_chain = head->next;
+ while (cur_chain) {
+ chain = cur_chain->next;
+ devm_kfree(&pdev->dev, chain);
+ cur_chain = chain;
+ }
+
+ return -ENOMEM;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
@@ -2821,7 +2837,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int ret = 0;
- u16 i;
+ int i;
hns3_nic_set_cpumask(priv);
@@ -2868,13 +2884,19 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (ret)
- return ret;
+ goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
hns3_nic_common_poll, NAPI_POLL_WEIGHT);
}
return 0;
+
+map_ring_fail:
+ while (i--)
+ netif_napi_del(&priv->tqp_vector[i].napi);
+
+ return ret;
}
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
@@ -3031,8 +3053,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp,
return ret;
ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
- if (ret)
+ if (ret) {
+ devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
return ret;
+ }
return 0;
}
@@ -3059,6 +3083,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
return 0;
err:
+ while (i--) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+
devm_kfree(&pdev->dev, priv->ring_data);
return ret;
}
@@ -3226,9 +3256,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
int i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- if (h->ae_algo->ops->reset_queue)
- h->ae_algo->ops->reset_queue(h, i);
-
hns3_fini_ring(priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
}
@@ -3236,11 +3263,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
}
/* Set mac addr if it is configured. or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
+ int ret = 0;
if (h->ae_algo->ops->get_mac_addr && init) {
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
@@ -3255,8 +3283,9 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
}
if (h->ae_algo->ops->set_mac_addr)
- h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
+ ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
+ return ret;
}
static int hns3_restore_fd_rules(struct net_device *netdev)
@@ -3469,20 +3498,29 @@ err_out:
return ret;
}
-static void hns3_recover_hw_addr(struct net_device *ndev)
+static int hns3_recover_hw_addr(struct net_device *ndev)
{
struct netdev_hw_addr_list *list;
struct netdev_hw_addr *ha, *tmp;
+ int ret = 0;
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_uc_sync(ndev, ha->addr);
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_uc_sync(ndev, ha->addr);
+ if (ret)
+ return ret;
+ }
/* go through and sync mc_addr entries to the device */
list = &ndev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_mc_sync(ndev, ha->addr);
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_mc_sync(ndev, ha->addr);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
static void hns3_remove_hw_addr(struct net_device *netdev)
@@ -3609,7 +3647,10 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
int ret;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- h->ae_algo->ops->reset_queue(h, i);
+ ret = h->ae_algo->ops->reset_queue(h, i);
+ if (ret)
+ return ret;
+
hns3_init_ring_hw(priv->ring_data[i].ring);
/* We need to clear tx ring here because self test will
@@ -3701,18 +3742,30 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
bool vlan_filter_enable;
int ret;
- hns3_init_mac_addr(netdev, false);
- hns3_recover_hw_addr(netdev);
- hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ ret = hns3_init_mac_addr(netdev, false);
+ if (ret)
+ return ret;
+
+ ret = hns3_recover_hw_addr(netdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ if (ret)
+ return ret;
+
vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
/* Hardware table is only clear when pf resets */
- if (!(handle->flags & HNAE3_SUPPORT_VF))
- hns3_restore_vlan(netdev);
+ if (!(handle->flags & HNAE3_SUPPORT_VF)) {
+ ret = hns3_restore_vlan(netdev);
+ return ret;
+ }
- hns3_restore_fd_rules(netdev);
+ ret = hns3_restore_fd_rules(netdev);
+ if (ret)
+ return ret;
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
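
[Editor's sketch] Several of the hns3 hunks above convert void helpers into int-returning ones and add unwind labels (err_free_chain, map_ring_fail) so a partially completed setup is torn down before the error propagates. The general shape of that idiom, with placeholder alloc/free helpers that are not part of the driver:

#include <linux/errno.h>

/* Sketch of the goto-unwind idiom added above; alloc_a()/alloc_b() and the
 * matching frees are hypothetical placeholders.
 */
static int setup_both(void **a, void **b)
{
        int ret;

        *a = alloc_a();
        if (!*a)
                return -ENOMEM;

        *b = alloc_b();
        if (!*b) {
                ret = -ENOMEM;
                goto err_free_a;        /* undo only what already succeeded */
        }
        return 0;

err_free_a:
        free_a(*a);
        *a = NULL;
        return ret;
}
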
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 71cfca132d0b..d3636d088aa3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -640,7 +640,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
-void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ac13cb2b168e..690f62ed87dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -24,15 +24,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
return ring->desc_num - used - 1;
}
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
- int u = ring->next_to_use;
- int c = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
- if (unlikely(h >= ring->desc_num))
- return 0;
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
- return u > c ? (h > c && h <= u) : (h > c || h <= u);
+ return head >= ntc || head <= ntu;
}
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
@@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
int ret;
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
/* Setup the queue entries for use cmd queue */
hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
@@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
u32 version;
int ret;
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
hdev->hw.cmq.crq.next_to_clean = 0;
hdev->hw.cmq.crq.next_to_use = 0;
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
hclge_cmd_init_regs(&hdev->hw);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
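
[Editor's sketch] The rewritten is_valid_csq_clean_head() above accepts a hardware-reported head index only if it lies between next_to_clean and next_to_use on the circular command ring, with a separate branch for the wrapped case. The predicate on its own, with shortened names:

#include <linux/types.h>

/* Sketch: is 'head' inside [ntc, ntu] on a ring that may have wrapped? */
static bool head_in_window(int head, int ntc, int ntu)
{
        if (ntu > ntc)                          /* no wrap: one contiguous window */
                return head >= ntc && head <= ntu;
        return head >= ntc || head <= ntu;      /* wrapped: two pieces of the ring */
}
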
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index dca6f2326c26..123c37e653f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -751,7 +751,7 @@ static void hclge_process_ncsi_error(struct hclge_dev *hdev,
ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
HCLGE_NCSI_INT_CLR, 0);
if (ret)
- dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n",
+ dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
ret);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5234b5373ed3..ffdd96020860 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2236,7 +2236,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
}
/* clear the source of interrupt if it is not cause by reset */
- if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
+ if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2470,14 +2470,17 @@ static void hclge_reset(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
rtnl_lock();
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
if (!hclge_reset_wait(hdev)) {
+ rtnl_lock();
hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
hclge_clear_reset_cause(hdev);
} else {
+ rtnl_lock();
/* schedule again to check pending resets later */
set_bit(hdev->reset_type, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
@@ -3314,8 +3317,8 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
- bool en_mc_pmc)
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3323,7 +3326,7 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
vport->vport_id);
- hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -6107,31 +6110,28 @@ static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
return tqp->index;
}
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
- int ret;
-
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- return;
+ int ret = 0;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
ret = hclge_tqp_enable(hdev, queue_id, 0, false);
if (ret) {
- dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
+ return ret;
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Send reset tqp cmd fail, ret = %d\n", ret);
- return;
+ dev_err(&hdev->pdev->dev,
+ "Send reset tqp cmd fail, ret = %d\n", ret);
+ return ret;
}
reset_try_times = 0;
@@ -6144,16 +6144,16 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
- dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
- return;
+ dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
+ return ret;
}
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "Deassert the soft reset fail, ret = %d\n", ret);
- return;
- }
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Deassert the soft reset fail, ret = %d\n", ret);
+
+ return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
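
[Editor's sketch] hclge_reset() above is reworked so that rtnl_lock is held only around the client notifications, not across hclge_reset_wait(), which may poll the hardware for a long time. The resulting lock scope, sketched with placeholder steps that stand in for the driver's notify/wait helpers:

#include <linux/rtnetlink.h>

/* Sketch of the new lock scope: never hold rtnl across the hardware wait.
 * The helpers below are placeholders, not driver functions.
 */
static void reset_flow(void)
{
        rtnl_lock();
        notify_clients_down();          /* HNAE3_DOWN_CLIENT in the real code */
        rtnl_unlock();

        wait_for_hw_reset();            /* long poll, done without rtnl held */

        rtnl_lock();
        reinit_or_reschedule();         /* UNINIT/INIT client path, or retry */
        rtnl_unlock();
}
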
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index e3dfd654eca9..0d9215404269 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -778,7 +778,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 04462a347a94..f890022938d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -400,6 +400,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "command queue needs re-initializing\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 24b1f2a0c32a..03018638f701 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -52,7 +52,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -90,7 +90,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e0a86a58342c..085edb945389 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -925,12 +925,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
return status;
}
-static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
- bool en_uc_pmc, bool en_mc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
+ bool en_uc_pmc, bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -1080,7 +1080,7 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1, false, NULL, 0);
}
-static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[2];
@@ -1091,10 +1091,10 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
/* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
if (ret)
- return;
+ return ret;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
- 2, true, NULL, 0);
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
+ 2, true, NULL, 0);
}
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
@@ -1170,6 +1170,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
/* bring down the nic to stop any ongoing TX/RX */
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1181,12 +1183,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
ret);
dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
+ rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
rtnl_unlock();
return ret;
}
+ rtnl_lock();
+
/* now, re-initialize the nic client and ae device*/
ret = hclgevf_reset_stack(hdev);
if (ret)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 967c993d5303..bbf9bdd0ee3e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -532,7 +532,7 @@ void hinic_task_set_inner_l3(struct hinic_sq_task *task,
}
void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
- enum hinic_l4_offload_type l4_type,
+ enum hinic_l4_tunnel_type l4_type,
u32 tunnel_len)
{
task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index a0dc63a4bfc7..038522e202b6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -160,7 +160,7 @@ void hinic_task_set_inner_l3(struct hinic_sq_task *task,
u32 network_len);
void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
- enum hinic_l4_offload_type l4_type,
+ enum hinic_l4_tunnel_type l4_type,
u32 tunnel_len);
void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index fd3373d82a9e..59e1bc0f609e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -200,6 +200,15 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBE_IPSEC
+ bool "IPSec XFRM cryptography-offload acceleration"
+ depends on IXGBE
+ depends on XFRM_OFFLOAD
+ default y
+ select XFRM_ALGO
+ ---help---
+ Enable support for IPSec offload in ixgbe.ko
+
config IXGBEVF
tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
@@ -217,6 +226,15 @@ config IXGBEVF
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
+config IXGBEVF_IPSEC
+ bool "IPSec XFRM cryptography-offload acceleration"
+ depends on IXGBEVF
+ depends on XFRM_OFFLOAD
+ default y
+ select XFRM_ALGO
+ ---help---
+ Enable support for IPSec offload in ixgbevf.ko
+
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
imply PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index e707d717012f..5d4f1761dc0c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -244,7 +244,8 @@ process_mbx:
}
/* guarantee we have free space in the SM mailbox */
- if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
+ if (hw->mbx.state == FM10K_STATE_OPEN &&
+ !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
/* keep track of how many times this occurs */
interface->hw_sm_mbx_full++;
@@ -302,6 +303,28 @@ void fm10k_iov_suspend(struct pci_dev *pdev)
}
}
+static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
+{
+ u32 err_mask;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return;
+
+ /* Mask the completion abort bit in the ERR_UNCOR_MASK register,
+ * preventing the device from reporting these errors to the upstream
+ * PCIe root device. This avoids bringing down platforms which upgrade
+ * non-fatal completer aborts into machine check exceptions. Completer
+ * aborts can occur whenever a VF reads a queue it doesn't own.
+ */
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
+ err_mask |= PCI_ERR_UNC_COMP_ABORT;
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
+
+ mmiowb();
+}
+
int fm10k_iov_resume(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
@@ -317,6 +340,12 @@ int fm10k_iov_resume(struct pci_dev *pdev)
if (!iov_data)
return -ENOMEM;
+ /* Lower severity of completer abort error reporting as
+ * the VFs can trigger this any time they read a queue
+ * that they don't own.
+ */
+ fm10k_mask_aer_comp_abort(pdev);
+
/* allocate hardware resources for the VFs */
hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
@@ -460,20 +489,6 @@ void fm10k_iov_disable(struct pci_dev *pdev)
fm10k_iov_free_data(pdev);
}
-static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
-{
- u32 err_sev;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return;
-
- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
- err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
-}
-
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
int current_vfs = pci_num_vf(pdev);
@@ -495,12 +510,6 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
/* allocate VFs if not already allocated */
if (num_vfs && num_vfs != current_vfs) {
- /* Disable completer abort error reporting as
- * the VFs can trigger this any time they read a queue
- * that they don't own.
- */
- fm10k_disable_aer_comp_abort(pdev);
-
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 503bbc017792..5b2a50e5798f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,7 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.23.4-k"
+#define DRV_VERSION "0.26.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 02345d381303..e49fb51d3613 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -23,6 +23,8 @@ static const struct fm10k_info *fm10k_info_tbl[] = {
*/
static const struct pci_device_id fm10k_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
+ { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2), fm10k_device_pf },
+ { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_DA2), fm10k_device_pf },
{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
/* required last entry */
{ 0, }
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 3e608e493f9d..9fb9fca375e3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -15,6 +15,8 @@ struct fm10k_hw;
#define FM10K_DEV_ID_PF 0x15A4
#define FM10K_DEV_ID_VF 0x15A5
+#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0
+#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5
#define FM10K_MAX_QUEUES 256
#define FM10K_MAX_QUEUES_PF 128
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 81b0e1f8d14b..ac5698ed0b11 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3674,7 +3674,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
switch (ret) {
- case VIRTCHNL_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_PARAM:
return -EPERM;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9f4d700e09df..29ced6b74d36 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -51,9 +51,15 @@
*
* The 40 bit 82580 SYSTIM overflows every
* 2^40 * 10^-9 / 60 = 18.3 minutes.
+ *
+ * SYSTIM is converted to real time using a timecounter. As
+ * timecounter_cyc2time() allows old timestamps, the timecounter
+ * needs to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, so we aim for 8
+ * minutes to be sure the actual interval is shorter than 9.16 minutes.
*/
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
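
[Editor's sketch] The new igb_ptp comment motivates the shorter 8-minute schedule: a 40-bit SYSTIM counting nanoseconds wraps after 2^40 ns, roughly 18.3 minutes, and the timecounter must be refreshed at least once per half period (about 9.16 minutes) for timecounter_cyc2time() to stay correct. A quick stand-alone check of that arithmetic:

#include <stdio.h>

/* Sketch: verify the overflow figures quoted in the comment above. */
int main(void)
{
        double wrap_s = (double)(1ULL << 40) * 1e-9;    /* 40-bit count of ns */

        printf("wrap period: %.1f s = %.2f min\n", wrap_s, wrap_s / 60.0);
        printf("half period: %.2f min, so an 8-minute schedule has margin\n",
               wrap_s / 120.0);
        return 0;
}
/* Expected output (approximately):
 * wrap period: 1099.5 s = 18.33 min
 * half period: 9.16 min, so an 8-minute schedule has margin
 */
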
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index ca6b0c458e4a..4fb0d9e3f2da 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -17,4 +17,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
-ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
+ixgbe-$(CONFIG_IXGBE_IPSEC) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ec1b87cc4410..143bdd5ee2a0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -769,9 +769,9 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBE_IPSEC */
/* AF_XDP zero-copy */
struct xdp_umem **xsk_umems;
@@ -1008,7 +1008,7 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
@@ -1036,5 +1036,5 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBE_IPSEC */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0049a2becd7e..113b38e0defb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8694,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#endif /* IXGBE_FCOE */
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
@@ -10190,7 +10190,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
* the TSO, so it's the exception.
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
if (!skb->sp)
#endif
features &= ~NETIF_F_TSO;
@@ -10883,7 +10883,7 @@ skip_sriov:
if (hw->mac.type >= ixgbe_mac_82599EB)
netdev->features |= NETIF_F_SCTP_CRC;
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index af25a8fffeb8..5dacfc870259 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -722,8 +722,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
adapter->default_up, vf);
- if (vfinfo->spoofchk_enabled)
+ if (vfinfo->spoofchk_enabled) {
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
+ }
}
/* reset multicast table array for vf */
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 297d0f0858b5..186a4bb24fde 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -10,5 +10,5 @@ ixgbevf-objs := vf.o \
mbx.o \
ethtool.o \
ixgbevf_main.o
-ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o
+ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e399e1c0c54a..ecab686574b6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -459,7 +459,7 @@ int ethtool_ioctl(struct ifreq *ifr);
extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBEVF_IPSEC
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
@@ -482,7 +482,7 @@ static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
{ return 0; }
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBEVF_IPSEC */
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 98707ee11d72..5e47ede7e832 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4150,7 +4150,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBEVF_IPSEC
if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 176c6b56fdcc..398328f10743 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -796,6 +796,7 @@ struct mvpp2_queue_vector {
int nrxqs;
u32 pending_cause_rx;
struct mvpp2_port *port;
+ struct cpumask *mask;
};
struct mvpp2_port {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 14f9679c957c..7a37a37e3fb3 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3298,24 +3298,30 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!qv->mask) {
+ err = -ENOMEM;
+ goto err;
+ }
+
irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+ }
err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
if (err)
goto err;
if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
- unsigned long mask = 0;
unsigned int cpu;
for_each_present_cpu(cpu) {
if (mvpp2_cpu_to_thread(port->priv, cpu) ==
qv->sw_thread_id)
- mask |= BIT(cpu);
+ cpumask_set_cpu(cpu, qv->mask);
}
- irq_set_affinity_hint(qv->irq, to_cpumask(&mask));
+ irq_set_affinity_hint(qv->irq, qv->mask);
}
}
@@ -3325,6 +3331,8 @@ err:
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ kfree(qv->mask);
+ qv->mask = NULL;
free_irq(qv->irq, qv);
}
@@ -3339,6 +3347,8 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ kfree(qv->mask);
+ qv->mask = NULL;
irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
free_irq(qv->irq, qv);
}
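
The mvpp2 hunks above replace an on-stack bitmask with a kzalloc()ed cpumask because irq_set_affinity_hint() stores the pointer it is handed (and exposes it via /proc/irq/<n>/affinity_hint) rather than copying the mask, so a stack variable would dangle as soon as mvpp2_irqs_init() returned. A minimal sketch of the same lifetime rule follows; my_vec, my_isr and the helper names are illustrative only and not part of the mvpp2 driver.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

struct my_vec {
	int irq;
	struct cpumask *mask;	/* must stay valid while the hint is set */
};

static int my_vec_request(struct my_vec *v, unsigned int cpu)
{
	int err;

	v->mask = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!v->mask)
		return -ENOMEM;

	err = request_irq(v->irq, my_isr, 0, "my_vec", v);
	if (err) {
		kfree(v->mask);
		v->mask = NULL;
		return err;
	}

	cpumask_set_cpu(cpu, v->mask);
	irq_set_affinity_hint(v->irq, v->mask);	/* kernel keeps this pointer */
	return 0;
}

static void my_vec_free(struct my_vec *v)
{
	irq_set_affinity_hint(v->irq, NULL);	/* drop the hint first */
	kfree(v->mask);
	v->mask = NULL;
	free_irq(v->irq, v);
}

The teardown clears the hint before the mask is freed, mirroring the error and deinit paths in the hunks above.
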
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5a6d0919533d..db00bf1c23f5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -43,6 +43,7 @@
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 94224c22ecc3..79638dcbae78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -713,43 +713,15 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
rq->stats->ecn_mark += !!rc;
}
-static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+static u32 mlx5e_get_fcs(const struct sk_buff *skb)
{
- int last_frag_sz, bytes_in_prev, nr_frags;
- u8 *fcs_p1, *fcs_p2;
- skb_frag_t *last_frag;
- __be32 fcs_bytes;
+ const void *fcs_bytes;
+ u32 _fcs_bytes;
- if (!skb_is_nonlinear(skb))
- return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+ fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
+ ETH_FCS_LEN, &_fcs_bytes);
- nr_frags = skb_shinfo(skb)->nr_frags;
- last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
- last_frag_sz = skb_frag_size(last_frag);
-
- /* If all FCS data is in last frag */
- if (last_frag_sz >= ETH_FCS_LEN)
- return *(__be32 *)(skb_frag_address(last_frag) +
- last_frag_sz - ETH_FCS_LEN);
-
- fcs_p2 = (u8 *)skb_frag_address(last_frag);
- bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
-
- /* Find where the other part of the FCS is - Linear or another frag */
- if (nr_frags == 1) {
- fcs_p1 = skb_tail_pointer(skb);
- } else {
- skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
-
- fcs_p1 = skb_frag_address(prev_frag) +
- skb_frag_size(prev_frag);
- }
- fcs_p1 -= bytes_in_prev;
-
- memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
- memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
-
- return fcs_bytes;
+ return __get_unaligned_cpu32(fcs_bytes);
}
static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
@@ -797,8 +769,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
network_depth - ETH_HLEN,
skb->csum);
if (unlikely(netdev->features & NETIF_F_RXFCS))
- skb->csum = csum_add(skb->csum,
- (__force __wsum)mlx5e_get_fcs(skb));
+ skb->csum = csum_block_add(skb->csum,
+ (__force __wsum)mlx5e_get_fcs(skb),
+ skb->len - ETH_FCS_LEN);
stats->csum_complete++;
return;
}
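
Two things carry the mlx5 hunk above: skb_header_pointer() copies the 4-byte FCS into a caller-provided buffer whenever it straddles fragments, which removes the hand-rolled fragment walking, and csum_block_add() differs from csum_add() in that it accounts for where the folded bytes sit in the checksummed block (data starting at an odd offset is byte-rotated before being summed). A short sketch of the same trailer-folding pattern, assuming a 4-byte FCS; my_fold_fcs() is an illustrative name, not a kernel API.

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

/* Fold the trailing FCS into a CHECKSUM_COMPLETE value (sketch only). */
static void my_fold_fcs(struct sk_buff *skb)
{
	const void *p;
	u32 fcs, _fcs;

	/* Copies into _fcs if the FCS spans fragments; p may point either
	 * into the skb or at _fcs.
	 */
	p = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, ETH_FCS_LEN, &_fcs);
	if (!p)
		return;

	fcs = get_unaligned((const u32 *)p);
	/* The offset argument tells csum_block_add() whether the trailer
	 * starts on an odd byte and must be rotated before being added.
	 */
	skb->csum = csum_block_add(skb->csum, (__force __wsum)fcs,
				   skb->len - ETH_FCS_LEN);
}
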
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 937d0ace699a..30f751e69698 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -943,8 +943,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
devlink);
- if (err)
- mlxsw_core->reload_fail = true;
+ mlxsw_core->reload_fail = !!err;
+
return err;
}
@@ -1083,8 +1083,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (mlxsw_core->reload_fail)
- goto reload_fail;
+ if (mlxsw_core->reload_fail) {
+ if (!reload)
+ /* Only the parts that were not de-initialized in the
+ * failed reload attempt need to be de-initialized.
+ */
+ goto reload_fail_deinit;
+ else
+ return;
+ }
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
@@ -1098,9 +1105,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- if (reload)
- return;
-reload_fail:
+
+ return;
+
+reload_fail_deinit:
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 32cb6718bb17..db3d2790aeec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3284,7 +3284,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
* Configures the ETS elements.
*/
#define MLXSW_REG_QEEC_ID 0x400D
-#define MLXSW_REG_QEEC_LEN 0x1C
+#define MLXSW_REG_QEEC_LEN 0x20
MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
@@ -3326,6 +3326,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
*/
MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+/* reg_qeec_mise
+ * Min shaper configuration enable. Enables configuration of the min
+ * shaper on this ETS element
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
+
enum {
MLXSW_REG_QEEC_BYTES_MODE,
MLXSW_REG_QEEC_PACKETS_MODE,
@@ -3342,6 +3351,17 @@ enum {
*/
MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+/* The smallest permitted min shaper rate. */
+#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */
+
+/* reg_qeec_min_shaper_rate
+ * Min shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
+
/* reg_qeec_mase
* Max shaper configuration enable. Enables configuration of the max
* shaper on this ETS element.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 8a4983adae94..a2df12b79f8e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2740,6 +2740,21 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
+static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 minrate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mise_set(qeec_pl, true);
+ mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
u8 switch_prio, u8 tclass)
{
@@ -2817,6 +2832,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+ /* Configure the min shaper for multicast TCs. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ MLXSW_REG_QEEC_MIS_MIN);
+ if (err)
+ return err;
+ }
+
/* Map all priorities to traffic class 0. */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index bc60d7a8b49d..739a51f0a366 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2661,8 +2661,6 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
- if (!fdb_info->added_by_user)
- break;
mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
break;
case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b72ef171477e..bdd351597b55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -243,7 +243,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
int stmmac_mdio_reset(struct mii_bus *bus)
{
-#if defined(CONFIG_STMMAC_PLATFORM)
+#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e52b9d3c0bd6..0b70c8bab045 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1704,7 +1704,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
op->fcp_req.rspaddr = &op->rsp_iu;
op->fcp_req.rsplen = sizeof(op->rsp_iu);
op->fcp_req.done = nvme_fc_fcpio_done;
- op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
op->ctrl = ctrl;
op->queue = queue;
op->rq = rq;
@@ -1752,6 +1751,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
if (res)
return res;
op->op.fcp_req.first_sgl = &op->sgl[0];
+ op->op.fcp_req.private = &op->priv[0];
return res;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f30031945ee4..c33bb201b884 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1663,6 +1663,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int bar;
+ if (dev->cmb_size)
+ return;
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -2147,7 +2150,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- nvme_release_cmb(dev);
pci_free_irq_vectors(pdev);
if (pci_is_enabled(pdev)) {
@@ -2595,6 +2597,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_stop_ctrl(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
nvme_dev_disable(dev, true);
+ nvme_release_cmb(dev);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 39d972e2595f..01feebec29ea 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -101,7 +101,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
rw = READ;
}
- iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+ iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d023cf303d56..09692c9b32a7 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -777,8 +777,6 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev)
if (!(of_node_name_eq(next, "cpu") ||
(next->type && !of_node_cmp(next->type, "cpu"))))
continue;
- if (!__of_device_is_available(next))
- continue;
if (of_node_get(next))
break;
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 76c83c1ffeda..bb532aae0d92 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,7 +11,6 @@
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -1115,7 +1114,6 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
return 1;
}
-#ifdef CONFIG_HAVE_MEMBLOCK
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
#endif
@@ -1178,29 +1176,9 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
return memblock_reserve(base, size);
}
-#else
-void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- WARN_ON(1);
-}
-
-int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
-{
- return -ENOSYS;
-}
-
-int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
- phys_addr_t size, bool nomap)
-{
- pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
- &base, &size, nomap ? " (nomap)" : "");
- return -ENOSYS;
-}
-#endif
-
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- return memblock_virt_alloc(size, align);
+ return memblock_alloc(size, align);
}
bool __init early_init_dt_verify(void *params)
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 895c83e0c7b6..1977ee0adcb1 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -20,13 +20,12 @@
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
-#if defined(CONFIG_HAVE_MEMBLOCK)
-#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
@@ -37,6 +36,7 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
* panic()s on allocation failure.
*/
end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
+ align = !align ? SMP_CACHE_BYTES : align;
base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
@@ -54,16 +54,6 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
return memblock_remove(base, size);
return 0;
}
-#else
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
- phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
- phys_addr_t *res_base)
-{
- pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
- size, nomap ? " (nomap)" : "");
- return -ENOSYS;
-}
-#endif
/**
* res_mem_save_node() - save fdt node for second pass initialization
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index a3a6866765f2..49ae2aa744d6 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -5,7 +5,7 @@
#define pr_fmt(fmt) "### dt-test ### " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -2192,7 +2192,7 @@ static struct device_node *overlay_base_root;
static void * __init dt_alloc_memory(u64 size, u64 align)
{
- return memblock_virt_alloc(size, align);
+ return memblock_alloc(size, align);
}
/*
diff --git a/drivers/platform/chrome/chromeos_tbmc.c b/drivers/platform/chrome/chromeos_tbmc.c
index 1e81f8144c0d..ce259ec9f990 100644
--- a/drivers/platform/chrome/chromeos_tbmc.c
+++ b/drivers/platform/chrome/chromeos_tbmc.c
@@ -99,7 +99,7 @@ static const struct acpi_device_id chromeos_tbmc_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, chromeos_tbmc_acpi_device_ids);
-static const SIMPLE_DEV_PM_OPS(chromeos_tbmc_pm_ops, NULL,
+static SIMPLE_DEV_PM_OPS(chromeos_tbmc_pm_ops, NULL,
chromeos_tbmc_resume);
static struct acpi_driver chromeos_tbmc_driver = {
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 31c8b8c49e45..e1b75775cd4a 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -25,14 +25,16 @@
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
-#include <linux/mfd/cros_ec_lpc_reg.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/suspend.h>
+#include "cros_ec_lpc_reg.h"
+
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
@@ -248,7 +250,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
acpi_status status;
struct cros_ec_device *ec_dev;
u8 buf[2];
- int ret;
+ int irq, ret;
if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
dev_name(dev))) {
@@ -287,6 +289,18 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ /*
+ * Some boards do not have an IRQ allotted for cros_ec_lpc,
+ * which makes ENXIO an expected (and safe) scenario.
+ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0)
+ ec_dev->irq = irq;
+ else if (irq != -ENXIO) {
+ dev_err(dev, "couldn't retrieve IRQ number (%d)\n", irq);
+ return irq;
+ }
+
ret = cros_ec_register(ec_dev);
if (ret) {
dev_err(dev, "couldn't register ec_dev (%d)\n", ret);
diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c
index 2eda2c2fc210..c4edfa83e493 100644
--- a/drivers/platform/chrome/cros_ec_lpc_mec.c
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.c
@@ -24,10 +24,11 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/cros_ec_commands.h>
-#include <linux/mfd/cros_ec_lpc_mec.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include "cros_ec_lpc_mec.h"
+
/*
* This mutex must be held while accessing the EMI unit. We can't rely on the
* EC mutex because memmap data may be accessed without it being held.
diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h
index 176496ddc66c..105068c0e919 100644
--- a/include/linux/mfd/cros_ec_lpc_mec.h
+++ b/drivers/platform/chrome/cros_ec_lpc_mec.h
@@ -21,8 +21,8 @@
* expensive.
*/
-#ifndef __LINUX_MFD_CROS_EC_MEC_H
-#define __LINUX_MFD_CROS_EC_MEC_H
+#ifndef __CROS_EC_LPC_MEC_H
+#define __CROS_EC_LPC_MEC_H
#include <linux/mfd/cros_ec_commands.h>
@@ -87,4 +87,4 @@ void cros_ec_lpc_mec_destroy(void);
u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
unsigned int offset, unsigned int length, u8 *buf);
-#endif /* __LINUX_MFD_CROS_EC_MEC_H */
+#endif /* __CROS_EC_LPC_MEC_H */
diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.c b/drivers/platform/chrome/cros_ec_lpc_reg.c
index dcc7a3e30604..fc23d535c404 100644
--- a/drivers/platform/chrome/cros_ec_lpc_reg.c
+++ b/drivers/platform/chrome/cros_ec_lpc_reg.c
@@ -24,7 +24,8 @@
#include <linux/io.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
-#include <linux/mfd/cros_ec_lpc_mec.h>
+
+#include "cros_ec_lpc_mec.h"
static u8 lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest)
{
diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/drivers/platform/chrome/cros_ec_lpc_reg.h
index 5560bef63c2b..1c12c38b306a 100644
--- a/include/linux/mfd/cros_ec_lpc_reg.h
+++ b/drivers/platform/chrome/cros_ec_lpc_reg.h
@@ -21,8 +21,8 @@
* expensive.
*/
-#ifndef __LINUX_MFD_CROS_EC_REG_H
-#define __LINUX_MFD_CROS_EC_REG_H
+#ifndef __CROS_EC_LPC_REG_H
+#define __CROS_EC_LPC_REG_H
/**
* cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address.
@@ -58,4 +58,4 @@ void cros_ec_lpc_reg_init(void);
*/
void cros_ec_lpc_reg_destroy(void);
-#endif /* __LINUX_MFD_CROS_EC_REG_H */
+#endif /* __CROS_EC_LPC_REG_H */
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bdac939de223..54f6a40c75c6 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -60,7 +60,10 @@ config ACERHDF
After loading this driver the BIOS is still in control of the fan.
To let the kernel handle the fan, do:
- echo -n enabled > /sys/class/thermal/thermal_zone0/mode
+ echo -n enabled > /sys/class/thermal/thermal_zoneN/mode
+ where N=0,1,2... depending on the number of thermal nodes and the
+ detection order of your particular system. The "type" parameter
+ in the same node directory will tell you if it is "acerhdf".
For more information about this driver see
<http://piie.net/files/acerhdf_README.txt>
@@ -105,6 +108,22 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
+config DCDBAS
+ tristate "Dell Systems Management Base Driver"
+ depends on X86
+ help
+ The Dell Systems Management Base Driver provides a sysfs interface
+ for systems management software to perform System Management
+ Interrupts (SMIs) and Host Control Actions (system power cycle or
+ power off after OS shutdown) on certain Dell systems.
+
+ See <file:Documentation/dcdbas.txt> for more details on the driver
+ and the Dell systems on which Dell systems management software makes
+ use of this driver.
+
+ Say Y or M here to enable the driver for use by Dell systems
+ management software such as Dell OpenManage.
+
#
# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
# backends are selected. The "depends" line prevents a configuration
@@ -227,6 +246,18 @@ config DELL_RBTN
To compile this driver as a module, choose M here: the module will
be called dell-rbtn.
+config DELL_RBU
+ tristate "BIOS update support for DELL systems via sysfs"
+ depends on X86
+ select FW_LOADER
+ select FW_LOADER_USER_HELPER
+ help
+ Say m if you want to have the option of updating the BIOS for your
+ DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
+ supporting application to communicate with the BIOS regarding the new
+ image for the image update to take effect.
+ See <file:Documentation/dell_rbu.txt> for more details on the driver.
+
config FUJITSU_LAPTOP
tristate "Fujitsu Laptop Extras"
@@ -336,6 +367,20 @@ config HP_WMI
To compile this driver as a module, choose M here: the module will
be called hp-wmi.
+config LG_LAPTOP
+ tristate "LG Laptop Extras"
+ depends on ACPI
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ help
+ This driver adds support for hotkeys as well as control of keyboard
+ backlight, battery maximum charge level and various other ACPI
+ features.
+
+ If you have an LG Gram laptop, say Y or M here.
+
config MSI_LAPTOP
tristate "MSI Laptop Extras"
depends on ACPI
@@ -1231,6 +1276,18 @@ config I2C_MULTI_INSTANTIATE
To compile this driver as a module, choose M here: the module
will be called i2c-multi-instantiate.
+config INTEL_ATOMISP2_PM
+ tristate "Intel AtomISP2 dummy / power-management driver"
+ depends on PCI && IOSF_MBI && PM
+ help
+ Power-management driver for Intel's Image Signal Processor found on
+ Bay and Cherry Trail devices. This dummy driver's sole purpose is to
+ turn the ISP off (put it in D3) to save power and to allow entering
+ of S0ix modes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_pm.
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index e6d1becf81ce..39ae94135406 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
+obj-$(CONFIG_LG_LAPTOP) += lg-laptop.o
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
+obj-$(CONFIG_DCDBAS) += dcdbas.o
obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
dell-smbios-objs := dell-smbios-base.o
dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
@@ -23,6 +25,7 @@ obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
+obj-$(CONFIG_DELL_RBU) += dell_rbu.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
@@ -92,3 +95,4 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
+obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index ea22591ee66f..505224225378 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -86,6 +86,7 @@ static unsigned int interval = 10;
static unsigned int fanon = 60000;
static unsigned int fanoff = 53000;
static unsigned int verbose;
+static unsigned int list_supported;
static unsigned int fanstate = ACERHDF_FAN_AUTO;
static char force_bios[16];
static char force_product[16];
@@ -104,10 +105,12 @@ module_param(fanoff, uint, 0600);
MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature");
module_param(verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+module_param(list_supported, uint, 0600);
+MODULE_PARM_DESC(list_supported, "List supported models and BIOS versions");
module_param_string(force_bios, force_bios, 16, 0);
-MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check");
+MODULE_PARM_DESC(force_bios, "Pretend system has this known supported BIOS version");
module_param_string(force_product, force_product, 16, 0);
-MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");
+MODULE_PARM_DESC(force_product, "Pretend system is this known supported model");
/*
* cmd_off: to switch the fan completely off and check if the fan is off
@@ -130,7 +133,7 @@ static const struct manualcmd mcmd = {
.moff = 0xff,
};
-/* BIOS settings */
+/* BIOS settings - only used during probe */
struct bios_settings {
const char *vendor;
const char *product;
@@ -141,8 +144,18 @@ struct bios_settings {
int mcmd_enable;
};
+/* This could be a daughter struct in the above, but not worth the redirect */
+struct ctrl_settings {
+ u8 fanreg;
+ u8 tempreg;
+ struct fancmd cmd;
+ int mcmd_enable;
+};
+
+static struct ctrl_settings ctrl_cfg __read_mostly;
+
/* Register addresses and values for different BIOS versions */
-static const struct bios_settings bios_tbl[] = {
+static const struct bios_settings bios_tbl[] __initconst = {
/* AOA110 */
{"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00}, 0},
{"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00}, 0},
@@ -233,6 +246,7 @@ static const struct bios_settings bios_tbl[] = {
{"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
{"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
{"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0},
/* Packard Bell */
{"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
{"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
@@ -256,8 +270,6 @@ static const struct bios_settings bios_tbl[] = {
{"", "", "", 0, 0, {0, 0}, 0}
};
-static const struct bios_settings *bios_cfg __read_mostly;
-
/*
* this struct is used to instruct thermal layer to use bang_bang instead of
* default governor for acerhdf
@@ -270,7 +282,7 @@ static int acerhdf_get_temp(int *temp)
{
u8 read_temp;
- if (ec_read(bios_cfg->tempreg, &read_temp))
+ if (ec_read(ctrl_cfg.tempreg, &read_temp))
return -EINVAL;
*temp = read_temp * 1000;
@@ -282,10 +294,10 @@ static int acerhdf_get_fanstate(int *state)
{
u8 fan;
- if (ec_read(bios_cfg->fanreg, &fan))
+ if (ec_read(ctrl_cfg.fanreg, &fan))
return -EINVAL;
- if (fan != bios_cfg->cmd.cmd_off)
+ if (fan != ctrl_cfg.cmd.cmd_off)
*state = ACERHDF_FAN_AUTO;
else
*state = ACERHDF_FAN_OFF;
@@ -306,13 +318,13 @@ static void acerhdf_change_fanstate(int state)
state = ACERHDF_FAN_AUTO;
}
- cmd = (state == ACERHDF_FAN_OFF) ? bios_cfg->cmd.cmd_off
- : bios_cfg->cmd.cmd_auto;
+ cmd = (state == ACERHDF_FAN_OFF) ? ctrl_cfg.cmd.cmd_off
+ : ctrl_cfg.cmd.cmd_auto;
fanstate = state;
- ec_write(bios_cfg->fanreg, cmd);
+ ec_write(ctrl_cfg.fanreg, cmd);
- if (bios_cfg->mcmd_enable && state == ACERHDF_FAN_OFF) {
+ if (ctrl_cfg.mcmd_enable && state == ACERHDF_FAN_OFF) {
if (verbose)
pr_notice("turning off fan manually\n");
ec_write(mcmd.mreg, mcmd.moff);
@@ -615,10 +627,11 @@ static int str_starts_with(const char *str, const char *start)
}
/* check hardware */
-static int acerhdf_check_hardware(void)
+static int __init acerhdf_check_hardware(void)
{
char const *vendor, *version, *product;
const struct bios_settings *bt = NULL;
+ int found = 0;
/* get BIOS data */
vendor = dmi_get_system_info(DMI_SYS_VENDOR);
@@ -632,6 +645,17 @@ static int acerhdf_check_hardware(void)
pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);
+ if (list_supported) {
+ pr_info("List of supported Manufacturer/Model/BIOS:\n");
+ pr_info("---------------------------------------------------\n");
+ for (bt = bios_tbl; bt->vendor[0]; bt++) {
+ pr_info("%-13s | %-17s | %-10s\n", bt->vendor,
+ bt->product, bt->version);
+ }
+ pr_info("---------------------------------------------------\n");
+ return -ECANCELED;
+ }
+
if (force_bios[0]) {
version = force_bios;
pr_info("forcing BIOS version: %s\n", version);
@@ -657,30 +681,36 @@ static int acerhdf_check_hardware(void)
if (str_starts_with(vendor, bt->vendor) &&
str_starts_with(product, bt->product) &&
str_starts_with(version, bt->version)) {
- bios_cfg = bt;
+ found = 1;
break;
}
}
- if (!bios_cfg) {
+ if (!found) {
pr_err("unknown (unsupported) BIOS version %s/%s/%s, please report, aborting!\n",
vendor, product, version);
return -EINVAL;
}
+ /* Copy control settings from BIOS table before we free it. */
+ ctrl_cfg.fanreg = bt->fanreg;
+ ctrl_cfg.tempreg = bt->tempreg;
+ memcpy(&ctrl_cfg.cmd, &bt->cmd, sizeof(struct fancmd));
+ ctrl_cfg.mcmd_enable = bt->mcmd_enable;
+
/*
* if started with kernel mode off, prevent the kernel from switching
* off the fan
*/
if (!kernelmode) {
pr_notice("Fan control off, to enable do:\n");
- pr_notice("echo -n \"enabled\" > /sys/class/thermal/thermal_zone0/mode\n");
+ pr_notice("echo -n \"enabled\" > /sys/class/thermal/thermal_zoneN/mode # N=0,1,2...\n");
}
return 0;
}
-static int acerhdf_register_platform(void)
+static int __init acerhdf_register_platform(void)
{
int err = 0;
@@ -712,7 +742,7 @@ static void acerhdf_unregister_platform(void)
platform_driver_unregister(&acerhdf_driver);
}
-static int acerhdf_register_thermal(void)
+static int __init acerhdf_register_thermal(void)
{
cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL,
&acerhdf_cooling_ops);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 93ee2d5466f8..c285a16675ee 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -43,6 +43,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/platform_data/x86/asus-wmi.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
@@ -69,89 +70,6 @@ MODULE_LICENSE("GPL");
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
-/* WMI Methods */
-#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
-#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
-#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */
-#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */
-#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */
-#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */
-#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */
-#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */
-#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */
-#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */
-#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */
-#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */
-#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */
-#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/
-#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */
-#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */
-#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */
-#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
-#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
-#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
-
-#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
-
-/* Wireless */
-#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
-#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
-#define ASUS_WMI_DEVID_CWAP 0x00010003
-#define ASUS_WMI_DEVID_WLAN 0x00010011
-#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
-#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
-#define ASUS_WMI_DEVID_GPS 0x00010015
-#define ASUS_WMI_DEVID_WIMAX 0x00010017
-#define ASUS_WMI_DEVID_WWAN3G 0x00010019
-#define ASUS_WMI_DEVID_UWB 0x00010021
-
-/* Leds */
-/* 0x000200XX and 0x000400XX */
-#define ASUS_WMI_DEVID_LED1 0x00020011
-#define ASUS_WMI_DEVID_LED2 0x00020012
-#define ASUS_WMI_DEVID_LED3 0x00020013
-#define ASUS_WMI_DEVID_LED4 0x00020014
-#define ASUS_WMI_DEVID_LED5 0x00020015
-#define ASUS_WMI_DEVID_LED6 0x00020016
-
-/* Backlight and Brightness */
-#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
-#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
-#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
-#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
-#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
-#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
-
-/* Misc */
-#define ASUS_WMI_DEVID_CAMERA 0x00060013
-
-/* Storage */
-#define ASUS_WMI_DEVID_CARDREADER 0x00080013
-
-/* Input */
-#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
-#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
-
-/* Fan, Thermal */
-#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
-#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012
-
-/* Power */
-#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
-
-/* Deep S3 / Resume on LID open */
-#define ASUS_WMI_DEVID_LID_RESUME 0x00120031
-
-/* DSTS masks */
-#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
-#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
-#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000
-#define ASUS_WMI_DSTS_USER_BIT 0x00020000
-#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
-#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
-#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
-#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
-
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
@@ -239,7 +157,6 @@ struct asus_wmi {
int lightbar_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
- struct work_struct kbd_led_work;
struct work_struct wlan_led_work;
struct work_struct lightbar_led_work;
@@ -302,8 +219,7 @@ static void asus_wmi_input_exit(struct asus_wmi *asus)
asus->inputdev = NULL;
}
-static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
- u32 *retval)
+int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
{
struct bios_args args = {
.arg0 = arg0,
@@ -339,6 +255,7 @@ exit:
return 0;
}
+EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
{
@@ -456,12 +373,9 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
return read_tpd_led_state(asus);
}
-static void kbd_led_update(struct work_struct *work)
+static void kbd_led_update(struct asus_wmi *asus)
{
int ctrl_param = 0;
- struct asus_wmi *asus;
-
- asus = container_of(work, struct asus_wmi, kbd_led_work);
/*
* bits 0-2: level
@@ -471,7 +385,6 @@ static void kbd_led_update(struct work_struct *work)
ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
- led_classdev_notify_brightness_hw_changed(&asus->kbd_led, asus->kbd_led_wk);
}
static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
@@ -516,7 +429,7 @@ static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
value = 0;
asus->kbd_led_wk = value;
- queue_work(asus->led_workqueue, &asus->kbd_led_work);
+ kbd_led_update(asus);
}
static void kbd_led_set(struct led_classdev *led_cdev,
@@ -525,6 +438,14 @@ static void kbd_led_set(struct led_classdev *led_cdev,
do_kbd_led_set(led_cdev, value);
}
+static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value)
+{
+ struct led_classdev *led_cdev = &asus->kbd_led;
+
+ do_kbd_led_set(led_cdev, value);
+ led_classdev_notify_brightness_hw_changed(led_cdev, asus->kbd_led_wk);
+}
+
static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
{
struct asus_wmi *asus;
@@ -671,8 +592,6 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
led_val = kbd_led_read(asus, NULL, NULL);
if (led_val >= 0) {
- INIT_WORK(&asus->kbd_led_work, kbd_led_update);
-
asus->kbd_led_wk = led_val;
asus->kbd_led.name = "asus::kbd_backlight";
asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
@@ -1746,18 +1665,18 @@ static void asus_wmi_notify(u32 value, void *context)
}
if (code == NOTIFY_KBD_BRTUP) {
- do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk + 1);
+ kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1);
goto exit;
}
if (code == NOTIFY_KBD_BRTDWN) {
- do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk - 1);
+ kbd_led_set_by_kbd(asus, asus->kbd_led_wk - 1);
goto exit;
}
if (code == NOTIFY_KBD_BRTTOGGLE) {
if (asus->kbd_led_wk == asus->kbd_led.max_brightness)
- do_kbd_led_set(&asus->kbd_led, 0);
+ kbd_led_set_by_kbd(asus, 0);
else
- do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk + 1);
+ kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1);
goto exit;
}
@@ -2291,7 +2210,7 @@ static int asus_hotk_resume(struct device *device)
struct asus_wmi *asus = dev_get_drvdata(device);
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
- queue_work(asus->led_workqueue, &asus->kbd_led_work);
+ kbd_led_update(asus);
return 0;
}
@@ -2327,7 +2246,7 @@ static int asus_hotk_restore(struct device *device)
rfkill_set_sw_state(asus->uwb.rfkill, bl);
}
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
- queue_work(asus->led_workqueue, &asus->kbd_led_work);
+ kbd_led_update(asus);
return 0;
}
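
The asus-wmi rework above splits keyboard-backlight updates into two paths: do_kbd_led_set() for writes coming from the sysfs brightness file, and kbd_led_set_by_kbd() for hotkey events, which additionally calls led_classdev_notify_brightness_hw_changed(). With LED_BRIGHT_HW_CHANGED set on the classdev (as in the surrounding context), only hardware-initiated changes should emit the brightness_hw_changed event; echoing sysfs writes back as events would be redundant. A compact sketch of that split, with my_led_set()/my_led_hw_event() as illustrative names:

#include <linux/leds.h>

/* Shared low-level setter: called for sysfs writes and hotkeys alike. */
static void my_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* program the hardware with 'value' here */
}

/* Hotkey/firmware-initiated change: also tell userspace via the
 * brightness_hw_changed attribute (requires LED_BRIGHT_HW_CHANGED).
 */
static void my_led_hw_event(struct led_classdev *cdev, enum led_brightness value)
{
	my_led_set(cdev, value);
	led_classdev_notify_brightness_hw_changed(cdev, value);
}
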
diff --git a/drivers/firmware/dcdbas.c b/drivers/platform/x86/dcdbas.c
index 0bdea60c65dd..88bd7efafe14 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/platform/x86/dcdbas.c
@@ -21,11 +21,13 @@
*/
#include <linux/platform_device.h>
+#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
@@ -36,12 +38,11 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mutex.h>
-#include <asm/io.h>
#include "dcdbas.h"
#define DRIVER_NAME "dcdbas"
-#define DRIVER_VERSION "5.6.0-3.2"
+#define DRIVER_VERSION "5.6.0-3.3"
#define DRIVER_DESCRIPTION "Dell Systems Management Base Driver"
static struct platform_device *dcdbas_pdev;
@@ -49,19 +50,23 @@ static struct platform_device *dcdbas_pdev;
static u8 *smi_data_buf;
static dma_addr_t smi_data_buf_handle;
static unsigned long smi_data_buf_size;
+static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
static u32 smi_data_buf_phys_addr;
static DEFINE_MUTEX(smi_data_lock);
+static u8 *eps_buffer;
static unsigned int host_control_action;
static unsigned int host_control_smi_type;
static unsigned int host_control_on_shutdown;
+static bool wsmt_enabled;
+
/**
* smi_data_buf_free: free SMI data buffer
*/
static void smi_data_buf_free(void)
{
- if (!smi_data_buf)
+ if (!smi_data_buf || wsmt_enabled)
return;
dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
@@ -86,7 +91,7 @@ static int smi_data_buf_realloc(unsigned long size)
if (smi_data_buf_size >= size)
return 0;
- if (size > MAX_SMI_DATA_BUF_SIZE)
+ if (size > max_smi_data_buf_size)
return -EINVAL;
/* new buffer is needed */
@@ -169,7 +174,7 @@ static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
{
ssize_t ret;
- if ((pos + count) > MAX_SMI_DATA_BUF_SIZE)
+ if ((pos + count) > max_smi_data_buf_size)
return -EINVAL;
mutex_lock(&smi_data_lock);
@@ -322,8 +327,20 @@ static ssize_t smi_request_store(struct device *dev,
ret = count;
break;
case 1:
- /* Calling Interface SMI */
- smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
+ /*
+ * Calling Interface SMI
+ *
+ * Provide physical address of command buffer field within
+ * the struct smi_cmd to BIOS.
+ *
+ * Because the address that smi_cmd (smi_data_buf) points to
+ * will be from memremap() of a non-memory address if WSMT
+ * is present, we can't use virt_to_phys() on smi_cmd, so
+ * we have to use the physical address that was saved when
+ * the virtual address for smi_cmd was received.
+ */
+ smi_cmd->ebx = smi_data_buf_phys_addr +
+ offsetof(struct smi_cmd, command_buffer);
ret = dcdbas_smi_request(smi_cmd);
if (!ret)
ret = count;
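
The comment above hinges on a property of memremap(): unlike kmalloc()ed memory, a memremap()ed window is not part of the kernel's linear mapping, so virt_to_phys() on such an address is invalid. The driver therefore keeps the physical base reported by the firmware and adds the structure offset. In isolation the calculation looks like the sketch below; my_layout, my_member_phys() and my_buf_phys are illustrative names.

#include <linux/stddef.h>
#include <linux/types.h>

struct my_layout {
	u64 semaphore;
	u8 command_buffer[];
};

/* Physical address of a member inside a memremap()ed buffer whose
 * physical base (my_buf_phys) is already known.
 */
static phys_addr_t my_member_phys(phys_addr_t my_buf_phys)
{
	return my_buf_phys + offsetof(struct my_layout, command_buffer);
}
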
@@ -482,6 +499,93 @@ static void dcdbas_host_control(void)
}
}
+/* WSMT */
+
+static u8 checksum(u8 *buffer, u8 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum += *buffer++;
+ return sum;
+}
+
+static inline struct smm_eps_table *check_eps_table(u8 *addr)
+{
+ struct smm_eps_table *eps = (struct smm_eps_table *)addr;
+
+ if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0)
+ return NULL;
+
+ if (checksum(addr, eps->length) != 0)
+ return NULL;
+
+ return eps;
+}
+
+static int dcdbas_check_wsmt(void)
+{
+ struct acpi_table_wsmt *wsmt = NULL;
+ struct smm_eps_table *eps = NULL;
+ u64 remap_size;
+ u8 *addr;
+
+ acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt);
+ if (!wsmt)
+ return 0;
+
+ /* Check if WSMT ACPI table shows that protection is enabled */
+ if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) ||
+ !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
+ return 0;
+
+ /* Scan for EPS (entry point structure) */
+ for (addr = (u8 *)__va(0xf0000);
+ addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
+ addr += 16) {
+ eps = check_eps_table(addr);
+ if (eps)
+ break;
+ }
+
+ if (!eps) {
+ dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no EPS found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Get physical address of buffer and map to virtual address.
+ * Table gives size in 4K pages, regardless of actual system page size.
+ */
+ if (upper_32_bits(eps->smm_comm_buff_addr + 8)) {
+ dev_warn(&dcdbas_pdev->dev, "found WSMT, but EPS buffer address is above 4GB\n");
+ return -EINVAL;
+ }
+ /*
+ * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
+ * bytes are used for a semaphore, not the data buffer itself).
+ */
+ remap_size = eps->num_of_4k_pages * PAGE_SIZE;
+ if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
+ remap_size = MAX_SMI_DATA_BUF_SIZE + 8;
+ eps_buffer = memremap(eps->smm_comm_buff_addr, remap_size, MEMREMAP_WB);
+ if (!eps_buffer) {
+ dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map EPS buffer\n");
+ return -ENOMEM;
+ }
+
+ /* First 8 bytes are for a semaphore, not part of the smi_data_buf */
+ smi_data_buf_phys_addr = eps->smm_comm_buff_addr + 8;
+ smi_data_buf = eps_buffer + 8;
+ smi_data_buf_size = remap_size - 8;
+ max_smi_data_buf_size = smi_data_buf_size;
+ wsmt_enabled = true;
+ dev_info(&dcdbas_pdev->dev,
+ "WSMT found, using firmware-provided SMI buffer.\n");
+ return 1;
+}
+
/**
* dcdbas_reboot_notify: handle reboot notification for host control
*/
@@ -548,6 +652,11 @@ static int dcdbas_probe(struct platform_device *dev)
dcdbas_pdev = dev;
+ /* Check if ACPI WSMT table specifies protected SMI buffer address */
+ error = dcdbas_check_wsmt();
+ if (error < 0)
+ return error;
+
/*
* BIOS SMI calls require buffer addresses be in 32-bit address space.
* This is done by setting the DMA mask below.
@@ -635,6 +744,8 @@ static void __exit dcdbas_exit(void)
*/
if (dcdbas_pdev)
smi_data_buf_free();
+ if (eps_buffer)
+ memunmap(eps_buffer);
platform_device_unregister(dcdbas_pdev_reg);
platform_driver_unregister(&dcdbas_driver);
}
diff --git a/drivers/firmware/dcdbas.h b/drivers/platform/x86/dcdbas.h
index ca3cb0a54ab6..52729a494b00 100644
--- a/drivers/firmware/dcdbas.h
+++ b/drivers/platform/x86/dcdbas.h
@@ -53,6 +53,7 @@
#define EXPIRED_TIMER (0)
#define SMI_CMD_MAGIC (0x534D4931)
+#define SMM_EPS_SIG "$SCB"
#define DCDBAS_DEV_ATTR_RW(_name) \
DEVICE_ATTR(_name,0600,_name##_show,_name##_store);
@@ -103,5 +104,14 @@ struct apm_cmd {
int dcdbas_smi_request(struct smi_cmd *smi_cmd);
+struct smm_eps_table {
+ char smm_comm_buff_anchor[4];
+ u8 length;
+ u8 checksum;
+ u8 version;
+ u64 smm_comm_buff_addr;
+ u64 num_of_4k_pages;
+} __packed;
+
#endif /* _DCDBAS_H_ */
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index 97a90bebc360..ab9b822a6dfe 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include "../../firmware/dcdbas.h"
+#include "dcdbas.h"
#include "dell-smbios.h"
static int da_command_address;
diff --git a/drivers/firmware/dell_rbu.c b/drivers/platform/x86/dell_rbu.c
index fb8af5cb7c9b..ccefa84f7305 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/platform/x86/dell_rbu.c
@@ -45,6 +45,7 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
+#include <asm/set_memory.h>
MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
@@ -181,6 +182,11 @@ static int create_packet(void *data, size_t length)
packet_data_temp_buf = NULL;
}
}
+ /*
+ * set to uncachable or it may never get written back before reboot
+ */
+ set_memory_uc((unsigned long)packet_data_temp_buf, 1 << ordernum);
+
spin_lock(&rbu_data.lock);
newpacket->data = packet_data_temp_buf;
@@ -349,6 +355,8 @@ static void packet_empty_list(void)
* to make sure there are no stale RBU packets left in memory
*/
memset(newpacket->data, 0, rbu_data.packetsize);
+ set_memory_wb((unsigned long)newpacket->data,
+ 1 << newpacket->ordernum);
free_pages((unsigned long) newpacket->data,
newpacket->ordernum);
kfree(newpacket);
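
set_memory_uc() changes the caching attribute of the kernel mapping for those pages (the hunk's rationale being that the data must reach RAM before the reboot), so the new set_memory_wb() call right before free_pages() is the required counterpart: pages must return to the allocator with their default write-back attribute, otherwise the next user inherits an uncached mapping. A minimal sketch of the pairing, with my_alloc_uc()/my_free_uc() as illustrative helpers:

#include <linux/gfp.h>
#include <asm/set_memory.h>

static void *my_alloc_uc(unsigned int order)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, order);

	if (!addr)
		return NULL;
	set_memory_uc(addr, 1 << order);	/* mark the range uncached */
	return (void *)addr;
}

static void my_free_uc(void *buf, unsigned int order)
{
	unsigned long addr = (unsigned long)buf;

	set_memory_wb(addr, 1 << order);	/* restore write-back first */
	free_pages(addr, order);
}
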
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d4f1259ff5a2..b6489cba2985 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -212,7 +212,7 @@ static int read_ec_data(acpi_handle handle, int cmd, unsigned long *data)
return 0;
}
}
- pr_err("timeout in read_ec_cmd\n");
+ pr_err("timeout in %s\n", __func__);
return -1;
}
@@ -1147,6 +1147,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Legion Y530-15ICH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"),
+ },
+ },
+ {
.ident = "Lenovo Legion Y720-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 6cf9b7fa5bf0..e28bcf61b126 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Intel HID event & 5 button array driver
*
* Copyright (C) 2015 Alex Hung <alex.hung@canonical.com>
* Copyright (C) 2015 Andrew Lutomirski <luto@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/acpi.h>
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 7344d841f4d9..3b81cb896fed 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -1,26 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
-#include <linux/init.h>
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
MODULE_LICENSE("GPL");
@@ -53,12 +38,10 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
acpi = to_acpi_device(dev);
error = kstrtoul(buf, 0, &value);
-
if (error)
return error;
status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
-
if (ACPI_FAILURE(status))
return -EINVAL;
@@ -99,12 +82,10 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
acpi = to_acpi_device(dev);
error = kstrtoul(buf, 0, &value);
-
if (error)
return error;
status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
-
if (ACPI_FAILURE(status))
return -EINVAL;
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index bbe4c06c769f..64c2dc93472f 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -1,25 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/module.h>
MODULE_LICENSE("GPL");
@@ -44,6 +29,7 @@ static const struct acpi_device_id smartconnect_ids[] = {
{"INT33A0", 0},
{"", 0}
};
+MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
static struct acpi_driver smartconnect_driver = {
.owner = THIS_MODULE,
@@ -56,5 +42,3 @@ static struct acpi_driver smartconnect_driver = {
};
module_acpi_driver(smartconnect_driver);
-
-MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
index c2257bd06f18..9ded8e2af312 100644
--- a/drivers/platform/x86/intel-wmi-thunderbolt.c
+++ b/drivers/platform/x86/intel-wmi-thunderbolt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WMI Thunderbolt driver
*
* Copyright (C) 2017 Dell Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -38,12 +30,16 @@ static ssize_t force_power_store(struct device *dev,
input.length = sizeof(u8);
input.pointer = &mode;
mode = hex_to_bin(buf[0]);
+ dev_dbg(dev, "force_power: storing %#x\n", mode);
if (mode == 0 || mode == 1) {
status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
&input, NULL);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(dev, "force_power: failed to evaluate ACPI method\n");
return -ENODEV;
+ }
} else {
+ dev_dbg(dev, "force_power: unsupported mode\n");
return -EINVAL;
}
return count;
@@ -95,4 +91,4 @@ module_wmi_driver(intel_wmi_thunderbolt_driver);
MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID);
MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
new file mode 100644
index 000000000000..9371603a0ac9
--- /dev/null
+++ b/drivers/platform/x86/intel_atomisp2_pm.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
+ * Trail devices. The sole purpose of this driver is to allow the ISP to
+ * be put in D3.
+ *
+ * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on various non upstream patches for ISP support:
+ * Copyright (C) 2010-2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
+/* PCI configuration regs */
+#define PCI_INTERRUPT_CTRL 0x9c
+
+#define PCI_CSI_CONTROL 0xe8
+#define PCI_CSI_CONTROL_PORTS_OFF_MASK 0x7
+
+/* IOSF BT_MBI_UNIT_PMC regs */
+#define ISPSSPM0 0x39
+#define ISPSSPM0_ISPSSC_OFFSET 0
+#define ISPSSPM0_ISPSSC_MASK 0x00000003
+#define ISPSSPM0_ISPSSS_OFFSET 24
+#define ISPSSPM0_ISPSSS_MASK 0x03000000
+#define ISPSSPM0_IUNIT_POWER_ON 0x0
+#define ISPSSPM0_IUNIT_POWER_OFF 0x3
+
+static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ unsigned long timeout;
+ u32 val;
+
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0);
+
+ /*
+ * MRFLD IUNIT DPHY is located in an always-power-on island.
+ * The MRFLD HW design needs all CSI ports to be disabled before
+ * powering down the IUNIT.
+ */
+ pci_read_config_dword(dev, PCI_CSI_CONTROL, &val);
+ val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
+ pci_write_config_dword(dev, PCI_CSI_CONTROL, val);
+
+ /* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
+ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
+ ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK);
+
+ /*
+ * There should be no IUNIT access while power-down is
+ * in progress (HW sighting: 4567865).
+ * Wait up to 50 ms for the IUNIT to shut down.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ while (1) {
+ /* Wait until ISPSSPM0 bit[25:24] shows 0x3 */
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val);
+ val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
+ if (val == ISPSSPM0_IUNIT_POWER_OFF)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->dev, "IUNIT power-off timeout.\n");
+ return -EBUSY;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ pm_runtime_allow(&dev->dev);
+ pm_runtime_put_sync_suspend(&dev->dev);
+
+ return 0;
+}
+
+static void isp_remove(struct pci_dev *dev)
+{
+ pm_runtime_get_sync(&dev->dev);
+ pm_runtime_forbid(&dev->dev);
+}
+
+static int isp_pci_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int isp_pci_resume(struct device *dev)
+{
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
+ isp_pci_resume, NULL);
+
+static const struct pci_device_id isp_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x22b8), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, isp_id_table);
+
+static struct pci_driver isp_pci_driver = {
+ .name = "intel_atomisp2_pm",
+ .id_table = isp_id_table,
+ .probe = isp_probe,
+ .remove = isp_remove,
+ .driver.pm = &isp_pm_ops,
+};
+
+module_pci_driver(isp_pci_driver);
+
+MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
index 227943a20212..951c105bafc1 100644
--- a/drivers/platform/x86/intel_bxtwc_tmu.c
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -1,21 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_bxtwc_tmu.c - Intel BXT Whiskey Cove PMIC TMU driver
+ * Intel BXT Whiskey Cove PMIC TMU driver
*
* Copyright (C) 2016 Intel Corporation. All rights reserved.
*
 * This driver adds TMU (Time Management Unit) support for the Intel BXT
 * platform. It enables the alarm wake-up functionality in the TMU unit of
 * the Whiskey Cove PMIC.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index f40b1c192106..464fe93657b5 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Cherry Trail ACPI INT33FE pseudo device driver
*
* Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
 * Some Intel Cherry Trail based devices which ship with Windows 10 have
* this weird INT33FE ACPI device with a CRS table with 4 I2cSerialBusV2
* resources, for 4 different chips attached to various i2c busses:
@@ -257,4 +254,4 @@ module_platform_driver(cht_int33fe_driver);
MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
index 38b8e7cfe88c..0df2e82dd249 100644
--- a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
+++ b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Power-button driver for Dollar Cove TI PMIC
* Copyright (C) 2014 Intel Corp
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index e89ad4964dc1..4b8f7305fc8a 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel INT0002 "Virtual GPIO" driver
*
@@ -9,10 +10,6 @@
*
* Author: Dyut Kumar Sil <dyut.k.sil@intel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Some peripherals on Bay Trail and Cherry Trail platforms signal a Power
 * Management Event (PME) to the Power Management Controller (PMC) to wake up
 * the system. When this happens, software needs to clear the PME bus 0 status
@@ -57,11 +54,7 @@
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
static const struct x86_cpu_id int0002_cpu_ids[] = {
-/*
- * Limit ourselves to Cherry Trail for now, until testing shows we
- * need to handle the INT0002 device on Baytrail too.
- * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail *
- */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
};
@@ -110,6 +103,21 @@ static void int0002_irq_mask(struct irq_data *data)
outl(gpe_en_reg, GPE0A_EN_PORT);
}
+static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct platform_device *pdev = to_platform_device(chip->parent);
+ int irq = platform_get_irq(pdev, 0);
+
+ /* Propagate to parent irq */
+ if (on)
+ enable_irq_wake(irq);
+ else
+ disable_irq_wake(irq);
+
+ return 0;
+}
+
static irqreturn_t int0002_irq(int irq, void *data)
{
struct gpio_chip *chip = data;
@@ -132,6 +140,7 @@ static struct irq_chip int0002_irqchip = {
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
.irq_unmask = int0002_irq_unmask,
+ .irq_set_wake = int0002_irq_set_wake,
};
static int int0002_probe(struct platform_device *pdev)
@@ -216,4 +225,4 @@ module_platform_driver(int0002_driver);
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Intel INT0002 Virtual GPIO driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index c5ece7ef08c6..225638a1b09e 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2009-2010 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
* Authors:
* Jesse Barnes <jbarnes@virtuousgeek.org>
*/
@@ -1697,6 +1686,6 @@ static struct pci_driver ips_pci_driver = {
module_pci_driver(ips_pci_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>");
MODULE_DESCRIPTION("Intelligent Power Sharing Driver");
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 60f4e3ddbe9f..512ad234ad0d 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
void ips_link_to_i915_driver(void);
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index ef9b0af8cdd3..77eb8709c931 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -1,25 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_menlow.c - Intel menlow Driver for thermal management extension
+ * Intel Menlow driver for thermal management extension
*
* Copyright (C) 2008 Intel Corp
* Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
* Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
 * This driver creates the sysfs interface for programming the sensors.
 * It also implements the driver for the Intel Menlow memory controller (hardware
@@ -29,20 +14,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
+#include <linux/slab.h>
#include <linux/thermal.h>
-#include <linux/acpi.h>
+#include <linux/types.h>
MODULE_AUTHOR("Thomas Sujith");
MODULE_AUTHOR("Zhang Rui");
MODULE_DESCRIPTION("Intel Menlow platform specific driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
/*
* Memory controller device control
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 5ad44204a9c3..292bace83f1e 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Power button driver for Intel MID platforms.
*
@@ -5,18 +6,8 @@
*
* Author: Hong Liu <hong.liu@intel.com>
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/intel_msic.h>
@@ -121,12 +112,9 @@ static const struct mid_pb_ddata mrfld_ddata = {
.setup = mrfld_setup,
};
-#define ICPU(model, ddata) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
-
static const struct x86_cpu_id mid_pb_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata),
+ INTEL_CPU_FAM6(ATOM_SALTWELL_MID, mfld_ddata),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, mrfld_ddata),
{}
};
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 008a76903cbf..f402e2e74a38 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -1,39 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_mid_thermal.c - Intel MID platform thermal driver
+ * Intel MID platform thermal driver
*
* Copyright (C) 2011 Intel Corporation
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Author: Durgadoss R <durgadoss.r@intel.com>
*/
#define pr_fmt(fmt) "intel_mid_thermal: " fmt
-#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/module.h>
#include <linux/param.h>
-#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/pm.h>
+#include <linux/slab.h>
#include <linux/thermal.h>
-#include <linux/mfd/intel_msic.h>
/* Number of thermal sensors */
#define MSIC_THERMAL_SENSORS 4
@@ -567,4 +551,4 @@ module_platform_driver(mid_thermal_driver);
MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 5747f63c8d9f..3c0438ba385e 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * intel_oaktrail.c - Intel OakTrail Platform support.
+ * Intel OakTrail Platform support
*
* Copyright (C) 2010-2011 Intel Corporation
* Author: Yin Kangkai (kangkai.yin@intel.com)
@@ -8,21 +9,6 @@
* <cezary.jackiewicz (at) gmail.com>, based on MSI driver
* Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
 * This driver does the following:
* 1. registers itself in the Linux backlight control in
* /sys/class/backlight/intel_oaktrail/
@@ -38,18 +24,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/fb.h>
-#include <linux/mutex.h>
+#include <linux/backlight.h>
+#include <linux/dmi.h>
#include <linux/err.h>
+#include <linux/fb.h>
#include <linux/i2c.h>
-#include <linux/backlight.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/dmi.h>
#include <linux/rfkill.h>
+
#include <acpi/video.h>
#define DRIVER_NAME "intel_oaktrail"
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 2d272a3e0176..6b31d410cb09 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Core SoC Power Management Controller Driver
*
@@ -6,16 +7,6 @@
*
* Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
* Vishwanath Somayaji <vishwanath.somayaji@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index 93a7e99e1f8b..be045348ad86 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Core SoC Power Management Controller Header File
*
@@ -6,16 +7,6 @@
*
* Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
* Vishwanath Somayaji <vishwanath.somayaji@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef PMC_CORE_H
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index e7edc8c63936..7964ba22ef8d 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -1,39 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_pmc_ipc.c: Driver for the Intel PMC IPC mechanism
+ * Driver for the Intel PMC IPC mechanism
*
* (C) Copyright 2014-2015 Intel Corporation
*
- * This driver is based on Intel SCU IPC driver(intel_scu_opc.c) by
+ * This driver is based on Intel SCU IPC driver(intel_scu_ipc.c) by
* Sreedhara DS <sreedhara.ds@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- *
 * The PMC running in the ARC processor communicates with entities running in
 * the IA core through an IPC mechanism that handles messaging between the two.
*/
-#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/device.h>
-#include <linux/pm.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/pm_qos.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
#include <linux/sched.h>
-#include <linux/atomic.h>
-#include <linux/notifier.h>
-#include <linux/suspend.h>
-#include <linux/acpi.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/spinlock.h>
+#include <linux/suspend.h>
#include <asm/intel_pmc_ipc.h>
@@ -1029,7 +1024,7 @@ static void __exit intel_pmc_ipc_exit(void)
MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>");
MODULE_DESCRIPTION("Intel PMC IPC driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
/* Some modules are dependent on this, so init earlier */
fs_initcall(intel_pmc_ipc_init);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 2efeab650345..79671927f4ef 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -1,25 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Intel P-Unit Mailbox IPC mechanism
*
* (C) Copyright 2015 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* The heart of the P-Unit is the Foxton microcontroller and its firmware,
 * which provide a mailbox interface for power management usage.
*/
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
#include <linux/acpi.h>
-#include <linux/delay.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+
#include <asm/intel_punit_ipc.h>
/* IPC Mailbox registers */
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 75c8fef7a482..cdab916fbf92 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
+ * Driver for the Intel SCU IPC mechanism
*
* (C) Copyright 2008-2010,2015 Intel Corporation
* Author: Sreedhara DS (sreedhara.ds@intel.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- *
 * The SCU running in the ARC processor communicates with entities running in
 * the IA core through an IPC mechanism that handles messaging between the two.
 * The SCU has two IPC mechanisms, IPC-1 and IPC-2. IPC-1 is used between IA32 and
@@ -16,14 +12,16 @@
* IPC-1 Driver provides an API for power control unit registers (e.g. MSIC)
* along with other APIs.
*/
+
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/pm.h>
-#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/sfi.h>
+
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index aa454241489c..8afe6fa06d7b 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -1,32 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
+ * Driver for the Intel SCU IPC mechanism
*
* (C) Copyright 2008-2010 Intel Corporation
* Author: Sreedhara DS (sreedhara.ds@intel.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- *
- * This driver provides ioctl interfaces to call intel scu ipc driver api
+ * This driver provides IOCTL interfaces to call Intel SCU IPC driver API.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fs.h>
#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
#include <asm/intel_scu_ipc.h>
static int major;
-/* ioctl commnds */
+/* IOCTL commands */
#define INTE_SCU_IPC_REGISTER_READ 0
#define INTE_SCU_IPC_REGISTER_WRITE 1
#define INTE_SCU_IPC_REGISTER_UPDATE 2
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index f378621b5fe9..d4040bb222b4 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel SoC Core Telemetry Driver
* Copyright (C) 2015, Intel Corporation.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 * The Telemetry Framework provides platform-related PM and performance statistics.
* This file provides the core telemetry API implementation.
*/
@@ -460,4 +452,4 @@ module_exit(telemetry_module_exit);
MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
MODULE_DESCRIPTION("Intel SoC Telemetry Interface");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index cee08f236292..40bce560eb30 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel SOC Telemetry debugfs Driver: Currently supports APL
* Copyright (c) 2015, Intel Corporation.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
* This file provides the debugfs interfaces for telemetry.
* /sys/kernel/debug/telemetry/pss_info: Shows Primary Control Sub-Sys Counters
* /sys/kernel/debug/telemetry/ioss_info: Shows IO Sub-System Counters
@@ -72,9 +64,6 @@
#define TELEM_IOSS_DX_D0IX_EVTS 25
#define TELEM_IOSS_PG_EVTS 30
-#define TELEM_DEBUGFS_CPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data}
-
#define TELEM_CHECK_AND_PARSE_EVTS(EVTID, EVTNUM, BUF, EVTLOG, EVTDAT, MASK) { \
if (evtlog[index].telem_evtid == (EVTID)) { \
for (idx = 0; idx < (EVTNUM); idx++) \
@@ -319,8 +308,8 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
- TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT, telem_apl_debugfs_conf),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf),
{}
};
@@ -951,12 +940,16 @@ static int __init telemetry_debugfs_init(void)
debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
err = telemetry_pltconfig_valid();
- if (err < 0)
+ if (err < 0) {
+ pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
return -ENODEV;
+ }
err = telemetry_debugfs_check_evts();
- if (err < 0)
+ if (err < 0) {
+ pr_info("telemetry_debugfs_check_evts failed\n");
return -EINVAL;
+ }
register_pm_notifier(&pm_notifier);
@@ -1037,4 +1030,4 @@ module_exit(telemetry_debugfs_exit);
MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
MODULE_DESCRIPTION("Intel SoC Telemetry debugfs Interface");
MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index fcc6bee51a42..df8565bad595 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel SOC Telemetry Platform Driver: Currently supports APL
* Copyright (c) 2015, Intel Corporation.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 * This file provides the platform-specific telemetry implementation for APL.
 * It uses the PUNIT and PMC IPC interfaces for configuring the counters.
* The accumulated results are fetched from SRAM.
@@ -1242,4 +1234,4 @@ module_exit(telemetry_module_exit);
MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>");
MODULE_DESCRIPTION("Intel SoC Telemetry Platform Driver");
MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c
index a6d5aa0c3c47..7b9cc841ab65 100644
--- a/drivers/platform/x86/intel_turbo_max_3.c
+++ b/drivers/platform/x86/intel_turbo_max_3.c
@@ -1,28 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
* Copyright (c) 2017, Intel Corporation.
* All rights reserved.
*
* Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/topology.h>
#include <linux/workqueue.h>
-#include <linux/cpuhotplug.h>
-#include <linux/cpufeature.h>
+
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
new file mode 100644
index 000000000000..c0bb1f864dfe
--- /dev/null
+++ b/drivers/platform/x86/lg-laptop.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * lg-laptop.c - LG Gram ACPI features and hotkeys driver
+ *
+ * Copyright (C) 2018 Matan Ziv-Av <matan@svgalib.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define LED_DEVICE(_name, max) struct led_classdev _name = { \
+ .name = __stringify(_name), \
+ .max_brightness = max, \
+ .brightness_set = _name##_set, \
+ .brightness_get = _name##_get, \
+}
+
+MODULE_AUTHOR("Matan Ziv-Av");
+MODULE_DESCRIPTION("LG WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define WMI_EVENT_GUID0 "E4FB94F9-7F2B-4173-AD1A-CD1D95086248"
+#define WMI_EVENT_GUID1 "023B133E-49D1-4E10-B313-698220140DC2"
+#define WMI_EVENT_GUID2 "37BE1AC0-C3F2-4B1F-BFBE-8FDEAF2814D6"
+#define WMI_EVENT_GUID3 "911BAD44-7DF8-4FBB-9319-BABA1C4B293B"
+#define WMI_METHOD_WMAB "C3A72B38-D3EF-42D3-8CBB-D5A57049F66D"
+#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
+#define WMI_EVENT_GUID WMI_EVENT_GUID0
+
+#define WMAB_METHOD "\\XINI.WMAB"
+#define WMBB_METHOD "\\XINI.WMBB"
+#define SB_GGOV_METHOD "\\_SB.GGOV"
+#define GOV_TLED 0x2020008
+#define WM_GET 1
+#define WM_SET 2
+#define WM_KEY_LIGHT 0x400
+#define WM_TLED 0x404
+#define WM_FN_LOCK 0x407
+#define WM_BATT_LIMIT 0x61
+#define WM_READER_MODE 0xBF
+#define WM_FAN_MODE 0x33
+#define WMBB_USB_CHARGE 0x10B
+#define WMBB_BATT_LIMIT 0x10C
+
+#define PLATFORM_NAME "lg-laptop"
+
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID0);
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID1);
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID2);
+MODULE_ALIAS("wmi:" WMI_EVENT_GUID3);
+MODULE_ALIAS("wmi:" WMI_METHOD_WMAB);
+MODULE_ALIAS("wmi:" WMI_METHOD_WMBB);
+MODULE_ALIAS("acpi*:LGEX0815:*");
+
+static struct platform_device *pf_device;
+static struct input_dev *wmi_input_dev;
+
+static u32 inited;
+#define INIT_INPUT_WMI_0 0x01
+#define INIT_INPUT_WMI_2 0x02
+#define INIT_INPUT_ACPI 0x04
+#define INIT_TPAD_LED 0x08
+#define INIT_KBD_LED 0x10
+#define INIT_SPARSE_KEYMAP 0x80
+
+static const struct key_entry wmi_keymap[] = {
+ {KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
+ {KE_KEY, 0x74, {KEY_F13} }, /* Touchpad toggle (F5) */
+ {KE_KEY, 0xf020000, {KEY_F14} }, /* Read mode (F9) */
+ {KE_KEY, 0x10000000, {KEY_F16} },/* Keyboard backlight (F8) - pressing
+ * this key both sends an event and
+ * changes backlight level.
+ */
+ {KE_KEY, 0x80, {KEY_RFKILL} },
+ {KE_END, 0}
+};
+
+static int ggov(u32 arg0)
+{
+ union acpi_object args[1];
+ union acpi_object *r;
+ acpi_status status;
+ acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ int res;
+
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = arg0;
+
+ status = acpi_get_handle(NULL, (acpi_string) SB_GGOV_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot get handle");
+ return -ENODEV;
+ }
+
+ arg.count = 1;
+ arg.pointer = args;
+
+ status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "GGOV: call failed.\n");
+ return -EINVAL;
+ }
+
+ r = buffer.pointer;
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EINVAL;
+ }
+
+ res = r->integer.value;
+ kfree(r);
+
+ return res;
+}
+
+static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+{
+ union acpi_object args[3];
+ acpi_status status;
+ acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = method;
+ args[1].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = arg1;
+ args[2].type = ACPI_TYPE_INTEGER;
+ args[2].integer.value = arg2;
+
+ status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot get handle");
+ return NULL;
+ }
+
+ arg.count = 3;
+ arg.pointer = args;
+
+ status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "WMAB: call failed.\n");
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+{
+ union acpi_object args[3];
+ acpi_status status;
+ acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ u8 buf[32];
+
+ *(u32 *)buf = method_id;
+ *(u32 *)(buf + 4) = arg1;
+ *(u32 *)(buf + 16) = arg2;
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = 0; /* ignored */
+ args[1].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = 1; /* Must be 1 or 2. Does not matter which */
+ args[2].type = ACPI_TYPE_BUFFER;
+ args[2].buffer.length = 32;
+ args[2].buffer.pointer = buf;
+
+ status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot get handle");
+ return NULL;
+ }
+
+ arg.count = 3;
+ arg.pointer = args;
+
+ status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "WMAB: call failed.\n");
+ return NULL;
+ }
+
+ return (union acpi_object *)buffer.pointer;
+}
+
+static void wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ long data = (long)context;
+
+ pr_debug("event guid %li\n", data);
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (!obj)
+ return;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ int eventcode = obj->integer.value;
+ struct key_entry *key;
+
+ key =
+ sparse_keymap_entry_from_scancode(wmi_input_dev, eventcode);
+ if (key && key->type == KE_KEY)
+ sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+ }
+
+ pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type,
+ obj->integer.value);
+ kfree(response.pointer);
+}
+
+static void wmi_input_setup(void)
+{
+ acpi_status status;
+
+ wmi_input_dev = input_allocate_device();
+ if (wmi_input_dev) {
+ wmi_input_dev->name = "LG WMI hotkeys";
+ wmi_input_dev->phys = "wmi/input0";
+ wmi_input_dev->id.bustype = BUS_HOST;
+
+ if (sparse_keymap_setup(wmi_input_dev, wmi_keymap, NULL) ||
+ input_register_device(wmi_input_dev)) {
+ pr_info("Cannot initialize input device");
+ input_free_device(wmi_input_dev);
+ return;
+ }
+
+ inited |= INIT_SPARSE_KEYMAP;
+ status = wmi_install_notify_handler(WMI_EVENT_GUID0, wmi_notify,
+ (void *)0);
+ if (ACPI_SUCCESS(status))
+ inited |= INIT_INPUT_WMI_0;
+
+ status = wmi_install_notify_handler(WMI_EVENT_GUID2, wmi_notify,
+ (void *)2);
+ if (ACPI_SUCCESS(status))
+ inited |= INIT_INPUT_WMI_2;
+ } else {
+ pr_info("Cannot allocate input device");
+ }
+}
+
+static void acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct key_entry *key;
+
+ acpi_handle_debug(device->handle, "notify: %d\n", event);
+ if (inited & INIT_SPARSE_KEYMAP) {
+ key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
+ if (key && key->type == KE_KEY)
+ sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+ }
+}
+
+static ssize_t fan_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ u32 m;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ m = r->integer.value;
+ kfree(r);
+ r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ kfree(r);
+ r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ kfree(r);
+
+ return count;
+}
+
+static ssize_t fan_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = r->integer.value & 0x01;
+ kfree(r);
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+}
+
+static ssize_t usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_BUFFER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = !!r->buffer.pointer[0x10];
+
+ kfree(r);
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t reader_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmab(WM_READER_MODE, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+}
+
+static ssize_t reader_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_READER_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = !!r->integer.value;
+
+ kfree(r);
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t fn_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ bool value;
+ union acpi_object *r;
+ int ret;
+
+ ret = kstrtobool(buffer, &value);
+ if (ret)
+ return ret;
+
+ r = lg_wmab(WM_FN_LOCK, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+}
+
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_BUFFER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = !!r->buffer.pointer[0];
+ kfree(r);
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t battery_care_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buffer, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value == 100 || value == 80) {
+ union acpi_object *r;
+
+ r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+ kfree(r);
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t battery_care_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ unsigned int status;
+ union acpi_object *r;
+
+ r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+ if (r->type != ACPI_TYPE_INTEGER) {
+ kfree(r);
+ return -EIO;
+ }
+
+ status = r->integer.value;
+ kfree(r);
+ if (status != 80 && status != 100)
+ status = 0;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static DEVICE_ATTR_RW(fan_mode);
+static DEVICE_ATTR_RW(usb_charge);
+static DEVICE_ATTR_RW(reader_mode);
+static DEVICE_ATTR_RW(fn_lock);
+static DEVICE_ATTR_RW(battery_care_limit);
+
+static struct attribute *dev_attributes[] = {
+ &dev_attr_fan_mode.attr,
+ &dev_attr_usb_charge.attr,
+ &dev_attr_reader_mode.attr,
+ &dev_attr_fn_lock.attr,
+ &dev_attr_battery_care_limit.attr,
+ NULL
+};
+
+static const struct attribute_group dev_attribute_group = {
+ .attrs = dev_attributes,
+};
+
+static void tpad_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ union acpi_object *r;
+
+ r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
+ kfree(r);
+}
+
+static enum led_brightness tpad_led_get(struct led_classdev *cdev)
+{
+ return ggov(GOV_TLED) > 0 ? LED_ON : LED_OFF;
+}
+
+static LED_DEVICE(tpad_led, 1);
+
+static void kbd_backlight_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ u32 val;
+ union acpi_object *r;
+
+ val = 0x22;
+ if (brightness <= LED_OFF)
+ val = 0;
+ if (brightness >= LED_FULL)
+ val = 0x24;
+ r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
+ kfree(r);
+}
+
+static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+{
+ union acpi_object *r;
+ int val;
+
+ r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
+
+ if (!r)
+ return LED_OFF;
+
+ if (r->type != ACPI_TYPE_BUFFER || r->buffer.pointer[1] != 0x05) {
+ kfree(r);
+ return LED_OFF;
+ }
+
+ switch (r->buffer.pointer[0] & 0x27) {
+ case 0x24:
+ val = LED_FULL;
+ break;
+ case 0x22:
+ val = LED_HALF;
+ break;
+ default:
+ val = LED_OFF;
+ }
+
+ kfree(r);
+
+ return val;
+}
+
+static LED_DEVICE(kbd_backlight, 255);
+
+static void wmi_input_destroy(void)
+{
+ if (inited & INIT_INPUT_WMI_2)
+ wmi_remove_notify_handler(WMI_EVENT_GUID2);
+
+ if (inited & INIT_INPUT_WMI_0)
+ wmi_remove_notify_handler(WMI_EVENT_GUID0);
+
+ if (inited & INIT_SPARSE_KEYMAP)
+ input_unregister_device(wmi_input_dev);
+
+ inited &= ~(INIT_INPUT_WMI_0 | INIT_INPUT_WMI_2 | INIT_SPARSE_KEYMAP);
+}
+
+static struct platform_driver pf_driver = {
+ .driver = {
+ .name = PLATFORM_NAME,
+ }
+};
+
+static int acpi_add(struct acpi_device *device)
+{
+ int ret;
+
+ if (pf_device)
+ return 0;
+
+ ret = platform_driver_register(&pf_driver);
+ if (ret)
+ return ret;
+
+ pf_device = platform_device_register_simple(PLATFORM_NAME,
+ PLATFORM_DEVID_NONE,
+ NULL, 0);
+ if (IS_ERR(pf_device)) {
+ ret = PTR_ERR(pf_device);
+ pf_device = NULL;
+ pr_err("unable to register platform device\n");
+ goto out_platform_registered;
+ }
+
+ ret = sysfs_create_group(&pf_device->dev.kobj, &dev_attribute_group);
+ if (ret)
+ goto out_platform_device;
+
+ if (!led_classdev_register(&pf_device->dev, &kbd_backlight))
+ inited |= INIT_KBD_LED;
+
+ if (!led_classdev_register(&pf_device->dev, &tpad_led))
+ inited |= INIT_TPAD_LED;
+
+ wmi_input_setup();
+
+ return 0;
+
+out_platform_device:
+ platform_device_unregister(pf_device);
+out_platform_registered:
+ platform_driver_unregister(&pf_driver);
+ return ret;
+}
+
+static int acpi_remove(struct acpi_device *device)
+{
+ sysfs_remove_group(&pf_device->dev.kobj, &dev_attribute_group);
+ if (inited & INIT_KBD_LED)
+ led_classdev_unregister(&kbd_backlight);
+
+ if (inited & INIT_TPAD_LED)
+ led_classdev_unregister(&tpad_led);
+
+ wmi_input_destroy();
+ platform_device_unregister(pf_device);
+ pf_device = NULL;
+ platform_driver_unregister(&pf_driver);
+
+ return 0;
+}
+
+static const struct acpi_device_id device_ids[] = {
+ {"LGEX0815", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, device_ids);
+
+static struct acpi_driver acpi_driver = {
+ .name = "LG Gram Laptop Support",
+ .class = "lg-laptop",
+ .ids = device_ids,
+ .ops = {
+ .add = acpi_add,
+ .remove = acpi_remove,
+ .notify = acpi_notify,
+ },
+ .owner = THIS_MODULE,
+};
+
+static int __init acpi_init(void)
+{
+ int result;
+
+ result = acpi_bus_register_driver(&acpi_driver);
+ if (result < 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering driver\n"));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit acpi_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_driver);
+}
+
+module_init(acpi_init);
+module_exit(acpi_exit);
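
A quick aside on the LED_DEVICE() helper used in lg-laptop.c above: it stringifies its
first argument for the LED name and wires up the matching _set/_get callbacks. As a
sketch derived purely from the macro definition in the file (this is not additional
code in the patch), static LED_DEVICE(tpad_led, 1); expands to roughly:

static struct led_classdev tpad_led = {
	.name		= "tpad_led",	/* __stringify(tpad_led) */
	.max_brightness	= 1,
	.brightness_set	= tpad_led_set,	/* tpad_led##_set */
	.brightness_get	= tpad_led_get,	/* tpad_led##_get */
};

Once passed to led_classdev_register(), tpad_led and kbd_backlight appear as ordinary
LED class devices under /sys/class/leds/.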
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index d89936c93ba0..c2c3a1a19879 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -575,7 +575,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = {
- .items = mlxplat_mlxcpld_msn21xx_items,
+ .items = mlxplat_mlxcpld_msn201x_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index cb204f973491..5f2d7ea912b5 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -42,10 +42,13 @@ static const struct ts_dmi_data chuwi_hi8_data = {
};
static const struct property_entry chuwi_hi8_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 6),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -56,6 +59,8 @@ static const struct ts_dmi_data chuwi_hi8_pro_data = {
};
static const struct property_entry chuwi_vi8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1724),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
@@ -88,9 +93,9 @@ static const struct ts_dmi_data chuwi_vi10_data = {
static const struct property_entry connect_tablet9_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 9),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 878),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
@@ -104,8 +109,10 @@ static const struct ts_dmi_data connect_tablet9_data = {
};
static const struct property_entry cube_iwork8_air_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 900),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
@@ -179,11 +186,14 @@ static const struct ts_dmi_data gp_electronic_t701_data = {
};
static const struct property_entry itworks_tw891_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
@@ -207,8 +217,10 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
};
static const struct property_entry jumper_ezpad_mini3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1700),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
@@ -237,6 +249,24 @@ static const struct ts_dmi_data onda_obook_20_plus_data = {
.properties = onda_obook_20_plus_props,
};
+static const struct property_entry onda_v80_plus_v3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 22),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3676-onda-v80-plus-v3.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data onda_v80_plus_v3_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = onda_v80_plus_v3_props,
+};
+
static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -322,11 +352,14 @@ static const struct ts_dmi_data pov_mobii_wintab_p800w_v20_data = {
};
static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1800),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -366,6 +399,22 @@ static const struct ts_dmi_data teclast_x98plus2_data = {
.properties = teclast_x98plus2_props,
};
+static const struct property_entry trekstor_primebook_c11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-trekstor-primebook-c11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_primebook_c11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_primebook_c11_props,
+};
+
static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
@@ -381,6 +430,22 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = {
.properties = trekstor_primebook_c13_props,
};
+static const struct property_entry trekstor_primetab_t13b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-trekstor-primetab-t13b.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_primetab_t13b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = trekstor_primetab_t13b_props,
+};
+
static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
@@ -397,6 +462,8 @@ static const struct ts_dmi_data trekstor_surftab_twin_10_1_data = {
};
static const struct property_entry trekstor_surftab_wintron70_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
PROPERTY_ENTRY_STRING("firmware-name",
@@ -556,6 +623,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* ONDA V80 plus v3 (P80PSBG9V3A01501) */
+ .driver_data = (void *)&onda_v80_plus_v3_data,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONDA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V80 PLUS")
+ },
+ },
+ {
/* ONDA V820w DualOS */
.driver_data = (void *)&onda_v820w_32g_data,
.matches = {
@@ -641,6 +716,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Trekstor Primebook C11 */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+ },
+ },
+ {
/* Trekstor Primebook C13 */
.driver_data = (void *)&trekstor_primebook_c13_data,
.matches = {
@@ -649,6 +732,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Trekstor Primetab T13B */
+ .driver_data = (void *)&trekstor_primetab_t13b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"),
+ },
+ },
+ {
/* TrekStor SurfTab twin 10.1 ST10432-8 */
.driver_data = (void *)&trekstor_surftab_twin_10_1_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 04791ea5d97b..bea35be68706 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -987,19 +987,19 @@ static struct bus_type wmi_bus_type = {
.remove = wmi_dev_remove,
};
-static struct device_type wmi_type_event = {
+static const struct device_type wmi_type_event = {
.name = "event",
.groups = wmi_event_groups,
.release = wmi_dev_release,
};
-static struct device_type wmi_type_method = {
+static const struct device_type wmi_type_method = {
.name = "method",
.groups = wmi_method_groups,
.release = wmi_dev_release,
};
-static struct device_type wmi_type_data = {
+static const struct device_type wmi_type_data = {
.name = "data",
.groups = wmi_data_groups,
.release = wmi_dev_release,
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 504d252716f2..27e5dd47a01f 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -447,10 +447,9 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
help
- PWM driver support for the ECAP APWM controller found on AM33XX
- TI SOC
+ PWM driver support for the ECAP APWM controller found on TI SOCs
To compile this driver as a module, choose M here: the module
will be called pwm-tiecap.
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 5561b9e190f8..757230e1f575 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -30,6 +30,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
.clk_rate = 19200000,
.npwm = 1,
.base_unit_bits = 16,
+ .other_devices_aml_touches_pwm_regs = true,
};
/* Broxton */
@@ -60,6 +61,7 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
platform_set_drvdata(pdev, lpwm);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -74,13 +76,29 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
return pwm_lpss_remove(lpwm);
}
-static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops,
- pwm_lpss_suspend,
- pwm_lpss_resume);
+static int pwm_lpss_prepare(struct device *dev)
+{
+ struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
+
+ /*
+ * If another device's AML code touches the PWM registers on suspend/resume,
+ * force a runtime-resume of the PWM controller to allow this.
+ */
+ if (lpwm->info->other_devices_aml_touches_pwm_regs)
+ return 0; /* Force runtime-resume */
+
+ return 1; /* If runtime-suspended leave as is */
+}
+
+static const struct dev_pm_ops pwm_lpss_platform_pm_ops = {
+ .prepare = pwm_lpss_prepare,
+ SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume)
+};
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
{ "80860F09", (unsigned long)&pwm_lpss_byt_info },
{ "80862288", (unsigned long)&pwm_lpss_bsw_info },
+ { "80862289", (unsigned long)&pwm_lpss_bsw_info },
{ "80865AC8", (unsigned long)&pwm_lpss_bxt_info },
{ },
};
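The new .prepare callback above relies on the convention enabled by DPM_FLAG_SMART_PREPARE: a positive return value tells the PM core that a runtime-suspended device may stay suspended across the system transition, while returning 0 forces a runtime-resume before the system sleep callbacks run. A minimal sketch of that pattern, with illustrative driver names (not taken from this patch):

/* Sketch of the DPM_FLAG_SMART_PREPARE pattern; names are illustrative. */
static int example_prepare(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (priv->needs_resume_across_suspend)
		return 0;	/* force runtime-resume before system sleep */

	return 1;	/* safe to leave the device runtime-suspended */
}

static const struct dev_pm_ops example_pm_ops = {
	.prepare = example_prepare,
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};

/* in probe(): */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_PREPARE);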
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 4721a264bac2..2ac3a2aa9e53 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -32,15 +32,6 @@
/* Size of each PWM register space if multiple */
#define PWM_SIZE 0x400
-#define MAX_PWMS 4
-
-struct pwm_lpss_chip {
- struct pwm_chip chip;
- void __iomem *regs;
- const struct pwm_lpss_boardinfo *info;
- u32 saved_ctrl[MAX_PWMS];
-};
-
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
{
return container_of(chip, struct pwm_lpss_chip, chip);
@@ -97,7 +88,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
unsigned long long on_time_div;
unsigned long c = lpwm->info->clk_rate, base_unit_range;
unsigned long long base_unit, freq = NSEC_PER_SEC;
- u32 ctrl;
+ u32 orig_ctrl, ctrl;
do_div(freq, period_ns);
@@ -114,13 +105,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
do_div(on_time_div, period_ns);
on_time_div = 255ULL - on_time_div;
- ctrl = pwm_lpss_read(pwm);
+ orig_ctrl = ctrl = pwm_lpss_read(pwm);
ctrl &= ~PWM_ON_TIME_DIV_MASK;
ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
base_unit &= base_unit_range;
ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
ctrl |= on_time_div;
- pwm_lpss_write(pwm, ctrl);
+
+ if (orig_ctrl != ctrl) {
+ pwm_lpss_write(pwm, ctrl);
+ pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
+ }
}
static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
@@ -144,7 +139,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
ret = pwm_lpss_wait_for_update(pwm);
if (ret) {
@@ -157,7 +151,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
return ret;
pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
return pwm_lpss_wait_for_update(pwm);
}
} else if (pwm_is_enabled(pwm)) {
@@ -168,8 +161,42 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
+/* This function gets called once from pwmchip_add to get the initial state */
+static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_lpss_chip *lpwm = to_lpwm(chip);
+ unsigned long base_unit_range;
+ unsigned long long base_unit, freq, on_time_div;
+ u32 ctrl;
+
+ base_unit_range = BIT(lpwm->info->base_unit_bits);
+
+ ctrl = pwm_lpss_read(pwm);
+ on_time_div = 255 - (ctrl & PWM_ON_TIME_DIV_MASK);
+ base_unit = (ctrl >> PWM_BASE_UNIT_SHIFT) & (base_unit_range - 1);
+
+ freq = base_unit * lpwm->info->clk_rate;
+ do_div(freq, base_unit_range);
+ if (freq == 0)
+ state->period = NSEC_PER_SEC;
+ else
+ state->period = NSEC_PER_SEC / (unsigned long)freq;
+
+ on_time_div *= state->period;
+ do_div(on_time_div, 255);
+ state->duty_cycle = on_time_div;
+
+ state->polarity = PWM_POLARITY_NORMAL;
+ state->enabled = !!(ctrl & PWM_ENABLE);
+
+ if (state->enabled)
+ pm_runtime_get(chip->dev);
+}
+
static const struct pwm_ops pwm_lpss_ops = {
.apply = pwm_lpss_apply,
+ .get_state = pwm_lpss_get_state,
.owner = THIS_MODULE,
};
@@ -214,6 +241,12 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe);
int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
{
+ int i;
+
+ for (i = 0; i < lpwm->info->npwm; i++) {
+ if (pwm_is_enabled(&lpwm->chip.pwms[i]))
+ pm_runtime_put(lpwm->chip.dev);
+ }
return pwmchip_remove(&lpwm->chip);
}
EXPORT_SYMBOL_GPL(pwm_lpss_remove);
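The period reconstruction in pwm_lpss_get_state() inverts the formula used by pwm_lpss_prepare(): freq = base_unit * clk_rate / base_unit_range, then period = NSEC_PER_SEC / freq. A small stand-alone check of that arithmetic with assumed register values (not from any real device):

/* Stand-alone check of the get_state() math; register values are assumed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;
	uint64_t clk_rate = 19200000;		/* 19.2 MHz, as in the BYT/BSW boardinfo */
	uint64_t base_unit_range = 1ULL << 16;	/* base_unit_bits = 16 */
	uint64_t base_unit = 640;		/* assumed register field */
	uint64_t reg_on_time_div = 128;		/* assumed register field */

	uint64_t freq = base_unit * clk_rate / base_unit_range;	/* 187500 Hz */
	uint64_t period = freq ? nsec_per_sec / freq : nsec_per_sec;	/* 5333 ns */
	uint64_t duty = (255 - reg_on_time_div) * period / 255;	/* 2656 ns */

	printf("period %llu ns, duty %llu ns\n",
	       (unsigned long long)period, (unsigned long long)duty);
	return 0;
}

With these numbers the sketch prints a 5333 ns period and a 2656 ns duty cycle, matching what pwm_lpss_get_state() would report for such register contents.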
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index 7a4238ad1fcb..3236be835bd9 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -16,13 +16,25 @@
#include <linux/device.h>
#include <linux/pwm.h>
-struct pwm_lpss_chip;
+#define MAX_PWMS 4
+
+struct pwm_lpss_chip {
+ struct pwm_chip chip;
+ void __iomem *regs;
+ const struct pwm_lpss_boardinfo *info;
+ u32 saved_ctrl[MAX_PWMS];
+};
struct pwm_lpss_boardinfo {
unsigned long clk_rate;
unsigned int npwm;
unsigned long base_unit_bits;
bool bypass;
+ /*
+ * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
+ * messes with the PWM0 controller's state.
+ */
+ bool other_devices_aml_touches_pwm_regs;
};
struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 748f614d5375..a41812fc6f95 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car PWM Timer driver
*
* Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 29267d12fb4c..4a855a21b782 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Mobile TPU PWM driver
*
* Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index f8ebbece57b7..48c4595a0ffc 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -300,7 +300,6 @@ static const struct of_device_id tegra_pwm_of_match[] = {
{ .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc },
{ }
};
-
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
static const struct dev_pm_ops tegra_pwm_pm_ops = {
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 7c71cdb8a9d8..ceb233dd6048 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -249,6 +249,7 @@ static void pwm_export_release(struct device *child)
static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
{
struct pwm_export *export;
+ char *pwm_prop[2];
int ret;
if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags))
@@ -263,7 +264,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
export->pwm = pwm;
mutex_init(&export->lock);
- export->child.class = parent->class;
export->child.release = pwm_export_release;
export->child.parent = parent;
export->child.devt = MKDEV(0, 0);
@@ -277,6 +277,10 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
export = NULL;
return ret;
}
+ pwm_prop[0] = kasprintf(GFP_KERNEL, "EXPORT=pwm%u", pwm->hwpwm);
+ pwm_prop[1] = NULL;
+ kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop);
+ kfree(pwm_prop[0]);
return 0;
}
@@ -289,6 +293,7 @@ static int pwm_unexport_match(struct device *child, void *data)
static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm)
{
struct device *child;
+ char *pwm_prop[2];
if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags))
return -ENODEV;
@@ -297,6 +302,11 @@ static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm)
if (!child)
return -ENODEV;
+ pwm_prop[0] = kasprintf(GFP_KERNEL, "UNEXPORT=pwm%u", pwm->hwpwm);
+ pwm_prop[1] = NULL;
+ kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop);
+ kfree(pwm_prop[0]);
+
/* for device_find_child() */
put_device(child);
device_unregister(child);
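User space can react to the new EXPORT=/UNEXPORT= change events by monitoring the pwm subsystem. A rough libudev sketch follows; it assumes the pwmchip parent device lives in the "pwm" subsystem, that libudev is available (link with -ludev), and it omits error handling:

/* Rough libudev consumer for the EXPORT=/UNEXPORT= uevent properties. */
#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "pwm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *exp, *unexp;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		exp = udev_device_get_property_value(dev, "EXPORT");
		unexp = udev_device_get_property_value(dev, "UNEXPORT");
		if (exp)
			printf("exported %s on %s\n", exp, udev_device_get_sysname(dev));
		if (unexp)
			printf("unexported %s on %s\n", unexp, udev_device_get_sysname(dev));
		udev_device_unref(dev);
	}
}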
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 052d4dd347f9..f0abd2608044 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -84,8 +84,16 @@ config KEYSTONE_REMOTEPROC
It's safe to say N here if you're not interested in the Keystone
DSPs or just want to use a bare minimum kernel.
-config QCOM_ADSP_PIL
- tristate "Qualcomm ADSP Peripheral Image Loader"
+config QCOM_RPROC_COMMON
+ tristate
+
+config QCOM_Q6V5_COMMON
+ tristate
+ depends on ARCH_QCOM
+ depends on QCOM_SMEM
+
+config QCOM_Q6V5_ADSP
+ tristate "Qualcomm Technology Inc ADSP Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
@@ -95,33 +103,41 @@ config QCOM_ADSP_PIL
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
- select QCOM_SCM
help
- Say y here to support the TrustZone based Peripherial Image Loader
- for the Qualcomm ADSP remote processors.
+ Say y here to support the Peripheral Image Loader
+ for the Qualcomm Technology Inc. ADSP remote processors.
-config QCOM_RPROC_COMMON
- tristate
-
-config QCOM_Q6V5_COMMON
- tristate
- depends on ARCH_QCOM
+config QCOM_Q6V5_MSS
+ tristate "Qualcomm Hexagon V5 self-authenticating modem subsystem support"
+ depends on OF && ARCH_QCOM
depends on QCOM_SMEM
+ depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ select MFD_SYSCON
+ select QCOM_Q6V5_COMMON
+ select QCOM_RPROC_COMMON
+ select QCOM_SCM
+ help
+ Say y here to support the Qualcomm self-authenticating modem
+ subsystem based on Hexagon V5.
-config QCOM_Q6V5_PIL
- tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
+config QCOM_Q6V5_PAS
+ tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
select MFD_SYSCON
+ select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
help
- Say y here to support the Qualcomm Peripherial Image Loader for the
- Hexagon V5 based remote processors.
+ Say y here to support the TrustZone-based Peripheral Image Loader
+ for the Qualcomm Hexagon v5 based remote processors. This is commonly
+ used to control subsystems such as ADSP, Compute and Sensor.
config QCOM_Q6V5_WCSS
tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 03332fa7e2ee..ce5d061e92be 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -14,10 +14,11 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
-obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
-obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
+obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
+obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
+obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index e230bef71be1..d200334577f6 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -226,7 +226,7 @@ static int da8xx_rproc_get_internal_memories(struct platform_device *pdev,
res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK;
drproc->mem[i].size = resource_size(res);
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %p da 0x%x\n",
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
mem_names[i], &drproc->mem[i].bus_addr,
drproc->mem[i].size, drproc->mem[i].cpu_addr,
drproc->mem[i].dev_addr);
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 61a760ee4aac..0d33e3079f0d 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -84,6 +84,7 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
else
dev_err(q6v5->dev, "fatal error without message\n");
+ q6v5->running = false;
rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
return IRQ_HANDLED;
@@ -150,8 +151,6 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
{
int ret;
- q6v5->running = false;
-
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
@@ -188,6 +187,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
init_completion(&q6v5->stop_done);
q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
+ if (q6v5->wdog_irq < 0) {
+ if (q6v5->wdog_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to retrieve wdog IRQ: %d\n",
+ q6v5->wdog_irq);
+ return q6v5->wdog_irq;
+ }
+
ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
NULL, q6v5_wdog_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -198,6 +205,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
}
q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
+ if (q6v5->fatal_irq < 0) {
+ if (q6v5->fatal_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to retrieve fatal IRQ: %d\n",
+ q6v5->fatal_irq);
+ return q6v5->fatal_irq;
+ }
+
ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
NULL, q6v5_fatal_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -208,6 +223,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
}
q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
+ if (q6v5->ready_irq < 0) {
+ if (q6v5->ready_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to retrieve ready IRQ: %d\n",
+ q6v5->ready_irq);
+ return q6v5->ready_irq;
+ }
+
ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
NULL, q6v5_ready_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -218,6 +241,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
}
q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
+ if (q6v5->handover_irq < 0) {
+ if (q6v5->handover_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to retrieve handover IRQ: %d\n",
+ q6v5->handover_irq);
+ return q6v5->handover_irq;
+ }
+
ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
NULL, q6v5_handover_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -229,6 +260,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
disable_irq(q6v5->handover_irq);
q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
+ if (q6v5->stop_irq < 0) {
+ if (q6v5->stop_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to retrieve stop-ack IRQ: %d\n",
+ q6v5->stop_irq);
+ return q6v5->stop_irq;
+ }
+
ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
NULL, q6v5_stop_interrupt,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
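The five IRQ lookups added above repeat the same check; if one wanted to factor the pattern out (this patch does not), a local helper could look roughly like this:

/* Hypothetical helper, not part of the patch: wrap lookup + error reporting. */
static int q6v5_get_irq(struct platform_device *pdev, const char *name)
{
	int irq = platform_get_irq_byname(pdev, name);

	if (irq < 0 && irq != -EPROBE_DEFER)
		dev_err(&pdev->dev, "failed to retrieve %s IRQ: %d\n", name, irq);

	return irq;
}

/* usage: */
	q6v5->wdog_irq = q6v5_get_irq(pdev, "wdog");
	if (q6v5->wdog_irq < 0)
		return q6v5->wdog_irq;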
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
new file mode 100644
index 000000000000..79374d1de311
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Technology Inc. ADSP Peripheral Image Loader for SDM845.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_common.h"
+#include "qcom_q6v5.h"
+#include "remoteproc_internal.h"
+
+/* timeout values */
+#define ACK_TIMEOUT 1000
+#define BOOT_FSM_TIMEOUT 10000
+/* mask values */
+#define EVB_MASK GENMASK(27, 4)
+/* QDSP6SS register offsets */
+#define RST_EVB_REG 0x10
+#define CORE_START_REG 0x400
+#define BOOT_CMD_REG 0x404
+#define BOOT_STATUS_REG 0x408
+#define RET_CFG_REG 0x1C
+/* TCSR register offsets */
+#define LPASS_MASTER_IDLE_REG 0x8
+#define LPASS_HALTACK_REG 0x4
+#define LPASS_PWR_ON_REG 0x10
+#define LPASS_HALTREQ_REG 0x0
+
+/* list of clocks required by ADSP PIL */
+static const char * const adsp_clk_id[] = {
+ "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
+ "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core",
+};
+
+struct adsp_pil_data {
+ int crash_reason_smem;
+ const char *firmware_name;
+
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+};
+
+struct qcom_adsp {
+ struct device *dev;
+ struct rproc *rproc;
+
+ struct qcom_q6v5 q6v5;
+
+ struct clk *xo;
+
+ int num_clks;
+ struct clk_bulk_data *clks;
+
+ void __iomem *qdsp6ss_base;
+
+ struct reset_control *pdc_sync_reset;
+ struct reset_control *cc_lpass_restart;
+
+ struct regmap *halt_map;
+ unsigned int halt_lpass;
+
+ int crash_reason_smem;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_sysmon *sysmon;
+};
+
+static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
+{
+ unsigned long timeout;
+ unsigned int val;
+ int ret;
+
+ /* Reset the retention logic */
+ val = readl(adsp->qdsp6ss_base + RET_CFG_REG);
+ val |= 0x1;
+ writel(val, adsp->qdsp6ss_base + RET_CFG_REG);
+
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+
+ /* QDSP6 master port needs to be explicitly halted */
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_PWR_ON_REG, &val);
+ if (ret || !val)
+ goto reset;
+
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_MASTER_IDLE_REG,
+ &val);
+ if (ret || val)
+ goto reset;
+
+ regmap_write(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
+
+ /* Wait for halt ACK from QDSP6 */
+ timeout = jiffies + msecs_to_jiffies(ACK_TIMEOUT);
+ for (;;) {
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, &val);
+ if (ret || val || time_after(jiffies, timeout))
+ break;
+
+ usleep_range(1000, 1100);
+ }
+
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_MASTER_IDLE_REG, &val);
+ if (ret || !val)
+ dev_err(adsp->dev, "port failed halt\n");
+
+reset:
+ /* Assert the LPASS PDC Reset */
+ reset_control_assert(adsp->pdc_sync_reset);
+ /* Place the LPASS processor into reset */
+ reset_control_assert(adsp->cc_lpass_restart);
+ /* wait after asserting subsystem restart from AOSS */
+ usleep_range(200, 300);
+
+ /* Clear the halt request for the AXIM and AHBM for Q6 */
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
+
+ /* De-assert the LPASS PDC Reset */
+ reset_control_deassert(adsp->pdc_sync_reset);
+ /* Remove the LPASS reset */
+ reset_control_deassert(adsp->cc_lpass_restart);
+ /* wait after de-asserting subsystem restart from AOSS */
+ usleep_range(200, 300);
+
+ return 0;
+}
+
+static int adsp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+ &adsp->mem_reloc);
+}
+
+static int adsp_start(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+ unsigned int val;
+
+ qcom_q6v5_prepare(&adsp->q6v5);
+
+ ret = clk_prepare_enable(adsp->xo);
+ if (ret)
+ goto disable_irqs;
+
+ dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
+ ret = pm_runtime_get_sync(adsp->dev);
+ if (ret)
+ goto disable_xo_clk;
+
+ ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
+ if (ret) {
+ dev_err(adsp->dev, "adsp clk_enable failed\n");
+ goto disable_power_domain;
+ }
+
+ /* Program boot address */
+ writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG);
+
+ /* De-assert QDSP6 stop core. QDSP6 will execute once out of reset */
+ writel(0x1, adsp->qdsp6ss_base + CORE_START_REG);
+
+ /* Trigger boot FSM to start QDSP6 */
+ writel(0x1, adsp->qdsp6ss_base + BOOT_CMD_REG);
+
+ /* Wait for core to come out of reset */
+ ret = readl_poll_timeout(adsp->qdsp6ss_base + BOOT_STATUS_REG,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+ if (ret) {
+ dev_err(adsp->dev, "failed to bootup adsp\n");
+ goto disable_adsp_clks;
+ }
+
+ ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5 * HZ));
+ if (ret == -ETIMEDOUT) {
+ dev_err(adsp->dev, "start timed out\n");
+ goto disable_adsp_clks;
+ }
+
+ return 0;
+
+disable_adsp_clks:
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+disable_power_domain:
+ dev_pm_genpd_set_performance_state(adsp->dev, 0);
+ pm_runtime_put(adsp->dev);
+disable_xo_clk:
+ clk_disable_unprepare(adsp->xo);
+disable_irqs:
+ qcom_q6v5_unprepare(&adsp->q6v5);
+
+ return ret;
+}
+
+static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
+{
+ struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
+
+ clk_disable_unprepare(adsp->xo);
+ dev_pm_genpd_set_performance_state(adsp->dev, 0);
+ pm_runtime_put(adsp->dev);
+}
+
+static int adsp_stop(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int handover;
+ int ret;
+
+ ret = qcom_q6v5_request_stop(&adsp->q6v5);
+ if (ret == -ETIMEDOUT)
+ dev_err(adsp->dev, "timed out on wait\n");
+
+ ret = qcom_adsp_shutdown(adsp);
+ if (ret)
+ dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+
+ handover = qcom_q6v5_unprepare(&adsp->q6v5);
+ if (handover)
+ qcom_adsp_pil_handover(&adsp->q6v5);
+
+ return ret;
+}
+
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int offset;
+
+ offset = da - adsp->mem_reloc;
+ if (offset < 0 || offset + len > adsp->mem_size)
+ return NULL;
+
+ return adsp->mem_region + offset;
+}
+
+static const struct rproc_ops adsp_ops = {
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+ .parse_fw = qcom_register_dump_segments,
+ .load = adsp_load,
+};
+
+static int adsp_init_clock(struct qcom_adsp *adsp)
+{
+ int i, ret;
+
+ adsp->xo = devm_clk_get(adsp->dev, "xo");
+ if (IS_ERR(adsp->xo)) {
+ ret = PTR_ERR(adsp->xo);
+ if (ret != -EPROBE_DEFER)
+ dev_err(adsp->dev, "failed to get xo clock");
+ return ret;
+ }
+
+ adsp->num_clks = ARRAY_SIZE(adsp_clk_id);
+ adsp->clks = devm_kcalloc(adsp->dev, adsp->num_clks,
+ sizeof(*adsp->clks), GFP_KERNEL);
+ if (!adsp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < adsp->num_clks; i++)
+ adsp->clks[i].id = adsp_clk_id[i];
+
+ return devm_clk_bulk_get(adsp->dev, adsp->num_clks, adsp->clks);
+}
+
+static int adsp_init_reset(struct qcom_adsp *adsp)
+{
+ adsp->pdc_sync_reset = devm_reset_control_get_exclusive(adsp->dev,
+ "pdc_sync");
+ if (IS_ERR(adsp->pdc_sync_reset)) {
+ dev_err(adsp->dev, "failed to acquire pdc_sync reset\n");
+ return PTR_ERR(adsp->pdc_sync_reset);
+ }
+
+ adsp->cc_lpass_restart = devm_reset_control_get_exclusive(adsp->dev,
+ "cc_lpass");
+ if (IS_ERR(adsp->cc_lpass_restart)) {
+ dev_err(adsp->dev, "failed to acquire cc_lpass restart\n");
+ return PTR_ERR(adsp->cc_lpass_restart);
+ }
+
+ return 0;
+}
+
+static int adsp_init_mmio(struct qcom_adsp *adsp,
+ struct platform_device *pdev)
+{
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!adsp->qdsp6ss_base) {
+ dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
+ return -ENOMEM;
+ }
+
+ syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
+ if (!syscon) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ adsp->halt_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(adsp->halt_map))
+ return PTR_ERR(adsp->halt_map);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,halt-regs",
+ 1, &adsp->halt_lpass);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+{
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(adsp->dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+
+ adsp->mem_phys = adsp->mem_reloc = r.start;
+ adsp->mem_size = resource_size(&r);
+ adsp->mem_region = devm_ioremap_wc(adsp->dev,
+ adsp->mem_phys, adsp->mem_size);
+ if (!adsp->mem_region) {
+ dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, adsp->mem_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int adsp_probe(struct platform_device *pdev)
+{
+ const struct adsp_pil_data *desc;
+ struct qcom_adsp *adsp;
+ struct rproc *rproc;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
+ desc->firmware_name, sizeof(*adsp));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ adsp = (struct qcom_adsp *)rproc->priv;
+ adsp->dev = &pdev->dev;
+ adsp->rproc = rproc;
+ platform_set_drvdata(pdev, adsp);
+
+ ret = adsp_alloc_memory_region(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_init_clock(adsp);
+ if (ret)
+ goto free_rproc;
+
+ pm_runtime_enable(adsp->dev);
+
+ ret = adsp_init_reset(adsp);
+ if (ret)
+ goto disable_pm;
+
+ ret = adsp_init_mmio(adsp, pdev);
+ if (ret)
+ goto disable_pm;
+
+ ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
+ qcom_adsp_pil_handover);
+ if (ret)
+ goto disable_pm;
+
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
+ adsp->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(adsp->dev);
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int adsp_remove(struct platform_device *pdev)
+{
+ struct qcom_adsp *adsp = platform_get_drvdata(pdev);
+
+ rproc_del(adsp->rproc);
+
+ qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
+ qcom_remove_sysmon_subdev(adsp->sysmon);
+ qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
+ pm_runtime_disable(adsp->dev);
+ rproc_free(adsp->rproc);
+
+ return 0;
+}
+
+static const struct adsp_pil_data adsp_resource_init = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adsp_of_match);
+
+static struct platform_driver adsp_pil_driver = {
+ .probe = adsp_probe,
+ .remove = adsp_remove,
+ .driver = {
+ .name = "qcom_q6v5_adsp",
+ .of_match_table = adsp_of_match,
+ },
+};
+
+module_platform_driver(adsp_pil_driver);
+MODULE_DESCRIPTION("QTI SDM845 ADSP Peripheral Image Loader");
+MODULE_LICENSE("GPL v2");
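From a consumer's point of view, the remoteproc registered by this new driver is driven through the generic rproc API. A hedged sketch of a kernel client booting it is shown below; the "qcom,adsp" phandle property and the example_ names are assumptions, not part of this patch:

/* Sketch of a kernel client booting the ADSP rproc; names are assumed. */
#include <linux/of.h>
#include <linux/remoteproc.h>

static int example_boot_adsp(struct device *dev)
{
	struct rproc *adsp;
	phandle ph;
	int ret;

	if (of_property_read_u32(dev->of_node, "qcom,adsp", &ph))
		return -EINVAL;

	adsp = rproc_get_by_phandle(ph);
	if (!adsp)
		return -EPROBE_DEFER;

	ret = rproc_boot(adsp);	/* ends up in adsp_load()/adsp_start() above */
	return ret;
}

/* and later, to stop it: rproc_shutdown(adsp); */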
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_mss.c
index d7a4b9eca5d2..01be7314e176 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1,5 +1,5 @@
/*
- * Qualcomm Peripheral Image Loader
+ * Qualcomm self-authenticating modem subsystem remoteproc driver
*
* Copyright (C) 2016 Linaro Ltd.
* Copyright (C) 2014 Sony Mobile Communications AB
@@ -149,6 +149,7 @@ struct q6v5 {
u32 halt_nc;
struct reset_control *mss_restart;
+ struct reset_control *pdc_reset;
struct qcom_q6v5 q6v5;
@@ -166,6 +167,10 @@ struct q6v5 {
bool running;
+ bool dump_mba_loaded;
+ unsigned long dump_segment_mask;
+ unsigned long dump_complete_mask;
+
phys_addr_t mba_phys;
void *mba_region;
size_t mba_size;
@@ -347,10 +352,17 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
static int q6v5_reset_assert(struct q6v5 *qproc)
{
- if (qproc->has_alt_reset)
- return reset_control_reset(qproc->mss_restart);
- else
- return reset_control_assert(qproc->mss_restart);
+ int ret;
+
+ if (qproc->has_alt_reset) {
+ reset_control_assert(qproc->pdc_reset);
+ ret = reset_control_reset(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ } else {
+ ret = reset_control_assert(qproc->mss_restart);
+ }
+
+ return ret;
}
static int q6v5_reset_deassert(struct q6v5 *qproc)
@@ -358,9 +370,11 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
int ret;
if (qproc->has_alt_reset) {
+ reset_control_assert(qproc->pdc_reset);
writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
+ reset_control_deassert(qproc->pdc_reset);
} else {
ret = reset_control_deassert(qproc->mss_restart);
}
@@ -669,6 +683,171 @@ static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
return true;
}
+static int q6v5_mba_load(struct q6v5 *qproc)
+{
+ int ret;
+ int xfermemop_ret;
+
+ qcom_q6v5_prepare(&qproc->q6v5);
+
+ ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable proxy supplies\n");
+ goto disable_irqs;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable proxy clocks\n");
+ goto disable_proxy_reg;
+ }
+
+ ret = q6v5_regulator_enable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable supplies\n");
+ goto disable_proxy_clk;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable reset clocks\n");
+ goto disable_vdd;
+ }
+
+ ret = q6v5_reset_deassert(qproc);
+ if (ret) {
+ dev_err(qproc->dev, "failed to deassert mss restart\n");
+ goto disable_reset_clks;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable clocks\n");
+ goto assert_reset;
+ }
+
+ /* Assign MBA image access in DDR to q6 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ qproc->mba_phys, qproc->mba_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mba memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
+ writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+
+ ret = q6v5proc_reset(qproc);
+ if (ret)
+ goto reclaim_mba;
+
+ ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "MBA boot timed out\n");
+ goto halt_axi_ports;
+ } else if (ret != RMB_MBA_XPU_UNLOCKED &&
+ ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
+ dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ qproc->dump_mba_loaded = true;
+ return 0;
+
+halt_axi_ports:
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+reclaim_mba:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer, system may become unstable\n");
+ }
+
+disable_active_clks:
+ q6v5_clk_disable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+assert_reset:
+ q6v5_reset_assert(qproc);
+disable_reset_clks:
+ q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+disable_vdd:
+ q6v5_regulator_disable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+disable_proxy_clk:
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+disable_proxy_reg:
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+disable_irqs:
+ qcom_q6v5_unprepare(&qproc->q6v5);
+
+ return ret;
+}
+
+static void q6v5_mba_reclaim(struct q6v5 *qproc)
+{
+ int ret;
+ u32 val;
+
+ qproc->dump_mba_loaded = false;
+
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+ if (qproc->version == MSS_MSM8996) {
+ /*
+ * Assert the QDSP6 I/O and memory clamps to avoid high MX current
+ * during LPASS/MSS restart.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+ QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
+
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, qproc->mpss_phys,
+ qproc->mpss_size);
+ WARN_ON(ret);
+
+ q6v5_reset_assert(qproc);
+
+ q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+ q6v5_clk_disable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+ q6v5_regulator_disable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+
+ /* In case of failure or a coredump scenario where reclaiming MBA memory
+ * could not happen, reclaim it here.
+ */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ WARN_ON(ret);
+
+ ret = qcom_q6v5_unprepare(&qproc->q6v5);
+ if (ret) {
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ }
+}
+
static int q6v5_mpss_load(struct q6v5 *qproc)
{
const struct elf32_phdr *phdrs;
@@ -721,6 +900,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
}
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
+ qproc->mpss_reloc = mpss_reloc;
/* Load firmware segments */
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -784,80 +964,42 @@ release_firmware:
return ret < 0 ? ret : 0;
}
-static int q6v5_start(struct rproc *rproc)
+static void qcom_q6v5_dump_segment(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest)
{
- struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
- int xfermemop_ret;
- int ret;
-
- qcom_q6v5_prepare(&qproc->q6v5);
-
- ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
- qproc->proxy_reg_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable proxy supplies\n");
- goto disable_irqs;
- }
-
- ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
- qproc->proxy_clk_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable proxy clocks\n");
- goto disable_proxy_reg;
- }
+ int ret = 0;
+ struct q6v5 *qproc = rproc->priv;
+ unsigned long mask = BIT((unsigned long)segment->priv);
+ void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
- ret = q6v5_regulator_enable(qproc, qproc->active_regs,
- qproc->active_reg_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable supplies\n");
- goto disable_proxy_clk;
- }
+ /* Unlock mba before copying segments */
+ if (!qproc->dump_mba_loaded)
+ ret = q6v5_mba_load(qproc);
- ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
- qproc->reset_clk_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable reset clocks\n");
- goto disable_vdd;
- }
+ if (!ptr || ret)
+ memset(dest, 0xff, segment->size);
+ else
+ memcpy(dest, ptr, segment->size);
- ret = q6v5_reset_deassert(qproc);
- if (ret) {
- dev_err(qproc->dev, "failed to deassert mss restart\n");
- goto disable_reset_clks;
- }
+ qproc->dump_segment_mask |= mask;
- ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
- qproc->active_clk_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable clocks\n");
- goto assert_reset;
- }
-
- /* Assign MBA image access in DDR to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
- qproc->mba_phys, qproc->mba_size);
- if (ret) {
- dev_err(qproc->dev,
- "assigning Q6 access to mba memory failed: %d\n", ret);
- goto disable_active_clks;
+ /* Reclaim mba after copying segments */
+ if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
+ if (qproc->dump_mba_loaded)
+ q6v5_mba_reclaim(qproc);
}
+}
- writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+static int q6v5_start(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int xfermemop_ret;
+ int ret;
- ret = q6v5proc_reset(qproc);
+ ret = q6v5_mba_load(qproc);
if (ret)
- goto reclaim_mba;
-
- ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
- if (ret == -ETIMEDOUT) {
- dev_err(qproc->dev, "MBA boot timed out\n");
- goto halt_axi_ports;
- } else if (ret != RMB_MBA_XPU_UNLOCKED &&
- ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
- dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
- ret = -EINVAL;
- goto halt_axi_ports;
- }
+ return ret;
dev_info(qproc->dev, "MBA booted, loading mpss\n");
@@ -877,6 +1019,9 @@ static int q6v5_start(struct rproc *rproc)
if (xfermemop_ret)
dev_err(qproc->dev,
"Failed to reclaim mba buffer system may become unstable\n");
+
+ /* Reset Dump Segment Mask */
+ qproc->dump_segment_mask = 0;
qproc->running = true;
return 0;
@@ -886,42 +1031,7 @@ reclaim_mpss:
false, qproc->mpss_phys,
qproc->mpss_size);
WARN_ON(xfermemop_ret);
-
-halt_axi_ports:
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
-reclaim_mba:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
- qproc->mba_size);
- if (xfermemop_ret) {
- dev_err(qproc->dev,
- "Failed to reclaim mba buffer, system may become unstable\n");
- }
-
-disable_active_clks:
- q6v5_clk_disable(qproc->dev, qproc->active_clks,
- qproc->active_clk_count);
-
-assert_reset:
- q6v5_reset_assert(qproc);
-disable_reset_clks:
- q6v5_clk_disable(qproc->dev, qproc->reset_clks,
- qproc->reset_clk_count);
-disable_vdd:
- q6v5_regulator_disable(qproc, qproc->active_regs,
- qproc->active_reg_count);
-disable_proxy_clk:
- q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
- qproc->proxy_clk_count);
-disable_proxy_reg:
- q6v5_regulator_disable(qproc, qproc->proxy_regs,
- qproc->proxy_reg_count);
-
-disable_irqs:
- qcom_q6v5_unprepare(&qproc->q6v5);
+ q6v5_mba_reclaim(qproc);
return ret;
}
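The coredump support added here is driven by two bitmasks: dump_complete_mask collects one bit per registered segment, dump_segment_mask accumulates as segments are copied, and the MBA is reclaimed once the two match. A stand-alone model of that bookkeeping (illustrative only, three segments assumed):

/* Minimal model of the dump mask bookkeeping; not the driver itself. */
#include <stdio.h>

#define BIT(n)	(1UL << (n))

static unsigned long dump_complete_mask, dump_segment_mask;

static void register_segment(unsigned long i)
{
	dump_complete_mask |= BIT(i);
}

static void dump_segment(unsigned long i)
{
	dump_segment_mask |= BIT(i);
	if (dump_segment_mask == dump_complete_mask)
		printf("all segments dumped, reclaim the MBA\n");
}

int main(void)
{
	unsigned long i;

	for (i = 0; i < 3; i++)
		register_segment(i);
	dump_segment(0);
	dump_segment(2);
	dump_segment(1);	/* last one triggers the reclaim message */
	return 0;
}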
@@ -930,7 +1040,6 @@ static int q6v5_stop(struct rproc *rproc)
{
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
- u32 val;
qproc->running = false;
@@ -938,40 +1047,7 @@ static int q6v5_stop(struct rproc *rproc)
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
- if (qproc->version == MSS_MSM8996) {
- /*
- * To avoid high MX current during LPASS/MSS restart.
- */
- val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
- QDSP6v56_CLAMP_QMC_MEM;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- }
-
-
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
- qproc->mpss_phys, qproc->mpss_size);
- WARN_ON(ret);
-
- q6v5_reset_assert(qproc);
-
- ret = qcom_q6v5_unprepare(&qproc->q6v5);
- if (ret) {
- q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
- qproc->proxy_clk_count);
- q6v5_regulator_disable(qproc, qproc->proxy_regs,
- qproc->proxy_reg_count);
- }
-
- q6v5_clk_disable(qproc->dev, qproc->reset_clks,
- qproc->reset_clk_count);
- q6v5_clk_disable(qproc->dev, qproc->active_clks,
- qproc->active_clk_count);
- q6v5_regulator_disable(qproc, qproc->active_regs,
- qproc->active_reg_count);
+ q6v5_mba_reclaim(qproc);
return 0;
}
@@ -988,10 +1064,52 @@ static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
return qproc->mpss_region + offset;
}
+static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
+ const struct firmware *mba_fw)
+{
+ const struct firmware *fw;
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ struct q6v5 *qproc = rproc->priv;
+ unsigned long i;
+ int ret;
+
+ ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load modem.mdt\n");
+ return ret;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+ qproc->dump_complete_mask = 0;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!q6v5_phdr_valid(phdr))
+ continue;
+
+ ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
+ phdr->p_memsz,
+ qcom_q6v5_dump_segment,
+ (void *)i);
+ if (ret)
+ break;
+
+ qproc->dump_complete_mask |= BIT(i);
+ }
+
+ release_firmware(fw);
+ return ret;
+}
+
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.da_to_va = q6v5_da_to_va,
+ .parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
};
@@ -1066,12 +1184,21 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
static int q6v5_init_reset(struct q6v5 *qproc)
{
qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
- NULL);
+ "mss_restart");
if (IS_ERR(qproc->mss_restart)) {
dev_err(qproc->dev, "failed to acquire mss restart\n");
return PTR_ERR(qproc->mss_restart);
}
+ if (qproc->has_alt_reset) {
+ qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
+ "pdc_reset");
+ if (IS_ERR(qproc->pdc_reset)) {
+ dev_err(qproc->dev, "failed to acquire pdc reset\n");
+ return PTR_ERR(qproc->pdc_reset);
+ }
+ }
+
return 0;
}
@@ -1132,6 +1259,9 @@ static int q6v5_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
+ if (desc->need_mem_protection && !qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
desc->hexagon_mba_image, sizeof(*qproc));
if (!rproc) {
@@ -1192,12 +1322,12 @@ static int q6v5_probe(struct platform_device *pdev)
}
qproc->active_reg_count = ret;
+ qproc->has_alt_reset = desc->has_alt_reset;
ret = q6v5_init_reset(qproc);
if (ret)
goto free_rproc;
qproc->version = desc->version;
- qproc->has_alt_reset = desc->has_alt_reset;
qproc->need_mem_protection = desc->need_mem_protection;
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
@@ -1368,11 +1498,11 @@ static struct platform_driver q6v5_driver = {
.probe = q6v5_probe,
.remove = q6v5_remove,
.driver = {
- .name = "qcom-q6v5-pil",
+ .name = "qcom-q6v5-mss",
.of_match_table = q6v5_of_match,
},
};
module_platform_driver(q6v5_driver);
-MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
+MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_adsp_pil.c b/drivers/remoteproc/qcom_q6v5_pas.c
index d4339a6da616..b1e63fcd5fdf 100644
--- a/drivers/remoteproc/qcom_adsp_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -342,6 +342,16 @@ static const struct adsp_data adsp_resource_init = {
.ssctl_id = 0x14,
};
+static const struct adsp_data cdsp_resource_init = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
@@ -352,10 +362,24 @@ static const struct adsp_data slpi_resource_init = {
.ssctl_id = 0x16,
};
+static const struct adsp_data wcss_resource_init = {
+ .crash_reason_smem = 421,
+ .firmware_name = "wcnss.mdt",
+ .pas_id = 6,
+ .ssr_name = "mpss",
+ .sysmon_name = "wcnss",
+ .ssctl_id = 0x12,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+ { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
+ { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
+ { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
+ { .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
@@ -364,11 +388,11 @@ static struct platform_driver adsp_driver = {
.probe = adsp_probe,
.remove = adsp_remove,
.driver = {
- .name = "qcom_adsp_pil",
+ .name = "qcom_q6v5_pas",
.of_match_table = adsp_of_match,
},
};
module_platform_driver(adsp_driver);
-MODULE_DESCRIPTION("Qualcomm MSM8974/MSM8996 ADSP Peripherial Image Loader");
+MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver");
MODULE_LICENSE("GPL v2");
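Extending the PAS driver to another subsystem follows the same two-step pattern seen in this hunk: a new adsp_data instance plus a compatible entry pointing at it. A hypothetical example, where the compatible string, SMEM item, pas_id and ssctl_id are all invented:

/* Hypothetical only -- every value below is invented for illustration. */
static const struct adsp_data example_resource_init = {
	.crash_reason_smem = 999,
	.firmware_name = "example.mdt",
	.pas_id = 99,
	.has_aggre2_clk = false,
	.ssr_name = "example",
	.sysmon_name = "example",
	.ssctl_id = 0x1f,
};

/* ...and the corresponding adsp_of_match[] entry: */
	{ .compatible = "qcom,example-pas", .data = &example_resource_init },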
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index aa6206706fe3..54ec38fc5dca 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -53,6 +53,11 @@ typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
+static int rproc_alloc_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem);
+static int rproc_release_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem);
+
/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
@@ -140,6 +145,22 @@ static void rproc_disable_iommu(struct rproc *rproc)
iommu_domain_free(domain);
}
+static phys_addr_t rproc_va_to_pa(void *cpu_addr)
+{
+ /*
+ * Return the physical address corresponding to the virtual address:
+ * - in vmalloc space: the region was ioremapped or allocated with dma_alloc_coherent()
+ * - in the kernel linear map: the region was allocated from the generic DMA memory pool
+ */
+ if (is_vmalloc_addr(cpu_addr)) {
+ return page_to_phys(vmalloc_to_page(cpu_addr)) +
+ offset_in_page(cpu_addr);
+ }
+
+ WARN_ON(!virt_addr_valid(cpu_addr));
+ return virt_to_phys(cpu_addr);
+}
+
/**
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
* @rproc: handle of a remote processor
@@ -201,27 +222,128 @@ out:
}
EXPORT_SYMBOL(rproc_da_to_va);
+/**
+ * rproc_find_carveout_by_name() - lookup the carveout region by a name
+ * @rproc: handle of a remote processor
+ * @name,..: carveout name to find (standard printf format)
+ *
+ * A platform driver can register pre-allocated carveouts (physically
+ * contiguous memory regions) before rproc firmware loading and the
+ * associated resource table analysis. These regions may be memory regions
+ * internal to the coprocessor or DDR regions with specific attributes.
+ *
+ * This function is a helper function with which we can go over the
+ * allocated carveouts and return associated region characteristics like
+ * coprocessor address, length or processor virtual address.
+ *
+ * Return: a valid pointer on carveout entry on success or NULL on failure.
+ */
+struct rproc_mem_entry *
+rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
+{
+ va_list args;
+ char _name[32];
+ struct rproc_mem_entry *carveout, *mem = NULL;
+
+ if (!name)
+ return NULL;
+
+ va_start(args, name);
+ vsnprintf(_name, sizeof(_name), name, args);
+ va_end(args);
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ /* Compare carveout and requested names */
+ if (!strcmp(carveout->name, _name)) {
+ mem = carveout;
+ break;
+ }
+ }
+
+ return mem;
+}
+
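A platform driver that pre-registered a carveout can later look it up by name to retrieve its characteristics; a hedged usage sketch of the new helper (the name format arguments and the surrounding variables are illustrative):

/* Sketch: look up the carveout that backs vdev0's first vring. */
	struct rproc_mem_entry *mem;

	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", 0, 0);
	if (!mem)
		return -ENOMEM;

	dev_dbg(dev, "vring0 carveout: da 0x%x, len 0x%x, va %pK\n",
		mem->da, mem->len, mem->va);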
+/**
+ * rproc_check_carveout_da() - Check specified carveout da configuration
+ * @rproc: handle of a remote processor
+ * @mem: pointer on carveout to check
+ * @da: area device address
+ * @len: associated area size
+ *
+ * This function is a helper to verify that the requested device area
+ * (da, len pair) is part of the specified carveout.
+ *
+ * Return: 0 if the carveout matches the request, -ENOMEM otherwise
+ */
+int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem,
+ u32 da, u32 len)
+{
+ struct device *dev = &rproc->dev;
+ int delta = 0;
+
+ /* Check requested resource length */
+ if (len > mem->len) {
+ dev_err(dev, "Registered carveout doesn't fit len request\n");
+ return -ENOMEM;
+ }
+
+ if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
+ /* Update existing carveout da */
+ mem->da = da;
+ } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
+ delta = da - mem->da;
+
+ /* Check requested resource belongs to registered carveout */
+ if (delta < 0) {
+ dev_err(dev,
+ "Registered carveout doesn't fit da request\n");
+ return -ENOMEM;
+ }
+
+ if (delta + len > mem->len) {
+ dev_err(dev,
+ "Registered carveout doesn't fit len request\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
struct rproc *rproc = rvdev->rproc;
struct device *dev = &rproc->dev;
struct rproc_vring *rvring = &rvdev->vring[i];
struct fw_rsc_vdev *rsc;
- dma_addr_t dma;
- void *va;
int ret, size, notifyid;
+ struct rproc_mem_entry *mem;
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
- /*
- * Allocate non-cacheable memory for the vring. In the future
- * this call will also configure the IOMMU for us
- */
- va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
- if (!va) {
- dev_err(dev->parent, "dma_alloc_coherent failed\n");
- return -EINVAL;
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+
+ /* Search for pre-registered carveout */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
+ i);
+ if (mem) {
+ if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
+ return -ENOMEM;
+ } else {
+ /* Register carveout in list */
+ mem = rproc_mem_entry_init(dev, 0, 0, size, rsc->vring[i].da,
+ rproc_alloc_carveout,
+ rproc_release_carveout,
+ "vdev%dvring%d",
+ rvdev->index, i);
+ if (!mem) {
+ dev_err(dev, "Can't allocate memory entry structure\n");
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
}
/*
@@ -232,7 +354,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
if (ret < 0) {
dev_err(dev, "idr_alloc failed: %d\n", ret);
- dma_free_coherent(dev->parent, size, va, dma);
return ret;
}
notifyid = ret;
@@ -241,21 +362,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
if (notifyid > rproc->max_notifyid)
rproc->max_notifyid = notifyid;
- dev_dbg(dev, "vring%d: va %pK dma %pad size 0x%x idr %d\n",
- i, va, &dma, size, notifyid);
-
- rvring->va = va;
- rvring->dma = dma;
rvring->notifyid = notifyid;
- /*
- * Let the rproc know the notifyid and da of this vring.
- * Not all platforms use dma_alloc_coherent to automatically
- * set up the iommu. In this case the device address (da) will
- * hold the physical address and not the device address.
- */
- rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
- rsc->vring[i].da = dma;
+ /* Let the rproc know the notifyid of this vring. */
rsc->vring[i].notifyid = notifyid;
return 0;
}
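Conversely, a platform driver can pre-register a fixed vring carveout before firmware parsing so that the lookup above finds it instead of the default dma_alloc_coherent() path. A hedged sketch mirroring the rproc_mem_entry_init() call made here; the fixed device address, the SZ_4K size and the my_*_carveout callbacks are assumptions:

/* Sketch: pre-register a fixed carveout named "vdev0vring0"; values assumed. */
	struct rproc_mem_entry *mem;

	mem = rproc_mem_entry_init(dev, NULL, 0, SZ_4K, 0x10040000,
				   my_alloc_carveout, my_release_carveout,
				   "vdev0vring0");
	if (!mem)
		return -ENOMEM;

	rproc_add_carveout(rproc, mem);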
@@ -287,12 +396,10 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
void rproc_free_vring(struct rproc_vring *rvring)
{
- int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
struct rproc *rproc = rvring->rvdev->rproc;
int idx = rvring->rvdev->vring - rvring;
struct fw_rsc_vdev *rsc;
- dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
idr_remove(&rproc->notifyids, rvring->notifyid);
/* reset resource entry info */
@@ -379,6 +486,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
rvdev->id = rsc->id;
rvdev->rproc = rproc;
+ rvdev->index = rproc->nb_vdev++;
/* parse the vrings */
for (i = 0; i < rsc->num_of_vrings; i++) {
@@ -423,9 +531,6 @@ void rproc_vdev_release(struct kref *ref)
for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
rvring = &rvdev->vring[id];
- if (!rvring->va)
- continue;
-
rproc_free_vring(rvring);
}
@@ -584,61 +689,31 @@ out:
}
/**
- * rproc_handle_carveout() - handle phys contig memory allocation requests
+ * rproc_alloc_carveout() - allocate specified carveout
* @rproc: rproc handle
- * @rsc: the resource entry
- * @avail: size of available data (for image validation)
- *
- * This function will handle firmware requests for allocation of physically
- * contiguous memory regions.
- *
- * These request entries should come first in the firmware's resource table,
- * as other firmware entries might request placing other data objects inside
- * these memory regions (e.g. data/code segments, trace resource entries, ...).
+ * @mem: the memory entry to allocate
*
- * Allocating memory this way helps utilizing the reserved physical memory
- * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
- * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
- * pressure is important; it may have a substantial impact on performance.
+ * This function allocates the specified memory entry @mem using
+ * dma_alloc_coherent() as the default allocator.
*/
-static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc,
- int offset, int avail)
+static int rproc_alloc_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
{
- struct rproc_mem_entry *carveout, *mapping;
+ struct rproc_mem_entry *mapping = NULL;
struct device *dev = &rproc->dev;
dma_addr_t dma;
void *va;
int ret;
- if (sizeof(*rsc) > avail) {
- dev_err(dev, "carveout rsc is truncated\n");
- return -EINVAL;
- }
-
- /* make sure reserved bytes are zeroes */
- if (rsc->reserved) {
- dev_err(dev, "carveout rsc has non zero reserved bytes\n");
- return -EINVAL;
- }
-
- dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
- rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
-
- carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
- if (!carveout)
- return -ENOMEM;
-
- va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
+ va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
if (!va) {
dev_err(dev->parent,
- "failed to allocate dma memory: len 0x%x\n", rsc->len);
- ret = -ENOMEM;
- goto free_carv;
+ "failed to allocate dma memory: len 0x%x\n", mem->len);
+ return -ENOMEM;
}
dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
- va, &dma, rsc->len);
+ va, &dma, mem->len);
/*
* Ok, this is non-standard.
@@ -657,15 +732,23 @@ static int rproc_handle_carveout(struct rproc *rproc,
* to use the iommu-based DMA API: we expect 'dma' to contain the
* physical address in this case.
*/
- if (rproc->domain) {
+
+ if (mem->da != FW_RSC_ADDR_ANY) {
+ if (!rproc->domain) {
+ dev_err(dev->parent,
+ "Bad carveout rsc configuration\n");
+ ret = -ENOMEM;
+ goto dma_free;
+ }
+
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
ret = -ENOMEM;
goto dma_free;
}
- ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
- rsc->flags);
+ ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
+ mem->flags);
if (ret) {
dev_err(dev, "iommu_map failed: %d\n", ret);
goto free_mapping;
@@ -678,52 +761,219 @@ static int rproc_handle_carveout(struct rproc *rproc,
* We can't trust the remote processor not to change the
* resource table, so we must maintain this info independently.
*/
- mapping->da = rsc->da;
- mapping->len = rsc->len;
+ mapping->da = mem->da;
+ mapping->len = mem->len;
list_add_tail(&mapping->node, &rproc->mappings);
dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
- rsc->da, &dma);
+ mem->da, &dma);
+ } else {
+ mem->da = (u32)dma;
}
- /*
- * Some remote processors might need to know the pa
- * even though they are behind an IOMMU. E.g., OMAP4's
- * remote M3 processor needs this so it can control
- * on-chip hardware accelerators that are not behind
- * the IOMMU, and therefor must know the pa.
- *
- * Generally we don't want to expose physical addresses
- * if we don't have to (remote processors are generally
- * _not_ trusted), so we might want to do this only for
- * remote processor that _must_ have this (e.g. OMAP4's
- * dual M3 subsystem).
- *
- * Non-IOMMU processors might also want to have this info.
- * In this case, the device address and the physical address
- * are the same.
- */
- rsc->pa = dma;
-
- carveout->va = va;
- carveout->len = rsc->len;
- carveout->dma = dma;
- carveout->da = rsc->da;
-
- list_add_tail(&carveout->node, &rproc->carveouts);
+ mem->dma = (u32)dma;
+ mem->va = va;
return 0;
free_mapping:
kfree(mapping);
dma_free:
- dma_free_coherent(dev->parent, rsc->len, va, dma);
-free_carv:
- kfree(carveout);
+ dma_free_coherent(dev->parent, mem->len, va, dma);
return ret;
}
-/*
+/**
+ * rproc_release_carveout() - release acquired carveout
+ * @rproc: rproc handle
+ * @mem: the memory entry to release
+ *
+ * This function releases the specified memory entry @mem previously
+ * allocated by @rproc via rproc_alloc_carveout().
+ */
+static int rproc_release_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = &rproc->dev;
+
+ /* clean up carveout allocations */
+ dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
+ return 0;
+}
+
+/**
+ * rproc_handle_carveout() - handle phys contig memory allocation requests
+ * @rproc: rproc handle
+ * @rsc: the resource entry
+ * @avail: size of available data (for image validation)
+ *
+ * This function will handle firmware requests for allocation of physically
+ * contiguous memory regions.
+ *
+ * These request entries should come first in the firmware's resource table,
+ * as other firmware entries might request placing other data objects inside
+ * these memory regions (e.g. data/code segments, trace resource entries, ...).
+ *
+ * Allocating memory this way helps utilizing the reserved physical memory
+ * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
+ * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
+ * pressure is important; it may have a substantial impact on performance.
+ */
+static int rproc_handle_carveout(struct rproc *rproc,
+ struct fw_rsc_carveout *rsc,
+ int offset, int avail)
+{
+ struct rproc_mem_entry *carveout;
+ struct device *dev = &rproc->dev;
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(dev, "carveout rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(dev, "carveout rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
+ rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
+
+ /*
+ * Check if the carveout rsc is already part of a registered carveout;
+ * search by name, then check the da and length
+ */
+ carveout = rproc_find_carveout_by_name(rproc, rsc->name);
+
+ if (carveout) {
+ if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
+ dev_err(dev,
+ "Carveout already associated to resource table\n");
+ return -ENOMEM;
+ }
+
+ if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
+ return -ENOMEM;
+
+ /* Update memory carveout with resource table info */
+ carveout->rsc_offset = offset;
+ carveout->flags = rsc->flags;
+
+ return 0;
+ }
+
+ /* Register carveout in list */
+ carveout = rproc_mem_entry_init(dev, 0, 0, rsc->len, rsc->da,
+ rproc_alloc_carveout,
+ rproc_release_carveout, rsc->name);
+ if (!carveout) {
+ dev_err(dev, "Can't allocate memory entry structure\n");
+ return -ENOMEM;
+ }
+
+ carveout->flags = rsc->flags;
+ carveout->rsc_offset = offset;
+ rproc_add_carveout(rproc, carveout);
+
+ return 0;
+}
+
+/**
+ * rproc_add_carveout() - register an allocated carveout region
+ * @rproc: rproc handle
+ * @mem: memory entry to register
+ *
+ * This function registers the specified memory entry in the @rproc carveouts
+ * list. The specified carveout should have been allocated before registering.
+ */
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
+{
+ list_add_tail(&mem->node, &rproc->carveouts);
+}
+EXPORT_SYMBOL(rproc_add_carveout);
+
+/**
+ * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
+ * @dev: pointer to device struct
+ * @va: virtual address
+ * @dma: dma address
+ * @len: memory carveout length
+ * @da: device address
+ * @alloc: memory carveout allocation function
+ * @release: memory carveout release function
+ * @name: carveout name
+ *
+ * This function allocates an rproc_mem_entry struct and fills it with the
+ * parameters provided by the client.
+ */
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+ void *va, dma_addr_t dma, int len, u32 da,
+ int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+ int (*release)(struct rproc *, struct rproc_mem_entry *),
+ const char *name, ...)
+{
+ struct rproc_mem_entry *mem;
+ va_list args;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return mem;
+
+ mem->va = va;
+ mem->dma = dma;
+ mem->da = da;
+ mem->len = len;
+ mem->alloc = alloc;
+ mem->release = release;
+ mem->rsc_offset = FW_RSC_ADDR_ANY;
+ mem->of_resm_idx = -1;
+
+ va_start(args, name);
+ vsnprintf(mem->name, sizeof(mem->name), name, args);
+ va_end(args);
+
+ return mem;
+}
+EXPORT_SYMBOL(rproc_mem_entry_init);
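/*
 * Editor's sketch (not part of this patch): one way a platform driver might
 * use the two exported helpers above to pre-register a carveout it has
 * already set up itself. my_rproc_prepare(), struct my_rproc and
 * my_rproc_mem_release() are hypothetical names; only rproc_mem_entry_init()
 * and rproc_add_carveout() come from this series.
 */
static int my_rproc_prepare(struct rproc *rproc)
{
        struct my_rproc *ddata = rproc->priv;  /* hypothetical driver data */
        struct rproc_mem_entry *mem;

        /* Memory is already mapped by the driver, so no alloc() callback. */
        mem = rproc_mem_entry_init(ddata->dev, ddata->va, ddata->dma,
                                   ddata->size, ddata->da,
                                   NULL, my_rproc_mem_release,
                                   "vdev0buffer");
        if (!mem)
                return -ENOMEM;

        rproc_add_carveout(rproc, mem);

        return 0;
}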
+
+/**
+ * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct
+ * from a reserved memory phandle
+ * @dev: pointer to device struct
+ * @of_resm_idx: reserved memory phandle index in "memory-region"
+ * @len: memory carveout length
+ * @da: device address
+ * @name: carveout name
+ *
+ * This function allocates an rproc_mem_entry struct and fills it with the
+ * parameters provided by the client.
+ */
+struct rproc_mem_entry *
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+ u32 da, const char *name, ...)
+{
+ struct rproc_mem_entry *mem;
+ va_list args;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return mem;
+
+ mem->da = da;
+ mem->len = len;
+ mem->rsc_offset = FW_RSC_ADDR_ANY;
+ mem->of_resm_idx = of_resm_idx;
+
+ va_start(args, name);
+ vsnprintf(mem->name, sizeof(mem->name), name, args);
+ va_end(args);
+
+ return mem;
+}
+EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
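/*
 * Editor's sketch (assumption, not from this patch): registering a carveout
 * backed by the first "memory-region" phandle of an rproc platform device.
 * my_rproc_add_resm_carveout() is a hypothetical helper, and passing the
 * region's physical address as the device address is only an assumption
 * about what the firmware expects.
 */
static int my_rproc_add_resm_carveout(struct rproc *rproc, struct device *dev)
{
        struct rproc_mem_entry *mem;
        struct device_node *node;
        struct resource res;
        int ret;

        node = of_parse_phandle(dev->of_node, "memory-region", 0);
        if (!node)
                return -ENODEV;

        ret = of_address_to_resource(node, 0, &res);
        of_node_put(node);
        if (ret)
                return ret;

        mem = rproc_of_resm_mem_entry_init(dev, 0, resource_size(&res),
                                           (u32)res.start, "rsc_table");
        if (!mem)
                return -ENOMEM;

        rproc_add_carveout(rproc, mem);

        return 0;
}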
+
+/**
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
*/
@@ -845,6 +1095,70 @@ static void rproc_unprepare_subdevices(struct rproc *rproc)
}
/**
+ * rproc_alloc_registered_carveouts() - allocate all carveouts registered
+ * in the list
+ * @rproc: the remote processor handle
+ *
+ * This function parses the registered carveout list, performs allocation
+ * if an alloc() callback is registered, and updates the resource table
+ * information if rsc_offset is set.
+ *
+ * Return: 0 on success
+ */
+static int rproc_alloc_registered_carveouts(struct rproc *rproc)
+{
+ struct rproc_mem_entry *entry, *tmp;
+ struct fw_rsc_carveout *rsc;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
+ if (entry->alloc) {
+ ret = entry->alloc(rproc, entry);
+ if (ret) {
+ dev_err(dev, "Unable to allocate carveout %s: %d\n",
+ entry->name, ret);
+ return -ENOMEM;
+ }
+ }
+
+ if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
+ /* update resource table */
+ rsc = (void *)rproc->table_ptr + entry->rsc_offset;
+
+ /*
+ * Some remote processors might need to know the pa
+ * even though they are behind an IOMMU. E.g., OMAP4's
+ * remote M3 processor needs this so it can control
+ * on-chip hardware accelerators that are not behind
+ * the IOMMU, and therefore must know the pa.
+ *
+ * Generally we don't want to expose physical addresses
+ * if we don't have to (remote processors are generally
+ * _not_ trusted), so we might want to do this only for
+ * remote processor that _must_ have this (e.g. OMAP4's
+ * dual M3 subsystem).
+ *
+ * Non-IOMMU processors might also want to have this info.
+ * In this case, the device address and the physical address
+ * are the same.
+ */
+
+ /* Use va if defined else dma to generate pa */
+ if (entry->va)
+ rsc->pa = (u32)rproc_va_to_pa(entry->va);
+ else
+ rsc->pa = (u32)entry->dma;
+
+ rsc->da = entry->da;
+ rsc->len = entry->len;
+ }
+ }
+
+ return 0;
+}
+
+/**
* rproc_coredump_cleanup() - clean up dump_segments list
* @rproc: the remote processor handle
*/
@@ -896,8 +1210,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)
/* clean up carveout allocations */
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
- dma_free_coherent(dev->parent, entry->len, entry->va,
- entry->dma);
+ if (entry->release)
+ entry->release(rproc, entry);
list_del(&entry->node);
kfree(entry);
}
@@ -1009,6 +1323,9 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* reset max_notifyid */
rproc->max_notifyid = -1;
+ /* reset handled vdev */
+ rproc->nb_vdev = 0;
+
/* handle fw resources which are required to boot rproc */
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
@@ -1016,6 +1333,14 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up_resources;
}
+ /* Allocate carveout resources associated to rproc */
+ ret = rproc_alloc_registered_carveouts(rproc);
+ if (ret) {
+ dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+ ret);
+ goto clean_up_resources;
+ }
+
ret = rproc_start(rproc, fw);
if (ret)
goto clean_up_resources;
@@ -1122,6 +1447,44 @@ int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
EXPORT_SYMBOL(rproc_coredump_add_segment);
/**
+ * rproc_coredump_add_custom_segment() - add custom coredump segment
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ * @dumpfn: custom dump function called for each segment during coredump
+ * @priv: private data
+ *
+ * Add device memory to the list of segments to be included in the coredump
+ * and associate the segment with the given custom dump function and private
+ * data.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest),
+ void *priv)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+ segment->priv = priv;
+ segment->dump = dumpfn;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
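/*
 * Editor's sketch (not part of this patch): pairing the helper above with a
 * driver-specific dump callback. my_rproc_dump_regs(), struct my_rproc and
 * the MY_REGS_* constants are hypothetical.
 */
static void my_rproc_dump_regs(struct rproc *rproc,
                               struct rproc_dump_segment *segment, void *dest)
{
        struct my_rproc *ddata = rproc->priv;  /* hypothetical driver data */

        /* Fill the segment from a device iomem window instead of rproc_da_to_va(). */
        memcpy_fromio(dest, ddata->regs + segment->da, segment->size);
}

static int my_rproc_setup_coredump(struct rproc *rproc)
{
        return rproc_coredump_add_custom_segment(rproc, MY_REGS_DA,
                                                 MY_REGS_SIZE,
                                                 my_rproc_dump_regs, NULL);
}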
+
+/**
* rproc_coredump() - perform coredump
* @rproc: rproc handle
*
@@ -1183,14 +1546,18 @@ static void rproc_coredump(struct rproc *rproc)
phdr->p_flags = PF_R | PF_W | PF_X;
phdr->p_align = 0;
- ptr = rproc_da_to_va(rproc, segment->da, segment->size);
- if (!ptr) {
- dev_err(&rproc->dev,
- "invalid coredump segment (%pad, %zu)\n",
- &segment->da, segment->size);
- memset(data + offset, 0xff, segment->size);
+ if (segment->dump) {
+ segment->dump(rproc, segment, data + offset);
} else {
- memcpy(data + offset, ptr, segment->size);
+ ptr = rproc_da_to_va(rproc, segment->da, segment->size);
+ if (!ptr) {
+ dev_err(&rproc->dev,
+ "invalid coredump segment (%pad, %zu)\n",
+ &segment->da, segment->size);
+ memset(data + offset, 0xff, segment->size);
+ } else {
+ memcpy(data + offset, ptr, segment->size);
+ }
}
offset += phdr->p_filesz;
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index a5c29f2764a3..e90135c64af0 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -260,6 +260,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)
list_for_each_entry(carveout, &rproc->carveouts, node) {
seq_puts(seq, "Carveout memory entry:\n");
+ seq_printf(seq, "\tName: %s\n", carveout->name);
seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 7570beb035b5..f6cad243d7ca 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -60,6 +60,8 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw);
+struct rproc_mem_entry *
+rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 47be411400e5..3a4c3d7cafca 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -48,6 +48,11 @@ static ssize_t firmware_store(struct device *dev,
}
len = strcspn(buf, "\n");
+ if (!len) {
+ dev_err(dev, "can't provide a NULL firmware\n");
+ err = -EINVAL;
+ goto out;
+ }
p = kstrndup(buf, len, GFP_KERNEL);
if (!p) {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index bbecd44df7e8..de21f620b882 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -76,7 +76,9 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
struct device *dev = &rproc->dev;
+ struct rproc_mem_entry *mem;
struct rproc_vring *rvring;
+ struct fw_rsc_vdev *rsc;
struct virtqueue *vq;
void *addr;
int len, size;
@@ -88,8 +90,14 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
if (!name)
return NULL;
+ /* Search allocated memory region by name */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
+ id);
+ if (!mem || !mem->va)
+ return ERR_PTR(-ENOMEM);
+
rvring = &rvdev->vring[id];
- addr = rvring->va;
+ addr = mem->va;
len = rvring->len;
/* zero vring */
@@ -114,6 +122,10 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rvring->vq = vq;
vq->priv = rvring;
+ /* Update vring in resource table */
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+ rsc->vring[id].da = mem->da;
+
return vq;
}
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index e2ce4e638258..f46c787733e8 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -792,9 +792,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
return -EAGAIN;
}
- if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
- return -EINVAL;
-
rcid = le16_to_cpu(hdr.msg.param1);
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
index 2b5cf2790954..64a5ce324c7f 100644
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -89,15 +89,11 @@ static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
tail -= pipe->native.length;
len = min_t(size_t, count, pipe->native.length - tail);
- if (len) {
- __ioread32_copy(data, pipe->fifo + tail,
- len / sizeof(u32));
- }
+ if (len)
+ memcpy_fromio(data, pipe->fifo + tail, len);
- if (len != count) {
- __ioread32_copy(data + len, pipe->fifo,
- (count - len) / sizeof(u32));
- }
+ if (len != count)
+ memcpy_fromio(data + len, pipe->fifo, (count - len));
}
static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
@@ -205,7 +201,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
dev->parent = parent;
dev->of_node = node;
dev->release = qcom_glink_smem_release;
- dev_set_name(dev, "%s:%s", node->parent->name, node->name);
+ dev_set_name(dev, "%pOFn:%pOFn", node->parent, node);
ret = device_register(dev);
if (ret) {
pr_err("failed to register glink edge\n");
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 8da83a4ebadc..4abbeea782fa 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1122,8 +1122,10 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
channel->edge = edge;
channel->name = kstrdup(name, GFP_KERNEL);
- if (!channel->name)
- return ERR_PTR(-ENOMEM);
+ if (!channel->name) {
+ ret = -ENOMEM;
+ goto free_channel;
+ }
spin_lock_init(&channel->tx_lock);
spin_lock_init(&channel->recv_lock);
@@ -1173,6 +1175,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
free_name_and_channel:
kfree(channel->name);
+free_channel:
kfree(channel);
return ERR_PTR(ret);
@@ -1454,7 +1457,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
edge->dev.release = qcom_smd_edge_release;
edge->dev.of_node = node;
edge->dev.groups = qcom_smd_edge_groups;
- dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
+ dev_set_name(&edge->dev, "%s:%pOFn", dev_name(parent), node);
ret = device_register(&edge->dev);
if (ret) {
pr_err("failed to register smd edge\n");
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index a76b963a7e50..eea5ebbb5119 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -167,9 +167,9 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
return 0;
}
-static ssize_t rpmsg_eptdev_read(struct file *filp, char __user *buf,
- size_t len, loff_t *f_pos)
+static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *filp = iocb->ki_filp;
struct rpmsg_eptdev *eptdev = filp->private_data;
unsigned long flags;
struct sk_buff *skb;
@@ -205,8 +205,8 @@ static ssize_t rpmsg_eptdev_read(struct file *filp, char __user *buf,
if (!skb)
return -EFAULT;
- use = min_t(size_t, len, skb->len);
- if (copy_to_user(buf, skb->data, use))
+ use = min_t(size_t, iov_iter_count(to), skb->len);
+ if (copy_to_iter(skb->data, use, to) != use)
use = -EFAULT;
kfree_skb(skb);
@@ -214,16 +214,21 @@ static ssize_t rpmsg_eptdev_read(struct file *filp, char __user *buf,
return use;
}
-static ssize_t rpmsg_eptdev_write(struct file *filp, const char __user *buf,
- size_t len, loff_t *f_pos)
+static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
{
+ struct file *filp = iocb->ki_filp;
struct rpmsg_eptdev *eptdev = filp->private_data;
+ size_t len = iov_iter_count(from);
void *kbuf;
int ret;
- kbuf = memdup_user(buf, len);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
+ kbuf = kzalloc(len, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (!copy_from_iter_full(kbuf, len, from)) {
+ ret = -EFAULT;
+ goto free_kbuf;
+ }
if (mutex_lock_interruptible(&eptdev->ept_lock)) {
ret = -ERESTARTSYS;
@@ -281,8 +286,8 @@ static const struct file_operations rpmsg_eptdev_fops = {
.owner = THIS_MODULE,
.open = rpmsg_eptdev_open,
.release = rpmsg_eptdev_release,
- .read = rpmsg_eptdev_read,
- .write = rpmsg_eptdev_write,
+ .read_iter = rpmsg_eptdev_read_iter,
+ .write_iter = rpmsg_eptdev_write_iter,
.poll = rpmsg_eptdev_poll,
.unlocked_ioctl = rpmsg_eptdev_ioctl,
.compat_ioctl = rpmsg_eptdev_ioctl,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 16a4e8528bbc..8f3a2eeb28dc 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -8,7 +8,7 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5b8af2782282..2b0c36c2c568 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,7 +19,7 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/ccwdev.h>
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 8af4948dae80..72dd2471ec1e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -13,7 +13,7 @@
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 8f5c1d7f751a..97b6f197f007 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -9,7 +9,7 @@
#include <linux/kernel_stat.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 05293babb031..2d655a97b959 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -143,7 +143,9 @@ static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int secon
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
-static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ unsigned char *cdb, int use_sg,
+ TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
@@ -278,7 +280,7 @@ out:
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
int request_id = 0;
- char cdb[TW_MAX_CDB_LEN];
+ unsigned char cdb[TW_MAX_CDB_LEN];
TW_SG_Entry sglist[1];
int finished = 0, count = 0;
TW_Command_Full *full_command_packet;
@@ -423,7 +425,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
- char cdb[TW_MAX_CDB_LEN];
+ unsigned char cdb[TW_MAX_CDB_LEN];
TW_SG_Entry sglist[1];
TW_Command_Full *full_command_packet;
int retval = 1;
@@ -1798,7 +1800,9 @@ out:
static DEF_SCSI_QCMD(twa_scsi_queue)
/* This function hands scsi cdb's to the firmware */
-static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ unsigned char *cdb, int use_sg,
+ TW_SG_Entry *sglistarg)
{
TW_Command_Full *full_command_packet;
TW_Command_Apache *command_packet;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 266bdac75304..480cf82700e9 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -287,7 +287,9 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
} /* End twl_post_command_packet() */
/* This function hands scsi cdb's to the firmware */
-static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ unsigned char *cdb, int use_sg,
+ TW_SG_Entry_ISO *sglistarg)
{
TW_Command_Full *full_command_packet;
TW_Command_Apache *command_packet;
@@ -372,7 +374,7 @@ out:
/* This function will read the aen queue from the isr */
static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
- char cdb[TW_MAX_CDB_LEN];
+ unsigned char cdb[TW_MAX_CDB_LEN];
TW_SG_Entry_ISO sglist[1];
TW_Command_Full *full_command_packet;
int retval = 1;
@@ -554,7 +556,7 @@ out:
static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
int request_id = 0;
- char cdb[TW_MAX_CDB_LEN];
+ unsigned char cdb[TW_MAX_CDB_LEN];
TW_SG_Entry_ISO sglist[1];
int finished = 0, count = 0;
TW_Command_Full *full_command_packet;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 70988c381268..f07444d30b21 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -538,7 +538,7 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
+ depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
---help---
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
@@ -1175,12 +1175,12 @@ config SCSI_LPFC_DEBUG_FS
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
- depends on (EISA || MCA) && SCSI
+ depends on EISA && SCSI
select SCSI_SPI_ATTRS
---help---
This driver is for NCR53c710 based SCSI host adapters.
- It currently supports Compaq EISA cards and NCR MCA cards
+ It currently supports Compaq EISA cards.
config SCSI_DC395x
tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4d7b0e0adbf7..301b3cad15f8 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -269,7 +269,7 @@ static LIST_HEAD(aha152x_host_list);
/* DEFINES */
/* For PCMCIA cards, always use AUTOCONF */
-#if defined(PCMCIA) || defined(MODULE)
+#if defined(AHA152X_PCMCIA) || defined(MODULE)
#if !defined(AUTOCONF)
#define AUTOCONF
#endif
@@ -297,7 +297,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
#define DELAY_DEFAULT 1000
-#if defined(PCMCIA)
+#if defined(AHA152X_PCMCIA)
#define IRQ_MIN 0
#define IRQ_MAX 16
#else
@@ -328,7 +328,7 @@ MODULE_AUTHOR("Jürgen Fischer");
MODULE_DESCRIPTION(AHA152X_REVID);
MODULE_LICENSE("GPL");
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
#if defined(MODULE)
static int io[] = {0, 0};
module_param_hw_array(io, int, ioport, NULL, 0);
@@ -391,7 +391,7 @@ static struct isapnp_device_id id_table[] = {
MODULE_DEVICE_TABLE(isapnp, id_table);
#endif /* ISAPNP */
-#endif /* !PCMCIA */
+#endif /* !AHA152X_PCMCIA */
static struct scsi_host_template aha152x_driver_template;
@@ -863,7 +863,7 @@ void aha152x_release(struct Scsi_Host *shpnt)
if (shpnt->irq)
free_irq(shpnt->irq, shpnt);
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
if (shpnt->io_port)
release_region(shpnt->io_port, IO_RANGE);
#endif
@@ -2924,7 +2924,7 @@ static struct scsi_host_template aha152x_driver_template = {
.slave_alloc = aha152x_adjust_queue,
};
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
static int setup_count;
static struct aha152x_setup setup[2];
@@ -3392,4 +3392,4 @@ static int __init aha152x_setup(char *str)
__setup("aha152x=", aha152x_setup);
#endif
-#endif /* !PCMCIA */
+#endif /* !AHA152X_PCMCIA */
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 3df1428df317..311d23c727ce 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -790,12 +790,11 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
slot->n_elem = n_elem;
slot->slot_tag = tag;
- slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
+ slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
if (!slot->buf) {
rc = -ENOMEM;
goto err_out_tag;
}
- memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
tei.task = task;
tei.hdr = &mvi->slot[tag];
@@ -1906,8 +1905,7 @@ static void mvs_work_queue(struct work_struct *work)
if (phy->phy_event & PHY_PLUG_OUT) {
u32 tmp;
- struct sas_identify_frame *id;
- id = (struct sas_identify_frame *)phy->frame_rcvd;
+
tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
phy->phy_event &= ~PHY_PLUG_OUT;
if (!(tmp & PHY_READY_MASK)) {
diff --git a/drivers/scsi/pcmcia/aha152x_core.c b/drivers/scsi/pcmcia/aha152x_core.c
index dba3716511c5..24b89228b241 100644
--- a/drivers/scsi/pcmcia/aha152x_core.c
+++ b/drivers/scsi/pcmcia/aha152x_core.c
@@ -1,3 +1,3 @@
-#define PCMCIA 1
+#define AHA152X_PCMCIA 1
#define AHA152X_STAT 1
#include "aha152x.c"
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b28f159fdaee..0bb9ac6ece92 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -218,7 +218,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
mutex_lock(&ha->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
- mutex_unlock(&vha->hw->optrom_mutex);
+ mutex_unlock(&ha->optrom_mutex);
return -EAGAIN;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index c72d8012fe2a..6fe20c27acc1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -425,7 +425,7 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__qla24xx_handle_gpdb_event(vha, ea);
}
-int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -680,7 +680,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport);
break;
}
- /* drop through */
+ /* fall through */
default:
if (fcport_is_smaller(fcport)) {
/* local adapter is bigger */
@@ -1551,7 +1551,8 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
-void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+ struct event_arg *ea)
{
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post PRLI\n",
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 86fb8b21aa71..032635321ad6 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1195,8 +1195,8 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
- * @tot_prot_dsds:
- * @fw_prot_opts:
+ * @tot_prot_dsds: Total number of segments with protection information
+ * @fw_prot_opts: Protection options to be passed to firmware
*/
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d73b04e40590..30d3090842f8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -25,7 +25,7 @@ static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
@@ -144,7 +144,7 @@ qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
/**
* qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
@@ -3109,7 +3109,7 @@ done:
/**
* qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2f3e5075ae76..191b6b7c8747 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3478,9 +3478,9 @@ qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
/**
* qla2x00_set_serdes_params() -
* @vha: HA context
- * @sw_em_1g:
- * @sw_em_2g:
- * @sw_em_4g:
+ * @sw_em_1g: serial link options
+ * @sw_em_2g: serial link options
+ * @sw_em_4g: serial link options
*
* Returns
*/
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 521a51370554..60f964c53c01 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2212,7 +2212,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct bsg_job *bsg_job;
struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
- int res;
+ int res = 0;
struct qla_mt_iocb_rsp_fx00 fstatus;
uint8_t *fw_sts_ptr;
@@ -2624,7 +2624,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
* qlafx00_multistatus_entry() - Process Multi response queue entries.
* @vha: SCSI driver HA context
* @rsp: response queue
- * @pkt:
+ * @pkt: received packet
*/
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
@@ -2681,12 +2681,10 @@ qlafx00_multistatus_entry(struct scsi_qla_host *vha,
* @vha: SCSI driver HA context
* @rsp: response queue
* @pkt: Entry pointer
- * @estatus:
- * @etype:
*/
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
- struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+ struct sts_entry_fx00 *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -2695,9 +2693,6 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
struct req_que *req = NULL;
int res = DID_ERROR << 16;
- ql_dbg(ql_dbg_async, vha, 0x507f,
- "type of error status in response: 0x%x\n", estatus);
-
req = ha->req_q_map[que];
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
@@ -2745,9 +2740,11 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0 &&
pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+ ql_dbg(ql_dbg_async, vha, 0x507f,
+ "type of error status in response: 0x%x\n",
+ pkt->entry_status);
qlafx00_error_entry(vha, rsp,
- (struct sts_entry_fx00 *)pkt, pkt->entry_status,
- pkt->entry_type);
+ (struct sts_entry_fx00 *)pkt);
continue;
}
@@ -2867,7 +2864,7 @@ qlafx00_async_event(scsi_qla_host_t *vha)
/**
* qlafx00x_mbx_completion() - Process mailbox command completions.
* @vha: SCSI driver HA context
- * @mb0:
+ * @mb0: value to be written into mailbox register 0
*/
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
@@ -2893,7 +2890,7 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/**
* qlafx00_intr_handler() - Process interrupts for the ISPFX00.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 121e18b3b9f8..f2f54806f4da 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2010,7 +2010,7 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
/**
* qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 3a2b0282df14..fe856b602e03 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -3878,7 +3878,7 @@ out:
#define PF_BITS_MASK (0xF << 16)
/**
* qla8044_intr_handler() - Process interrupts for the ISP8044
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8794e54f43a9..518f15141170 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int cnt, status;
+ int cnt;
unsigned long flags;
srb_t *sp;
scsi_qla_host_t *vha = qp->vha;
@@ -1799,8 +1799,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
if (!sp_get(sp)) {
spin_unlock_irqrestore
(qp->qp_lock_ptr, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
+ qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
spin_lock_irqsave
(qp->qp_lock_ptr, flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 4499c787165f..2a3055c799fb 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2229,7 +2229,7 @@ qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
/**
* qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
- * @ha:
+ * @ha: host adapter
* @man_id: Flash manufacturer ID
* @flash_id: Flash ID
*/
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 39828207bc1d..c4504740f0e2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -4540,7 +4540,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
case QLA_TGT_CLEAR_TS:
case QLA_TGT_ABORT_TS:
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- /* drop through */
+ /* fall through */
case QLA_TGT_CLEAR_ACA:
h = qlt_find_qphint(vha, mcmd->unpacked_lun);
mcmd->qpair = h->qpair;
@@ -6598,9 +6598,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
* qla_tgt_lport_register - register lport with external module
*
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
- * @phys_wwpn:
- * @npiv_wwpn:
- * @npiv_wwnn:
+ * @phys_wwpn: physical port WWPN
+ * @npiv_wwpn: NPIV WWPN
+ * @npiv_wwnn: NPIV WWNN
* @callback: lport initialization callback for tcm_qla2xxx code
*/
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 153b3f3cc795..a5136901dd8a 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -59,7 +59,7 @@
#define KMSG_COMPONENT "SFI"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c16dd16afe6a..0fdda6f62953 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -1,6 +1,6 @@
menuconfig ION
bool "Ion Memory Manager"
- depends on HAVE_MEMBLOCK && HAS_DMA && MMU
+ depends on HAS_DMA && MMU
select GENERIC_ALLOCATOR
select DMA_SHARED_BUFFER
help
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index db5cf67047ad..b3620a8f2d9f 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -31,6 +31,8 @@ source "drivers/staging/media/mt9t031/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/sunxi/Kconfig"
+
source "drivers/staging/media/tegra-vde/Kconfig"
source "drivers/staging/media/zoran/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 503fbe47fa58..42948f805548 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074/
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
+obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 5e42490331b7..5e9769ea8a50 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1135,10 +1135,6 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
- if (vb->state != VB2_BUF_STATE_ACTIVE &&
- vb->state != VB2_BUF_STATE_PREPARED)
- return 0;
-
/* Initialize buffer */
vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
if (vb2_plane_vaddr(vb, 0) &&
@@ -1429,7 +1425,8 @@ static int vpfe_qbuf(struct file *file, void *priv,
return -EACCES;
}
- return vb2_qbuf(&video->buffer_queue, p);
+ return vb2_qbuf(&video->buffer_queue,
+ video->video_dev.v4l2_dev->mdev, p);
}
/*
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 026b9cbe581d..4b344a4a3706 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -350,7 +350,7 @@ static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
sd->ctrl_handler,
- NULL);
+ NULL, true);
if (ret)
return ret;
}
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index 6df189135db8..8cf773eef9da 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -463,7 +463,7 @@ int imx_media_fim_add_controls(struct imx_media_fim *fim)
{
/* add the FIM controls to the calling subdev ctrl handler */
return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
- &fim->ctrl_handler, NULL);
+ &fim->ctrl_handler, NULL, false);
}
EXPORT_SYMBOL_GPL(imx_media_fim_add_controls);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c1322aeaf01e..c2c5a9cd8642 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -802,9 +802,10 @@ iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
static int
iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
+ struct iss_video *video = video_drvdata(file);
struct iss_video_fh *vfh = to_iss_video_fh(fh);
- return vb2_qbuf(&vfh->queue, b);
+ return vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
}
static int
diff --git a/drivers/staging/media/sunxi/Kconfig b/drivers/staging/media/sunxi/Kconfig
new file mode 100644
index 000000000000..c78d92240ceb
--- /dev/null
+++ b/drivers/staging/media/sunxi/Kconfig
@@ -0,0 +1,15 @@
+config VIDEO_SUNXI
+ bool "Allwinner sunXi family Video Devices"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ If you have an Allwinner SoC based on the sunXi family, say Y.
+
+ Note that this option doesn't include new drivers in the
+ kernel: saying N will just cause Kconfig to skip all the
+ questions about Allwinner media devices.
+
+if VIDEO_SUNXI
+
+source "drivers/staging/media/sunxi/cedrus/Kconfig"
+
+endif
diff --git a/drivers/staging/media/sunxi/Makefile b/drivers/staging/media/sunxi/Makefile
new file mode 100644
index 000000000000..cee2846c3ecf
--- /dev/null
+++ b/drivers/staging/media/sunxi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += cedrus/
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
new file mode 100644
index 000000000000..a7a34e89c42d
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_SUNXI_CEDRUS
+ tristate "Allwinner Cedrus VPU driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on HAS_DMA
+ depends on OF
+ select SUNXI_SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the VPU found in Allwinner SoCs, also known as the Cedar
+ video engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunxi-cedrus.
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
new file mode 100644
index 000000000000..e9dc68b7bcb6
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
+
+sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o cedrus_mpeg2.o
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
new file mode 100644
index 000000000000..ec277ece47af
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -0,0 +1,7 @@
+Before this stateless decoder driver can leave the staging area:
+* The Request API needs to be stabilized;
+* The codec-specific controls need to be thoroughly reviewed to ensure they
+  cover all intended use cases;
+* Userspace support for the Request API needs to be reviewed;
+* Another stateless decoder driver should be submitted;
+* At least one stateless encoder driver should be submitted.
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
new file mode 100644
index 000000000000..82558455384a
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_video.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+static const struct cedrus_control cedrus_controls[] = {
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params),
+ .codec = CEDRUS_CODEC_MPEG2,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization),
+ .codec = CEDRUS_CODEC_MPEG2,
+ .required = false,
+ },
+};
+
+#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
+
+void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; ctx->ctrls[i]; i++)
+ if (ctx->ctrls[i]->id == id)
+ return ctx->ctrls[i]->p_cur.p;
+
+ return NULL;
+}
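/*
 * Editor's sketch (illustration only): codec backends are expected to fetch
 * request-provided control payloads through the helper above; for MPEG-2 this
 * could look as follows. example_fetch_mpeg2_controls() is a hypothetical
 * name; the control IDs and struct cedrus_run fields come from this series.
 */
static void example_fetch_mpeg2_controls(struct cedrus_ctx *ctx,
                                         struct cedrus_run *run)
{
        run->mpeg2.slice_params =
                cedrus_find_control_data(ctx,
                                         V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
        run->mpeg2.quantization =
                cedrus_find_control_data(ctx,
                                         V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
}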
+
+static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *hdl = &ctx->hdl;
+ struct v4l2_ctrl *ctrl;
+ unsigned int ctrl_size;
+ unsigned int i;
+
+ v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT);
+ if (hdl->error) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize control handler\n");
+ return hdl->error;
+ }
+
+ /* One extra slot so the array stays NULL-terminated for cedrus_find_control_data() */
+ ctrl_size = sizeof(ctrl) * (CEDRUS_CONTROLS_COUNT + 1);
+
+ ctx->ctrls = kzalloc(ctrl_size, GFP_KERNEL);
+ if (!ctx->ctrls) {
+ v4l2_ctrl_handler_free(hdl);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+ struct v4l2_ctrl_config cfg = { 0 };
+
+ cfg.elem_size = cedrus_controls[i].elem_size;
+ cfg.id = cedrus_controls[i].id;
+
+ ctrl = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ if (hdl->error) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to create new custom control\n");
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx->ctrls);
+ return hdl->error;
+ }
+
+ ctx->ctrls[i] = ctrl;
+ }
+
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ return 0;
+}
+
+static int cedrus_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *parent_hdl, *hdl;
+ struct cedrus_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl_test;
+ unsigned int count;
+ unsigned int i;
+
+ /* Find the context first: the log messages below dereference ctx->dev */
+ list_for_each_entry(obj, &req->objects, list) {
+ struct vb2_buffer *vb;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ break;
+ }
+ }
+
+ if (!ctx)
+ return -ENOENT;
+
+ count = vb2_request_buffer_cnt(req);
+ if (!count) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "No buffer was provided with the request\n");
+ return -ENOENT;
+ } else if (count > 1) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "More than one buffer was provided with the request\n");
+ return -EINVAL;
+ }
+
+ parent_hdl = &ctx->hdl;
+
+ hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
+ if (!hdl) {
+ v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control(s)\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+ if (cedrus_controls[i].codec != ctx->current_codec ||
+ !cedrus_controls[i].required)
+ continue;
+
+ ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl,
+ cedrus_controls[i].id);
+ if (!ctrl_test) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "Missing required codec control\n");
+ return -ENOENT;
+ }
+ }
+
+ v4l2_ctrl_request_hdl_put(hdl);
+
+ return vb2_request_validate(req);
+}
+
+static int cedrus_open(struct file *file)
+{
+ struct cedrus_dev *dev = video_drvdata(file);
+ struct cedrus_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ret = cedrus_init_ctrls(dev, ctx);
+ if (ret)
+ goto err_free;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &cedrus_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctrls;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int cedrus_release(struct file *file)
+{
+ struct cedrus_dev *dev = video_drvdata(file);
+ struct cedrus_ctx *ctx = container_of(file->private_data,
+ struct cedrus_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx->ctrls);
+
+ v4l2_fh_exit(&ctx->fh);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations cedrus_fops = {
+ .owner = THIS_MODULE,
+ .open = cedrus_open,
+ .release = cedrus_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device cedrus_video_device = {
+ .name = CEDRUS_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &cedrus_fops,
+ .ioctl_ops = &cedrus_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops cedrus_m2m_ops = {
+ .device_run = cedrus_device_run,
+};
+
+static const struct media_device_ops cedrus_m2m_media_ops = {
+ .req_validate = cedrus_request_validate,
+ .req_queue = vb2_m2m_request_queue,
+};
+
+static int cedrus_probe(struct platform_device *pdev)
+{
+ struct cedrus_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = cedrus_video_device;
+ dev->dev = &pdev->dev;
+ dev->pdev = pdev;
+
+ ret = cedrus_hw_probe(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe hardware\n");
+ return ret;
+ }
+
+ dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
+
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irq_lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register V4L2 device\n");
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s", cedrus_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&cedrus_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ dev->mdev.dev = &pdev->dev;
+ strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &cedrus_m2m_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M media controller\n");
+ goto err_m2m;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register media device\n");
+ goto err_m2m_mc;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ return 0;
+
+err_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+err_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int cedrus_remove(struct platform_device *pdev)
+{
+ struct cedrus_dev *dev = platform_get_drvdata(pdev);
+
+ if (media_devnode_is_registered(dev->mdev.devnode)) {
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+ }
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ cedrus_hw_remove(dev);
+
+ return 0;
+}
+
+static const struct cedrus_variant sun4i_a10_cedrus_variant = {
+ /* No particular capability. */
+};
+
+static const struct cedrus_variant sun5i_a13_cedrus_variant = {
+ /* No particular capability. */
+};
+
+static const struct cedrus_variant sun7i_a20_cedrus_variant = {
+ /* No particular capability. */
+};
+
+static const struct cedrus_variant sun8i_a33_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+};
+
+static const struct cedrus_variant sun8i_h3_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+};
+
+static const struct of_device_id cedrus_dt_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-video-engine",
+ .data = &sun4i_a10_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-video-engine",
+ .data = &sun5i_a13_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-video-engine",
+ .data = &sun7i_a20_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-a33-video-engine",
+ .data = &sun8i_a33_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-video-engine",
+ .data = &sun8i_h3_cedrus_variant,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cedrus_dt_match);
+
+static struct platform_driver cedrus_driver = {
+ .probe = cedrus_probe,
+ .remove = cedrus_remove,
+ .driver = {
+ .name = CEDRUS_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cedrus_dt_match),
+ },
+};
+module_platform_driver(cedrus_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Florent Revest <florent.revest@free-electrons.com>");
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cedrus VPU driver");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
new file mode 100644
index 000000000000..3f61248c57ac
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_H_
+#define _CEDRUS_H_
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define CEDRUS_NAME "cedrus"
+
+#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+
+enum cedrus_codec {
+ CEDRUS_CODEC_MPEG2,
+
+ CEDRUS_CODEC_LAST,
+};
+
+enum cedrus_irq_status {
+ CEDRUS_IRQ_NONE,
+ CEDRUS_IRQ_ERROR,
+ CEDRUS_IRQ_OK,
+};
+
+struct cedrus_control {
+ u32 id;
+ u32 elem_size;
+ enum cedrus_codec codec;
+ unsigned char required:1;
+};
+
+struct cedrus_mpeg2_run {
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_ctrl_mpeg2_quantization *quantization;
+};
+
+struct cedrus_run {
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+
+ union {
+ struct cedrus_mpeg2_run mpeg2;
+ };
+};
+
+struct cedrus_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+};
+
+struct cedrus_ctx {
+ struct v4l2_fh fh;
+ struct cedrus_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+ enum cedrus_codec current_codec;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl **ctrls;
+
+ struct vb2_buffer *dst_bufs[VIDEO_MAX_FRAME];
+};
+
+struct cedrus_dec_ops {
+ void (*irq_clear)(struct cedrus_ctx *ctx);
+ void (*irq_disable)(struct cedrus_ctx *ctx);
+ enum cedrus_irq_status (*irq_status)(struct cedrus_ctx *ctx);
+ void (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run);
+ int (*start)(struct cedrus_ctx *ctx);
+ void (*stop)(struct cedrus_ctx *ctx);
+ void (*trigger)(struct cedrus_ctx *ctx);
+};
+
+struct cedrus_variant {
+ unsigned int capabilities;
+};
+
+struct cedrus_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct media_device mdev;
+ struct media_pad pad[2];
+ struct platform_device *pdev;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct cedrus_dec_ops *dec_ops[CEDRUS_CODEC_LAST];
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+ /* Interrupt spinlock */
+ spinlock_t irq_lock;
+
+ void __iomem *base;
+
+ struct clk *mod_clk;
+ struct clk *ahb_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+
+ unsigned int capabilities;
+};
+
+extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
+
+static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
+{
+ writel(val, dev->base + reg);
+}
+
+static inline u32 cedrus_read(struct cedrus_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
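+/*
+ * Compute the DMA address of a plane, assuming the planes of the format are
+ * laid out contiguously: plane N starts bytesperline * height * N bytes past
+ * the start of the buffer.
+ */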
+static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
+ struct v4l2_pix_format *pix_fmt,
+ unsigned int plane)
+{
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(buf, 0);
+
+ return addr + (pix_fmt ? (dma_addr_t)pix_fmt->bytesperline *
+ pix_fmt->height * plane : 0);
+}
+
+static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int index,
+ unsigned int plane)
+{
+ struct vb2_buffer *buf = ctx->dst_bufs[index];
+
+ return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
+}
+
+static inline struct cedrus_buffer *
+vb2_v4l2_to_cedrus_buffer(const struct vb2_v4l2_buffer *p)
+{
+ return container_of(p, struct cedrus_buffer, m2m_buf.vb);
+}
+
+static inline struct cedrus_buffer *
+vb2_to_cedrus_buffer(const struct vb2_buffer *p)
+{
+ return vb2_v4l2_to_cedrus_buffer(to_vb2_v4l2_buffer(p));
+}
+
+void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
new file mode 100644
index 000000000000..e40180a33951
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+void cedrus_device_run(void *priv)
+{
+ struct cedrus_ctx *ctx = priv;
+ struct cedrus_dev *dev = ctx->dev;
+ struct cedrus_run run = { 0 };
+ struct media_request *src_req;
+ unsigned long flags;
+
+ run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request(s) controls if needed. */
+ src_req = run.src->vb2_buf.req_obj.req;
+
+ if (src_req)
+ v4l2_ctrl_request_setup(src_req, &ctx->hdl);
+
+ spin_lock_irqsave(&ctx->dev->irq_lock, flags);
+
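+ /* Fetch the per-codec control payloads attached to the request. */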
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ run.mpeg2.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
+ run.mpeg2.quantization = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ break;
+
+ default:
+ break;
+ }
+
+ dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
+
+ spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
+
+ /* Complete request(s) controls if needed. */
+
+ if (src_req)
+ v4l2_ctrl_request_complete(src_req, &ctx->hdl);
+
+ spin_lock_irqsave(&ctx->dev->irq_lock, flags);
+
+ dev->dec_ops[ctx->current_codec]->trigger(ctx);
+
+ spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.h b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
new file mode 100644
index 000000000000..4f423d3a1cad
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_DEC_H_
+#define _CEDRUS_DEC_H_
+
+extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
+
+void cedrus_device_work(struct work_struct *work);
+void cedrus_device_run(void *priv);
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
new file mode 100644
index 000000000000..32adbcbe6175
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
+{
+ u32 reg = 0;
+
+ /*
+	 * FIXME: This is only valid for 32-bit DDR; it should be
+	 * tested on the A13/A33.
+ */
+ reg |= VE_MODE_REC_WR_MODE_2MB;
+ reg |= VE_MODE_DDR_MODE_BW_128;
+
+ switch (codec) {
+ case CEDRUS_CODEC_MPEG2:
+ reg |= VE_MODE_DEC_MPEG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ cedrus_write(dev, VE_MODE, reg);
+
+ return 0;
+}
+
+void cedrus_engine_disable(struct cedrus_dev *dev)
+{
+ cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
+}
+
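+/* Program the output format and frame buffer layout registers. */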
+void cedrus_dst_format_set(struct cedrus_dev *dev,
+ struct v4l2_pix_format *fmt)
+{
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ u32 chroma_size;
+ u32 reg;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
+
+ reg = VE_PRIMARY_OUT_FMT_NV12;
+ cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
+
+ reg = VE_CHROMA_BUF_LEN_SDRT(chroma_size / 2);
+ cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
+
+ reg = chroma_size / 2;
+ cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
+
+ reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
+ VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
+ cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
+
+ break;
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ default:
+ reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
+ cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
+
+ reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
+ cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
+
+ break;
+ }
+}
+
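+/* Threaded IRQ handler: finish the mem2mem job once the buffers are done. */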
+static irqreturn_t cedrus_bh(int irq, void *data)
+{
+ struct cedrus_dev *dev = data;
+ struct cedrus_ctx *ctx;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+
+ return IRQ_HANDLED;
+}
+
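+/*
+ * Hard IRQ handler: read the decode status, mark the source and destination
+ * buffers as done or in error, and wake the threaded handler.
+ */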
+static irqreturn_t cedrus_irq(int irq, void *data)
+{
+ struct cedrus_dev *dev = data;
+ struct cedrus_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state state;
+ enum cedrus_irq_status status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return IRQ_NONE;
+ }
+
+ status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
+ if (status == CEDRUS_IRQ_NONE) {
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ return IRQ_NONE;
+ }
+
+ dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
+ dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!src_buf || !dst_buf) {
+ v4l2_err(&dev->v4l2_dev,
+ "Missing source and/or destination buffers\n");
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return IRQ_HANDLED;
+ }
+
+ if (status == CEDRUS_IRQ_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_done(src_buf, state);
+ v4l2_m2m_buf_done(dst_buf, state);
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int cedrus_hw_probe(struct cedrus_dev *dev)
+{
+ const struct cedrus_variant *variant;
+ struct resource *res;
+ int irq_dec;
+ int ret;
+
+ variant = of_device_get_match_data(dev->dev);
+ if (!variant)
+ return -EINVAL;
+
+ dev->capabilities = variant->capabilities;
+
+ irq_dec = platform_get_irq(dev->pdev, 0);
+ if (irq_dec <= 0) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get IRQ\n");
+
+ return irq_dec;
+ }
+ ret = devm_request_threaded_irq(dev->dev, irq_dec, cedrus_irq,
+ cedrus_bh, 0, dev_name(dev->dev),
+ dev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ /*
+ * The VPU is only able to handle bus addresses, so we have to subtract
+ * the RAM offset from the physical addresses.
+ *
+ * This information will eventually be obtained from device-tree.
+ */
+
+#ifdef PHYS_PFN_OFFSET
+ dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+#endif
+
+ ret = of_reserved_mem_device_init(dev->dev);
+ if (ret && ret != -ENODEV) {
+ v4l2_err(&dev->v4l2_dev, "Failed to reserve memory\n");
+
+ return ret;
+ }
+
+ ret = sunxi_sram_claim(dev->dev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to claim SRAM\n");
+
+ goto err_mem;
+ }
+
+ dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
+ if (IS_ERR(dev->ahb_clk)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get AHB clock\n");
+
+ ret = PTR_ERR(dev->ahb_clk);
+ goto err_sram;
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get MOD clock\n");
+
+ ret = PTR_ERR(dev->mod_clk);
+ goto err_sram;
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get RAM clock\n");
+
+ ret = PTR_ERR(dev->ram_clk);
+ goto err_sram;
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get reset control\n");
+
+ ret = PTR_ERR(dev->rstc);
+ goto err_sram;
+ }
+
+ res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(dev->dev, res);
+ if (IS_ERR(dev->base)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
+
+ ret = PTR_ERR(dev->base);
+ goto err_sram;
+ }
+
+ ret = clk_set_rate(dev->mod_clk, CEDRUS_CLOCK_RATE_DEFAULT);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to set clock rate\n");
+
+ goto err_sram;
+ }
+
+ ret = clk_prepare_enable(dev->ahb_clk);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable AHB clock\n");
+
+ goto err_sram;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable MOD clock\n");
+
+ goto err_ahb_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable RAM clock\n");
+
+ goto err_mod_clk;
+ }
+
+ ret = reset_control_reset(dev->rstc);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to apply reset\n");
+
+ goto err_ram_clk;
+ }
+
+ return 0;
+
+err_ram_clk:
+ clk_disable_unprepare(dev->ram_clk);
+err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+err_ahb_clk:
+ clk_disable_unprepare(dev->ahb_clk);
+err_sram:
+ sunxi_sram_release(dev->dev);
+err_mem:
+ of_reserved_mem_device_release(dev->dev);
+
+ return ret;
+}
+
+void cedrus_hw_remove(struct cedrus_dev *dev)
+{
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->ahb_clk);
+
+ sunxi_sram_release(dev->dev);
+
+ of_reserved_mem_device_release(dev->dev);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
new file mode 100644
index 000000000000..b43c77d54b95
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_HW_H_
+#define _CEDRUS_HW_H_
+
+#define CEDRUS_CLOCK_RATE_DEFAULT 320000000
+
+int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
+void cedrus_engine_disable(struct cedrus_dev *dev);
+
+void cedrus_dst_format_set(struct cedrus_dev *dev,
+ struct v4l2_pix_format *fmt);
+
+int cedrus_hw_probe(struct cedrus_dev *dev);
+void cedrus_hw_remove(struct cedrus_dev *dev);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
new file mode 100644
index 000000000000..9abd39cae38c
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/* Default MPEG-2 quantization coefficients, from the specification. */
+
+static const u8 intra_quantization_matrix_default[64] = {
+ 8, 16, 16, 19, 16, 19, 22, 22,
+ 22, 22, 22, 22, 26, 24, 26, 27,
+ 27, 27, 26, 26, 26, 26, 27, 27,
+ 27, 29, 29, 29, 34, 34, 34, 29,
+ 29, 29, 27, 27, 29, 29, 32, 32,
+ 34, 34, 37, 38, 37, 35, 35, 34,
+ 35, 38, 38, 40, 40, 40, 48, 48,
+ 46, 46, 56, 56, 58, 69, 69, 83
+};
+
+static const u8 non_intra_quantization_matrix_default[64] = {
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16
+};
+
+static enum cedrus_irq_status cedrus_mpeg2_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_MPEG_STATUS);
+ reg &= VE_DEC_MPEG_STATUS_CHECK_MASK;
+
+ if (!reg)
+ return CEDRUS_IRQ_NONE;
+
+ if (reg & VE_DEC_MPEG_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_MPEG_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_mpeg2_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_MPEG_STATUS, VE_DEC_MPEG_STATUS_CHECK_MASK);
+}
+
+static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_MPEG_CTRL);
+
+ reg &= ~VE_DEC_MPEG_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+}
+
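+/*
+ * Program the MPEG engine for one decode run: quantization matrices, picture
+ * header fields, reference and destination buffers and the slice bitstream
+ * location, as provided through the request controls.
+ */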
+static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_mpeg2_sequence *sequence;
+ const struct v4l2_mpeg2_picture *picture;
+ const struct v4l2_ctrl_mpeg2_quantization *quantization;
+ dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr;
+ dma_addr_t fwd_luma_addr, fwd_chroma_addr;
+ dma_addr_t bwd_luma_addr, bwd_chroma_addr;
+ struct cedrus_dev *dev = ctx->dev;
+ const u8 *matrix;
+ unsigned int i;
+ u32 reg;
+
+ slice_params = run->mpeg2.slice_params;
+ sequence = &slice_params->sequence;
+ picture = &slice_params->picture;
+
+ quantization = run->mpeg2.quantization;
+
+ /* Activate MPEG engine. */
+ cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2);
+
+ /* Set intra quantization matrix. */
+
+ if (quantization && quantization->load_intra_quantiser_matrix)
+ matrix = quantization->intra_quantiser_matrix;
+ else
+ matrix = intra_quantization_matrix_default;
+
+ for (i = 0; i < 64; i++) {
+ reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
+ reg |= VE_DEC_MPEG_IQMINPUT_FLAG_INTRA;
+
+ cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
+ }
+
+ /* Set non-intra quantization matrix. */
+
+ if (quantization && quantization->load_non_intra_quantiser_matrix)
+ matrix = quantization->non_intra_quantiser_matrix;
+ else
+ matrix = non_intra_quantization_matrix_default;
+
+ for (i = 0; i < 64; i++) {
+ reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
+ reg |= VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA;
+
+ cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
+ }
+
+ /* Set MPEG picture header. */
+
+ reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(picture->picture_coding_type);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, picture->f_code[0][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, picture->f_code[0][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, picture->f_code[1][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, picture->f_code[1][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(picture->intra_dc_precision);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(picture->picture_structure);
+ reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(picture->top_field_first);
+ reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(picture->frame_pred_frame_dct);
+ reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(picture->concealment_motion_vectors);
+ reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(picture->q_scale_type);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(picture->intra_vlc_format);
+ reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(picture->alternate_scan);
+ reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(0);
+ reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(0);
+
+ cedrus_write(dev, VE_DEC_MPEG_MP12HDR, reg);
+
+ /* Set frame dimensions. */
+
+ reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(sequence->horizontal_size);
+ reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(sequence->vertical_size);
+
+ cedrus_write(dev, VE_DEC_MPEG_PICCODEDSIZE, reg);
+
+ reg = VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(ctx->src_fmt.width);
+ reg |= VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_MPEG_PICBOUNDSIZE, reg);
+
+ /* Forward and backward prediction reference buffers. */
+
+ fwd_luma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->forward_ref_index,
+ 0);
+ fwd_chroma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->forward_ref_index,
+ 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_FWD_REF_LUMA_ADDR, fwd_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_FWD_REF_CHROMA_ADDR, fwd_chroma_addr);
+
+ bwd_luma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->backward_ref_index,
+ 0);
+ bwd_chroma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->backward_ref_index,
+ 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_BWD_REF_LUMA_ADDR, bwd_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_BWD_REF_CHROMA_ADDR, bwd_chroma_addr);
+
+ /* Destination luma and chroma buffers. */
+
+ dst_luma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 0);
+ dst_chroma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_REC_LUMA, dst_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_REC_CHROMA, dst_chroma_addr);
+
+ /* Source offset and length in bits. */
+
+ cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET,
+ slice_params->data_bit_offset);
+
+ reg = slice_params->bit_size - slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_MPEG_VLD_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_MPEG_VLD_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA;
+ reg |= VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA;
+ reg |= VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA;
+
+ cedrus_write(dev, VE_DEC_MPEG_VLD_ADDR, reg);
+
+ reg = src_buf_addr + DIV_ROUND_UP(slice_params->bit_size, 8);
+ cedrus_write(dev, VE_DEC_MPEG_VLD_END_ADDR, reg);
+
+ /* Macroblock address: start at the beginning. */
+ reg = VE_DEC_MPEG_MBADDR_Y(0) | VE_DEC_MPEG_MBADDR_X(0);
+ cedrus_write(dev, VE_DEC_MPEG_MBADDR, reg);
+
+ /* Clear previous errors. */
+ cedrus_write(dev, VE_DEC_MPEG_ERROR, 0);
+
+ /* Clear correct macroblocks register. */
+ cedrus_write(dev, VE_DEC_MPEG_CRTMBADDR, 0);
+
+ /* Enable the relevant interrupts and decoder components. */
+
+ reg = VE_DEC_MPEG_CTRL_IRQ_MASK | VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK |
+ VE_DEC_MPEG_CTRL_MC_CACHE_EN;
+
+ cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+}
+
+static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ /* Trigger MPEG engine. */
+ reg = VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD | VE_DEC_MPEG_TRIGGER_MPEG2 |
+ VE_DEC_MPEG_TRIGGER_MB_BOUNDARY;
+
+ cedrus_write(dev, VE_DEC_MPEG_TRIGGER, reg);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_mpeg2 = {
+ .irq_clear = cedrus_mpeg2_irq_clear,
+ .irq_disable = cedrus_mpeg2_irq_disable,
+ .irq_status = cedrus_mpeg2_irq_status,
+ .setup = cedrus_mpeg2_setup,
+ .trigger = cedrus_mpeg2_trigger,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
new file mode 100644
index 000000000000..de2d6b6f64bf
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2013-2016 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _CEDRUS_REGS_H_
+#define _CEDRUS_REGS_H_
+
+/*
+ * Common acronyms and contractions used in register descriptions:
+ * * VLD : Variable-Length Decoder
+ * * IQ: Inverse Quantization
+ * * IDCT: Inverse Discrete Cosine Transform
+ * * MC: Motion Compensation
+ * * STCD: Start Code Detect
+ * * SDRT: Scale Down and Rotate
+ */
+
+#define VE_ENGINE_DEC_MPEG 0x100
+#define VE_ENGINE_DEC_H264 0x200
+
+#define VE_MODE 0x00
+
+#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
+#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
+#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
+#define VE_MODE_DDR_MODE_BW_256 (0x02 << 16)
+#define VE_MODE_DISABLED (0x07 << 0)
+#define VE_MODE_DEC_H265 (0x04 << 0)
+#define VE_MODE_DEC_H264 (0x01 << 0)
+#define VE_MODE_DEC_MPEG (0x00 << 0)
+
+#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
+#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
+
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
+
+#define VE_CHROMA_BUF_LEN 0xe8
+
+#define VE_SECONDARY_OUT_FMT_TILED_32_NV12 (0x00 << 30)
+#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
+#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
+#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
+#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
+
+#define VE_PRIMARY_OUT_FMT 0xec
+
+#define VE_PRIMARY_OUT_FMT_TILED_32_NV12 (0x00 << 4)
+#define VE_PRIMARY_OUT_FMT_TILED_128_NV12 (0x01 << 4)
+#define VE_PRIMARY_OUT_FMT_YU12 (0x02 << 4)
+#define VE_PRIMARY_OUT_FMT_YV12 (0x03 << 4)
+#define VE_PRIMARY_OUT_FMT_NV12 (0x04 << 4)
+#define VE_PRIMARY_OUT_FMT_NV21 (0x05 << 4)
+#define VE_SECONDARY_OUT_FMT_EXT_TILED_32_NV12 (0x00 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_TILED_128_NV12 (0x01 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_YU12 (0x02 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_YV12 (0x03 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_NV12 (0x04 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_NV21 (0x05 << 0)
+
+#define VE_VERSION 0xf0
+
+#define VE_VERSION_SHIFT 16
+
+#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
+
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
+#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
+#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
+ (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+
+#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
+ (((p) << 10) & GENMASK(11, 10))
+#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
+ (((s) << 8) & GENMASK(9, 8))
+#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
+ ((v) ? BIT(7) : 0)
+#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
+ ((v) ? BIT(6) : 0)
+#define VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(v) \
+ ((v) ? BIT(5) : 0)
+#define VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(v) \
+ ((v) ? BIT(4) : 0)
+#define VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(v) \
+ ((v) ? BIT(3) : 0)
+#define VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(v) \
+ ((v) ? BIT(2) : 0)
+#define VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(v) \
+ ((v) ? BIT(1) : 0)
+#define VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(v) \
+ ((v) ? BIT(0) : 0)
+
+#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
+
+#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
+ ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
+#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
+ ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
+
+#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
+
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
+
+#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
+
+#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
+#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
+
+#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
+
+#define VE_DEC_MPEG_CTRL_MC_CACHE_EN BIT(31)
+#define VE_DEC_MPEG_CTRL_SW_VLD BIT(27)
+#define VE_DEC_MPEG_CTRL_SW_IQ_IS BIT(17)
+#define VE_DEC_MPEG_CTRL_QP_AC_DC_OUT_EN BIT(14)
+#define VE_DEC_MPEG_CTRL_ROTATE_SCALE_OUT_EN BIT(8)
+#define VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK BIT(7)
+#define VE_DEC_MPEG_CTRL_ROTATE_IRQ_EN BIT(6)
+#define VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN BIT(5)
+#define VE_DEC_MPEG_CTRL_ERROR_IRQ_EN BIT(4)
+#define VE_DEC_MPEG_CTRL_FINISH_IRQ_EN BIT(3)
+#define VE_DEC_MPEG_CTRL_IRQ_MASK \
+ (VE_DEC_MPEG_CTRL_FINISH_IRQ_EN | VE_DEC_MPEG_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_MPEG_TRIGGER (VE_ENGINE_DEC_MPEG + 0x18)
+
+#define VE_DEC_MPEG_TRIGGER_MB_BOUNDARY BIT(31)
+
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_420 (0x00 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_411 (0x01 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422 (0x02 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_444 (0x03 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422T (0x04 << 27)
+
+#define VE_DEC_MPEG_TRIGGER_MPEG1 (0x01 << 24)
+#define VE_DEC_MPEG_TRIGGER_MPEG2 (0x02 << 24)
+#define VE_DEC_MPEG_TRIGGER_JPEG (0x03 << 24)
+#define VE_DEC_MPEG_TRIGGER_MPEG4 (0x04 << 24)
+#define VE_DEC_MPEG_TRIGGER_VP62 (0x05 << 24)
+
+#define VE_DEC_MPEG_TRIGGER_VP62_AC_GET_BITS BIT(7)
+
+#define VE_DEC_MPEG_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_MPEG_TRIGGER_STCD_MPEG2 (0x01 << 4)
+#define VE_DEC_MPEG_TRIGGER_STCD_AVC (0x00 << 4)
+
+#define VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD (0x0f << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_JPEG_VLD (0x0e << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_MB (0x0d << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_ROTATE (0x0c << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_VP6_VLD (0x0b << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_MAF (0x0a << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_STCD_END (0x09 << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_STCD_BEGIN (0x08 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_MC (0x07 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_IQ (0x06 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_IDCT (0x05 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_SCALE (0x04 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_VP6 (0x03 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_VP62_AC_GET_BITS (0x02 << 0)
+
+#define VE_DEC_MPEG_STATUS (VE_ENGINE_DEC_MPEG + 0x1c)
+
+#define VE_DEC_MPEG_STATUS_START_DETECT_BUSY BIT(27)
+#define VE_DEC_MPEG_STATUS_VP6_BIT BIT(26)
+#define VE_DEC_MPEG_STATUS_VP6_BIT_BUSY BIT(25)
+#define VE_DEC_MPEG_STATUS_MAF_BUSY BIT(23)
+#define VE_DEC_MPEG_STATUS_VP6_MVP_BUSY BIT(22)
+#define VE_DEC_MPEG_STATUS_JPEG_BIT_END BIT(21)
+#define VE_DEC_MPEG_STATUS_JPEG_RESTART_ERROR BIT(20)
+#define VE_DEC_MPEG_STATUS_JPEG_MARKER BIT(19)
+#define VE_DEC_MPEG_STATUS_ROTATE_BUSY BIT(18)
+#define VE_DEC_MPEG_STATUS_DEBLOCKING_BUSY BIT(17)
+#define VE_DEC_MPEG_STATUS_SCALE_DOWN_BUSY BIT(16)
+#define VE_DEC_MPEG_STATUS_IQIS_BUF_EMPTY BIT(15)
+#define VE_DEC_MPEG_STATUS_IDCT_BUF_EMPTY BIT(14)
+#define VE_DEC_MPEG_STATUS_VE_BUSY BIT(13)
+#define VE_DEC_MPEG_STATUS_MC_BUSY BIT(12)
+#define VE_DEC_MPEG_STATUS_IDCT_BUSY BIT(11)
+#define VE_DEC_MPEG_STATUS_IQIS_BUSY BIT(10)
+#define VE_DEC_MPEG_STATUS_DCAC_BUSY BIT(9)
+#define VE_DEC_MPEG_STATUS_VLD_BUSY BIT(8)
+#define VE_DEC_MPEG_STATUS_ROTATE_SUCCESS BIT(3)
+#define VE_DEC_MPEG_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_MPEG_STATUS_ERROR BIT(1)
+#define VE_DEC_MPEG_STATUS_SUCCESS BIT(0)
+#define VE_DEC_MPEG_STATUS_CHECK_MASK \
+ (VE_DEC_MPEG_STATUS_SUCCESS | VE_DEC_MPEG_STATUS_ERROR | \
+ VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
+#define VE_DEC_MPEG_STATUS_CHECK_ERROR \
+ (VE_DEC_MPEG_STATUS_ERROR | VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
+
+#define VE_DEC_MPEG_VLD_ADDR (VE_ENGINE_DEC_MPEG + 0x28)
+
+#define VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA BIT(30)
+#define VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA BIT(29)
+#define VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA BIT(28)
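+/* Bits 31:28 of the address are packed into bits 3:0 of the register. */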
+#define VE_DEC_MPEG_VLD_ADDR_BASE(a) \
+ ({ \
+ u32 _tmp = (a); \
+ u32 _lo = _tmp & GENMASK(27, 4); \
+ u32 _hi = (_tmp >> 28) & GENMASK(3, 0); \
+ (_lo | _hi); \
+ })
+
+#define VE_DEC_MPEG_VLD_OFFSET (VE_ENGINE_DEC_MPEG + 0x2c)
+#define VE_DEC_MPEG_VLD_LEN (VE_ENGINE_DEC_MPEG + 0x30)
+#define VE_DEC_MPEG_VLD_END_ADDR (VE_ENGINE_DEC_MPEG + 0x34)
+
+#define VE_DEC_MPEG_REC_LUMA (VE_ENGINE_DEC_MPEG + 0x48)
+#define VE_DEC_MPEG_REC_CHROMA (VE_ENGINE_DEC_MPEG + 0x4c)
+#define VE_DEC_MPEG_FWD_REF_LUMA_ADDR (VE_ENGINE_DEC_MPEG + 0x50)
+#define VE_DEC_MPEG_FWD_REF_CHROMA_ADDR (VE_ENGINE_DEC_MPEG + 0x54)
+#define VE_DEC_MPEG_BWD_REF_LUMA_ADDR (VE_ENGINE_DEC_MPEG + 0x58)
+#define VE_DEC_MPEG_BWD_REF_CHROMA_ADDR (VE_ENGINE_DEC_MPEG + 0x5c)
+
+#define VE_DEC_MPEG_IQMINPUT (VE_ENGINE_DEC_MPEG + 0x80)
+
+#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
+#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
+#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
+ (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
+
+#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
+#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
+#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
+#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
new file mode 100644
index 000000000000..5c5fce678b93
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_video.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+#define CEDRUS_DECODE_SRC BIT(0)
+#define CEDRUS_DECODE_DST BIT(1)
+
+#define CEDRUS_MIN_WIDTH 16U
+#define CEDRUS_MIN_HEIGHT 16U
+#define CEDRUS_MAX_WIDTH 3840U
+#define CEDRUS_MAX_HEIGHT 2160U
+
+static struct cedrus_format cedrus_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_MPEG2_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
+ .directions = CEDRUS_DECODE_DST,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .directions = CEDRUS_DECODE_DST,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ },
+};
+
+#define CEDRUS_FORMATS_COUNT ARRAY_SIZE(cedrus_formats)
+
+static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct cedrus_ctx, fh);
+}
+
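+/* Look up a format by fourcc, direction and variant capabilities. */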
+static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions,
+ unsigned int capabilities)
+{
+ struct cedrus_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
+ fmt = &cedrus_formats[i];
+
+ if (fmt->capabilities && (fmt->capabilities & capabilities) !=
+ fmt->capabilities)
+ continue;
+
+ if (fmt->pixelformat == pixelformat &&
+ (fmt->directions & directions) != 0)
+ break;
+ }
+
+ if (i == CEDRUS_FORMATS_COUNT)
+ return NULL;
+
+ return &cedrus_formats[i];
+}
+
+static bool cedrus_check_format(u32 pixelformat, u32 directions,
+ unsigned int capabilities)
+{
+ return cedrus_find_format(pixelformat, directions, capabilities);
+}
+
+static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int width = pix_fmt->width;
+ unsigned int height = pix_fmt->height;
+ unsigned int sizeimage = pix_fmt->sizeimage;
+ unsigned int bytesperline = pix_fmt->bytesperline;
+
+ pix_fmt->field = V4L2_FIELD_NONE;
+
+ /* Limit to hardware min/max. */
+ width = clamp(width, CEDRUS_MIN_WIDTH, CEDRUS_MAX_WIDTH);
+ height = clamp(height, CEDRUS_MIN_HEIGHT, CEDRUS_MAX_HEIGHT);
+
+ switch (pix_fmt->pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ /* Zero bytes per line for encoded source. */
+ bytesperline = 0;
+
+ break;
+
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ /* 32-aligned stride. */
+ bytesperline = ALIGN(width, 32);
+
+ /* 32-aligned height. */
+ height = ALIGN(height, 32);
+
+ /* Luma plane size. */
+ sizeimage = bytesperline * height;
+
+ /* Chroma plane size. */
+ sizeimage += bytesperline * height / 2;
+
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ /* 16-aligned stride. */
+ bytesperline = ALIGN(width, 16);
+
+ /* 16-aligned height. */
+ height = ALIGN(height, 16);
+
+ /* Luma plane size. */
+ sizeimage = bytesperline * height;
+
+ /* Chroma plane size. */
+ sizeimage += bytesperline * height / 2;
+
+ break;
+ }
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int cedrus_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, CEDRUS_NAME, sizeof(cap->driver));
+ strscpy(cap->card, CEDRUS_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", CEDRUS_NAME);
+
+ return 0;
+}
+
+static int cedrus_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
+ u32 direction)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned int capabilities = dev->capabilities;
+ struct cedrus_format *fmt;
+ unsigned int i, index;
+
+ /* Index among formats that match the requested direction. */
+ index = 0;
+
+ for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
+ fmt = &cedrus_formats[i];
+
+ if (fmt->capabilities && (fmt->capabilities & capabilities) !=
+ fmt->capabilities)
+ continue;
+
+ if (!(cedrus_formats[i].directions & direction))
+ continue;
+
+ if (index == f->index)
+ break;
+
+ index++;
+ }
+
+ /* Matched format. */
+ if (i < CEDRUS_FORMATS_COUNT) {
+ f->pixelformat = cedrus_formats[i].pixelformat;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cedrus_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return cedrus_enum_fmt(file, f, CEDRUS_DECODE_DST);
+}
+
+static int cedrus_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return cedrus_enum_fmt(file, f, CEDRUS_DECODE_SRC);
+}
+
+static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+ /* Fall back to a dummy default when no format has been configured yet. */
+ if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ cedrus_prepare_format(&f->fmt.pix);
+
+ return 0;
+ }
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+ /* Fall back to a dummy default when no format has been configured yet. */
+ if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
+ f->fmt.pix.sizeimage = SZ_1K;
+ cedrus_prepare_format(&f->fmt.pix);
+
+ return 0;
+ }
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+
+ if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
+ dev->capabilities))
+ return -EINVAL;
+
+ cedrus_prepare_format(pix_fmt);
+
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+
+ if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
+ dev->capabilities))
+ return -EINVAL;
+
+ /* Source image size has to be provided by userspace. */
+ if (pix_fmt->sizeimage == 0)
+ return -EINVAL;
+
+ cedrus_prepare_format(pix_fmt);
+
+ return 0;
+}
+
+static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ int ret;
+
+ ret = cedrus_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ cedrus_dst_format_set(dev, &ctx->dst_fmt);
+
+ return 0;
+}
+
+static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ int ret;
+
+ ret = cedrus_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
+ .vidioc_querycap = cedrus_querycap,
+
+ .vidioc_enum_fmt_vid_cap = cedrus_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cedrus_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cedrus_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cedrus_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = cedrus_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = cedrus_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = cedrus_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = cedrus_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+ struct v4l2_pix_format *pix_fmt;
+ u32 directions;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ directions = CEDRUS_DECODE_SRC;
+ pix_fmt = &ctx->src_fmt;
+ } else {
+ directions = CEDRUS_DECODE_DST;
+ pix_fmt = &ctx->dst_fmt;
+ }
+
+ if (!cedrus_check_format(pix_fmt->pixelformat, directions,
+ dev->capabilities))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
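+/*
+ * Return every pending buffer to vb2 in the given state and complete the
+ * associated request controls, if any.
+ */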
+static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
+ for (;;) {
+ spin_lock_irqsave(&ctx->dev->irq_lock, flags);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
+
+ if (!vbuf)
+ return;
+
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static int cedrus_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type))
+ ctx->dst_bufs[vb->index] = vb;
+
+ return 0;
+}
+
+static void cedrus_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type))
+ ctx->dst_bufs[vb->index] = NULL;
+}
+
+static int cedrus_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+ int ret = 0;
+
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ ctx->current_codec = CEDRUS_CODEC_MPEG2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ dev->dec_ops[ctx->current_codec]->start)
+ ret = dev->dec_ops[ctx->current_codec]->start(ctx);
+
+ if (ret)
+ cedrus_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void cedrus_stop_streaming(struct vb2_queue *vq)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ dev->dec_ops[ctx->current_codec]->stop)
+ dev->dec_ops[ctx->current_codec]->stop(ctx);
+
+ cedrus_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static void cedrus_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void cedrus_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
+static struct vb2_ops cedrus_qops = {
+ .queue_setup = cedrus_queue_setup,
+ .buf_prepare = cedrus_buf_prepare,
+ .buf_init = cedrus_buf_init,
+ .buf_cleanup = cedrus_buf_cleanup,
+ .buf_queue = cedrus_buf_queue,
+ .buf_request_complete = cedrus_buf_request_complete,
+ .start_streaming = cedrus_start_streaming,
+ .stop_streaming = cedrus_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct cedrus_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &cedrus_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
+ src_vq->supports_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct cedrus_buffer);
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->ops = &cedrus_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
new file mode 100644
index 000000000000..0e4f7a8cccf2
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_VIDEO_H_
+#define _CEDRUS_VIDEO_H_
+
+struct cedrus_format {
+ u32 pixelformat;
+ u32 directions;
+ unsigned int capabilities;
+};
+
+extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+#endif
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1227872227dc..36b742932c72 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1245,8 +1245,7 @@ static int iscsit_do_rx_data(
return -1;
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
- count->iov, count->iov_count, data);
+ iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data);
while (msg_data_left(&msg)) {
rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
@@ -1302,8 +1301,7 @@ int tx_data(
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
- iov, iov_count, data);
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data);
while (msg_data_left(&msg)) {
int tx_loop = sock_sendmsg(conn->sock, &msg);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e46ca968009c..4f134b0c3e29 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -268,7 +268,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
return 0;
}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 16751ae55d7b..49b110d1b972 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -303,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
len += sg->length;
}
- iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len);
+ iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
aio_cmd->cmd = cmd;
aio_cmd->len = len;
@@ -353,7 +353,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
len += sg->length;
}
- iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
+ iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
if (is_write)
ret = vfs_iter_write(fd, &iter, &pos, 0);
else
@@ -490,7 +490,7 @@ fd_execute_write_same(struct se_cmd *cmd)
len += se_dev->dev_attrib.block_size;
}
- iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
+ iov_iter_bvec(&iter, READ, bvec, nolb, len);
ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
kfree(bvec);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4cf33e2cc705..e31e4fc31aa1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -205,19 +205,19 @@ void transport_subsystem_check_init(void)
if (sub_api_initialized)
return;
- ret = request_module("target_core_iblock");
+ ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
if (ret != 0)
pr_err("Unable to load target_core_iblock\n");
- ret = request_module("target_core_file");
+ ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
if (ret != 0)
pr_err("Unable to load target_core_file\n");
- ret = request_module("target_core_pscsi");
+ ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
if (ret != 0)
pr_err("Unable to load target_core_pscsi\n");
- ret = request_module("target_core_user");
+ ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
if (ret != 0)
pr_err("Unable to load target_core_user\n");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6ab982309e6a..d6ebc1cf6aa9 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -290,10 +290,12 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
int delay)
{
if (delay > 1000)
- mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &tz->poll_queue,
round_jiffies(msecs_to_jiffies(delay)));
else if (delay)
- mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &tz->poll_queue,
msecs_to_jiffies(delay));
else
cancel_delayed_work(&tz->poll_queue);
@@ -1102,8 +1104,9 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
mutex_unlock(&thermal_list_lock);
ida_simple_remove(&thermal_cdev_ida, cdev->id);
- device_unregister(&cdev->device);
+ device_del(&cdev->device);
thermal_cooling_device_destroy_sysfs(cdev);
+ put_device(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 79ad30d34949..b929c7ae3a27 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -24,7 +24,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/fs_uart_pd.h>
#include <linux/of_address.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 4eba17f3d293..56fc527015cb 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -19,7 +19,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index e3bff068dc3c..6a1cd03bfe39 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -19,7 +19,7 @@
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 165653a5e45d..d2652dccc699 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -12,7 +12,7 @@
#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
@@ -94,7 +94,7 @@ static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
void *virt;
- virt = alloc_bootmem_pages_nopanic(PAGE_SIZE);
+ virt = memblock_alloc_nopanic(PAGE_SIZE, PAGE_SIZE);
if (!virt)
return NULL;
@@ -191,7 +191,7 @@ static void __init xdbc_free_ring(struct xdbc_ring *ring)
if (!seg)
return;
- free_bootmem(seg->dma, PAGE_SIZE);
+ memblock_free(seg->dma, PAGE_SIZE);
ring->segment = NULL;
}
@@ -675,10 +675,10 @@ int __init early_xdbc_setup_hardware(void)
xdbc_free_ring(&xdbc.in_ring);
if (xdbc.table_dma)
- free_bootmem(xdbc.table_dma, PAGE_SIZE);
+ memblock_free(xdbc.table_dma, PAGE_SIZE);
if (xdbc.out_dma)
- free_bootmem(xdbc.out_dma, PAGE_SIZE);
+ memblock_free(xdbc.out_dma, PAGE_SIZE);
xdbc.table_base = NULL;
xdbc.out_buf = NULL;
@@ -997,8 +997,8 @@ free_and_quit:
xdbc_free_ring(&xdbc.evt_ring);
xdbc_free_ring(&xdbc.out_ring);
xdbc_free_ring(&xdbc.in_ring);
- free_bootmem(xdbc.table_dma, PAGE_SIZE);
- free_bootmem(xdbc.out_dma, PAGE_SIZE);
+ memblock_free(xdbc.table_dma, PAGE_SIZE);
+ memblock_free(xdbc.out_dma, PAGE_SIZE);
writel(0, &xdbc.xdbc_reg->control);
early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 9e33d5206d54..f2497cb96abb 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -166,7 +166,7 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
unsigned long flags;
int ret;
- ret = vb2_qbuf(&queue->queue, buf);
+ ret = vb2_qbuf(&queue->queue, NULL, buf);
if (ret < 0)
return ret;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 9756752c0681..45da3e01c7b0 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -309,7 +309,7 @@ int usbip_recv(struct socket *sock, void *buf, int size)
if (!sock || !buf || !size)
return -EINVAL;
- iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
usbip_dbg_xmit("enter\n");
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index c84333eb5eb5..9de5ed38da83 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -21,7 +21,7 @@ config VFIO_VIRQFD
menuconfig VFIO
tristate "VFIO Non-Privileged userspace driver framework"
depends on IOMMU_API
- select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM_SMMU || ARM_SMMU_V3)
+ select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)
select ANON_INODES
help
VFIO provides a framework for secure userspace device drivers.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cddb453a1ba5..50cdedfca9fe 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -434,10 +434,14 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
+ vdev->nointx || vdev->pdev->is_virtfn)
+ return 0;
+
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
- if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
- return 1;
+ return pin ? 1 : 0;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 115a36f6f403..423ea1f98441 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1180,8 +1180,10 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
return -ENOMEM;
ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
- if (ret)
+ if (ret) {
+ kfree(vdev->msi_perm);
return ret;
+ }
return len;
}
@@ -1610,6 +1612,15 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
}
/*
+ * Nag about hardware bugs, hopefully to have vendors fix them, but at least
+ * to collect a list of dependencies for the VF INTx pin quirk below.
+ */
+static const struct pci_device_id known_bogus_vf_intx_pin[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) },
+ {}
+};
+
+/*
* For each device we allocate a pci_config_map that indicates the
* capability occupying each dword and thus the struct perm_bits we
* use for read and write. We also allocate a virtualized config
@@ -1674,6 +1685,24 @@ int vfio_config_init(struct vfio_pci_device *vdev)
if (pdev->is_virtfn) {
*(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
+
+ /*
+ * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register
+ * does not apply to VFs and VFs must implement this register
+ * as read-only with value zero. Userspace is not readily able
+ * to identify whether a device is a VF and thus that the pin
+ * definition on the device is bogus should it violate this
+ * requirement. We already virtualize the pin register for
+ * other purposes, so we simply need to replace the bogus value
+ * and consider VFs when we determine INTx IRQ count.
+ */
+ if (vconfig[PCI_INTERRUPT_PIN] &&
+ !pci_match_id(known_bogus_vf_intx_pin, pdev))
+ pci_warn(pdev,
+ "Hardware bug: VF reports bogus INTx pin %d\n",
+ vconfig[PCI_INTERRUPT_PIN]);
+
+ vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
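
The quirk warns about VFs that expose a non-zero interrupt pin unless the device already sits on the known_bogus_vf_intx_pin table, then forces the virtualized pin to zero. A plain-C sketch of the same warn-unless-known-offender pattern (table contents and names invented; the kernel version uses pci_match_id()):

/*
 * Plain-C sketch of the "warn unless the device is a known offender"
 * pattern used for the VF INTx pin quirk.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_id { unsigned short vendor, device; };

static const struct dev_id known_bogus[] = {
	{ 0x8086, 0x270c },	/* example entry, mirrors the one in the patch */
	{ 0, 0 }		/* terminator */
};

static bool on_known_bogus_list(unsigned short vendor, unsigned short device)
{
	for (const struct dev_id *id = known_bogus; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return true;
	return false;
}

static unsigned char virtualize_vf_pin(unsigned short vendor,
				       unsigned short device,
				       unsigned char pin)
{
	if (pin && !on_known_bogus_list(vendor, device))
		printf("Hardware bug: VF reports bogus INTx pin %u\n", pin);
	return 0;	/* the virtual config space always shows pin == 0 */
}

int main(void)
{
	virtualize_vf_pin(0x1234, 0x5678, 3);	/* warns */
	virtualize_vf_pin(0x8086, 0x270c, 1);	/* silent, known offender */
	return 0;
}
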
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c24bb690680b..50dffe83714c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -203,6 +203,19 @@ struct vhost_scsi {
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+ int head;
+ unsigned int out, in;
+ size_t req_size, rsp_size;
+ size_t out_size, in_size;
+ u8 *target, *lunp;
+ void *req;
+ struct iov_iter out_iter;
+};
+
static struct workqueue_struct *vhost_scsi_workqueue;
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -800,24 +813,120 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc)
+{
+ int ret = -ENXIO;
+
+ vc->head = vhost_get_vq_desc(vq, vq->iov,
+ ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+ NULL, NULL);
+
+ pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+ vc->head, vc->out, vc->in);
+
+ /* On error, stop handling until the next kick. */
+ if (unlikely(vc->head < 0))
+ goto done;
+
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (vc->head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+ vhost_disable_notify(&vs->dev, vq);
+ ret = -EAGAIN;
+ }
+ goto done;
+ }
+
+ /*
+ * Get the size of request and response buffers.
+ * FIXME: Not correct for BIDI operation
+ */
+ vc->out_size = iov_length(vq->iov, vc->out);
+ vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+ /*
+ * Copy over the virtio-scsi request header, which for a
+ * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+ * single iovec may contain both the header + outgoing
+ * WRITE payloads.
+ *
+ * copy_from_iter() will advance out_iter, so that it will
+ * point at the start of the outgoing WRITE payload, if
+ * DMA_TO_DEVICE is set.
+ */
+ iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
+ ret = 0;
+
+done:
+ return ret;
+}
+
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+ if (unlikely(vc->in_size < vc->rsp_size)) {
+ vq_err(vq,
+ "Response buf too small, need min %zu bytes got %zu",
+ vc->rsp_size, vc->in_size);
+ return -EINVAL;
+ } else if (unlikely(vc->out_size < vc->req_size)) {
+ vq_err(vq,
+ "Request buf too small, need min %zu bytes got %zu",
+ vc->req_size, vc->out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+ struct vhost_scsi_tpg **tpgp)
+{
+ int ret = -EIO;
+
+ if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+ &vc->out_iter))) {
+ vq_err(vq, "Faulted on copy_from_iter\n");
+ } else if (unlikely(*vc->lunp != 1)) {
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+ } else {
+ struct vhost_scsi_tpg **vs_tpg, *tpg;
+
+ vs_tpg = vq->private_data; /* validated at handler entry */
+
+ tpg = READ_ONCE(vs_tpg[*vc->target]);
+ if (unlikely(!tpg)) {
+ vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+ } else {
+ if (tpgp)
+ *tpgp = tpg;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
struct vhost_scsi_tpg **vs_tpg, *tpg;
struct virtio_scsi_cmd_req v_req;
struct virtio_scsi_cmd_req_pi v_req_pi;
+ struct vhost_scsi_ctx vc;
struct vhost_scsi_cmd *cmd;
- struct iov_iter out_iter, in_iter, prot_iter, data_iter;
+ struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- unsigned int out = 0, in = 0;
- int head, ret, prot_bytes;
- size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
- size_t out_size, in_size;
+ int ret, prot_bytes;
u16 lun;
- u8 *target, *lunp, task_attr;
+ u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
- void *req, *cdb;
+ void *cdb;
mutex_lock(&vq->mutex);
/*
@@ -828,85 +937,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (!vs_tpg)
goto out;
+ memset(&vc, 0, sizeof(vc));
+ vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+
vhost_disable_notify(&vs->dev, vq);
for (;;) {
- head = vhost_get_vq_desc(vq, vq->iov,
- ARRAY_SIZE(vq->iov), &out, &in,
- NULL, NULL);
- pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
- head, out, in);
- /* On error, stop handling until the next kick. */
- if (unlikely(head < 0))
- break;
- /* Nothing new? Wait for eventfd to tell us they refilled. */
- if (head == vq->num) {
- if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
- vhost_disable_notify(&vs->dev, vq);
- continue;
- }
- break;
- }
- /*
- * Check for a sane response buffer so we can report early
- * errors back to the guest.
- */
- if (unlikely(vq->iov[out].iov_len < rsp_size)) {
- vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
- " size, got %zu bytes\n", vq->iov[out].iov_len);
- break;
- }
+ ret = vhost_scsi_get_desc(vs, vq, &vc);
+ if (ret)
+ goto err;
+
/*
* Setup pointers and values based upon different virtio-scsi
* request header if T10_PI is enabled in KVM guest.
*/
if (t10_pi) {
- req = &v_req_pi;
- req_size = sizeof(v_req_pi);
- lunp = &v_req_pi.lun[0];
- target = &v_req_pi.lun[1];
+ vc.req = &v_req_pi;
+ vc.req_size = sizeof(v_req_pi);
+ vc.lunp = &v_req_pi.lun[0];
+ vc.target = &v_req_pi.lun[1];
} else {
- req = &v_req;
- req_size = sizeof(v_req);
- lunp = &v_req.lun[0];
- target = &v_req.lun[1];
+ vc.req = &v_req;
+ vc.req_size = sizeof(v_req);
+ vc.lunp = &v_req.lun[0];
+ vc.target = &v_req.lun[1];
}
- /*
- * FIXME: Not correct for BIDI operation
- */
- out_size = iov_length(vq->iov, out);
- in_size = iov_length(&vq->iov[out], in);
/*
- * Copy over the virtio-scsi request header, which for a
- * ANY_LAYOUT enabled guest may span multiple iovecs, or a
- * single iovec may contain both the header + outgoing
- * WRITE payloads.
- *
- * copy_from_iter() will advance out_iter, so that it will
- * point at the start of the outgoing WRITE payload, if
- * DMA_TO_DEVICE is set.
+ * Validate the size of request and response buffers.
+ * Check for a sane response buffer so we can report
+ * early errors back to the guest.
*/
- iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
+ ret = vhost_scsi_chk_size(vq, &vc);
+ if (ret)
+ goto err;
- if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
- vq_err(vq, "Faulted on copy_from_iter\n");
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
- }
- /* virtio-scsi spec requires byte 0 of the lun to be 1 */
- if (unlikely(*lunp != 1)) {
- vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
- }
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
+ if (ret)
+ goto err;
+
+ ret = -EIO; /* bad target on any error from here on */
- tpg = READ_ONCE(vs_tpg[*target]);
- if (unlikely(!tpg)) {
- /* Target does not exist, fail the request */
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
- }
/*
* Determine data_direction by calculating the total outgoing
* iovec sizes + incoming iovec sizes vs. virtio-scsi request +
@@ -924,17 +995,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
prot_bytes = 0;
- if (out_size > req_size) {
+ if (vc.out_size > vc.req_size) {
data_direction = DMA_TO_DEVICE;
- exp_data_len = out_size - req_size;
- data_iter = out_iter;
- } else if (in_size > rsp_size) {
+ exp_data_len = vc.out_size - vc.req_size;
+ data_iter = vc.out_iter;
+ } else if (vc.in_size > vc.rsp_size) {
data_direction = DMA_FROM_DEVICE;
- exp_data_len = in_size - rsp_size;
+ exp_data_len = vc.in_size - vc.rsp_size;
- iov_iter_init(&in_iter, READ, &vq->iov[out], in,
- rsp_size + exp_data_len);
- iov_iter_advance(&in_iter, rsp_size);
+ iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
+ vc.rsp_size + exp_data_len);
+ iov_iter_advance(&in_iter, vc.rsp_size);
data_iter = in_iter;
} else {
data_direction = DMA_NONE;
@@ -950,21 +1021,20 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (data_direction != DMA_TO_DEVICE) {
vq_err(vq, "Received non zero pi_bytesout,"
" but wrong data_direction\n");
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
+ goto err;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero pi_bytesin,"
" but wrong data_direction\n");
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
+ goto err;
}
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
}
/*
- * Set prot_iter to data_iter, and advance past any
+ * Set prot_iter to data_iter and truncate it to
+ * prot_bytes, and advance data_iter past any
* preceeding prot_bytes that may be present.
*
* Also fix up the exp_data_len to reflect only the
@@ -973,6 +1043,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (prot_bytes) {
exp_data_len -= prot_bytes;
prot_iter = data_iter;
+ iov_iter_truncate(&prot_iter, prot_bytes);
iov_iter_advance(&data_iter, prot_bytes);
}
tag = vhost64_to_cpu(vq, v_req_pi.tag);
@@ -996,8 +1067,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vq_err(vq, "Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
+ goto err;
}
cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
exp_data_len + prot_bytes,
@@ -1005,13 +1075,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (IS_ERR(cmd)) {
vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
PTR_ERR(cmd));
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
+ goto err;
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = vq->iov[out];
- cmd->tvc_in_iovs = in;
+ cmd->tvc_resp_iov = vq->iov[vc.out];
+ cmd->tvc_in_iovs = vc.in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cmd->tvc_cdb[0], cmd->tvc_lun);
@@ -1019,14 +1088,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
" %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (data_direction != DMA_NONE) {
- ret = vhost_scsi_mapal(cmd,
- prot_bytes, &prot_iter,
- exp_data_len, &data_iter);
- if (unlikely(ret)) {
+ if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
+ &prot_iter, exp_data_len,
+ &data_iter))) {
vq_err(vq, "Failed to map iov to sgl\n");
vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
- vhost_scsi_send_bad_target(vs, vq, head, out);
- continue;
+ goto err;
}
}
/*
@@ -1034,7 +1101,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* complete the virtio-scsi request in TCM callback context via
* vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/
- cmd->tvc_vq_desc = head;
+ cmd->tvc_vq_desc = vc.head;
/*
* Dispatch cmd descriptor for cmwq execution in process
* context provided by vhost_scsi_workqueue. This also ensures
@@ -1043,6 +1110,166 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
INIT_WORK(&cmd->work, vhost_scsi_submission_work);
queue_work(vhost_scsi_workqueue, &cmd->work);
+ ret = 0;
+err:
+ /*
+ * ENXIO: No more requests, or read error, wait for next kick
+ * EINVAL: Invalid response buffer, drop the request
+ * EIO: Respond with bad target
+ * EAGAIN: Pending request
+ */
+ if (ret == -ENXIO)
+ break;
+ else if (ret == -EIO)
+ vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ }
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void
+vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc)
+{
+ struct virtio_scsi_ctrl_tmf_resp __user *resp;
+ struct virtio_scsi_ctrl_tmf_resp rsp;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ resp = vq->iov[vc->out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
+}
+
+static void
+vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc)
+{
+ struct virtio_scsi_ctrl_an_resp __user *resp;
+ struct virtio_scsi_ctrl_an_resp rsp;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+ memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
+ rsp.response = VIRTIO_SCSI_S_OK;
+ resp = vq->iov[vc->out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+}
+
+static void
+vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+ union {
+ __virtio32 type;
+ struct virtio_scsi_ctrl_an_req an;
+ struct virtio_scsi_ctrl_tmf_req tmf;
+ } v_req;
+ struct vhost_scsi_ctx vc;
+ size_t typ_size;
+ int ret;
+
+ mutex_lock(&vq->mutex);
+ /*
+ * We can handle the vq only after the endpoint is setup by calling the
+ * VHOST_SCSI_SET_ENDPOINT ioctl.
+ */
+ if (!vq->private_data)
+ goto out;
+
+ memset(&vc, 0, sizeof(vc));
+
+ vhost_disable_notify(&vs->dev, vq);
+
+ for (;;) {
+ ret = vhost_scsi_get_desc(vs, vq, &vc);
+ if (ret)
+ goto err;
+
+ /*
+ * Get the request type first in order to setup
+ * other parameters dependent on the type.
+ */
+ vc.req = &v_req.type;
+ typ_size = sizeof(v_req.type);
+
+ if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+ &vc.out_iter))) {
+ vq_err(vq, "Faulted on copy_from_iter tmf type\n");
+ /*
+ * The size of the response buffer depends on the
+ * request type and must be validated against it.
+ * Since the request type is not known, don't send
+ * a response.
+ */
+ continue;
+ }
+
+ switch (v_req.type) {
+ case VIRTIO_SCSI_T_TMF:
+ vc.req = &v_req.tmf;
+ vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+ vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ vc.lunp = &v_req.tmf.lun[0];
+ vc.target = &v_req.tmf.lun[1];
+ break;
+ case VIRTIO_SCSI_T_AN_QUERY:
+ case VIRTIO_SCSI_T_AN_SUBSCRIBE:
+ vc.req = &v_req.an;
+ vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+ vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ vc.lunp = &v_req.an.lun[0];
+ vc.target = NULL;
+ break;
+ default:
+ vq_err(vq, "Unknown control request %d", v_req.type);
+ continue;
+ }
+
+ /*
+ * Validate the size of request and response buffers.
+ * Check for a sane response buffer so we can report
+ * early errors back to the guest.
+ */
+ ret = vhost_scsi_chk_size(vq, &vc);
+ if (ret)
+ goto err;
+
+ /*
+ * Get the rest of the request now that its size is known.
+ */
+ vc.req += typ_size;
+ vc.req_size -= typ_size;
+
+ ret = vhost_scsi_get_req(vq, &vc, NULL);
+ if (ret)
+ goto err;
+
+ if (v_req.type == VIRTIO_SCSI_T_TMF)
+ vhost_scsi_send_tmf_reject(vs, vq, &vc);
+ else
+ vhost_scsi_send_an_resp(vs, vq, &vc);
+err:
+ /*
+ * ENXIO: No more requests, or read error, wait for next kick
+ * EINVAL: Invalid response buffer, drop the request
+ * EIO: Respond with bad target
+ * EAGAIN: Pending request
+ */
+ if (ret == -ENXIO)
+ break;
+ else if (ret == -EIO)
+ vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
}
out:
mutex_unlock(&vq->mutex);
@@ -1050,7 +1277,12 @@ out:
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
pr_debug("%s: The handling func for control queue.\n", __func__);
+ vhost_scsi_ctl_handle_vq(vs, vq);
}
static void
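
Both queue handlers now funnel failures to a single err: label and choose the follow-up from the errno: -ENXIO stops until the next kick, -EAGAIN retries the loop, -EINVAL drops the request, and -EIO answers with a bad-target response. A compilable skeleton of that dispatch (the three helpers are stubs invented for the example; only the errno-to-action mapping mirrors the patch):

/*
 * Skeleton of the per-iteration error dispatch shared by the request and
 * control queue handlers.
 */
#include <errno.h>
#include <stdio.h>

static int get_desc(void)         { return -ENXIO; }	/* stub: queue empty */
static int parse_and_submit(void) { return 0; }		/* stub: request handled */
static void send_bad_target(void) { puts("bad target response"); }

static void handle_vq(void)
{
	int ret;

	for (;;) {
		ret = get_desc();
		if (ret)
			goto err;

		ret = parse_and_submit();
err:
		/*
		 * -ENXIO:  no more requests or read error, wait for next kick
		 * -EAGAIN: more work raced in, go around the loop again
		 * -EINVAL: response buffer unusable, silently drop the request
		 * -EIO:    answer the guest with a bad-target response
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			send_bad_target();
	}
}

int main(void)
{
	handle_vq();	/* returns immediately: the stub queue is empty */
	return 0;
}
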
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f52008bb8df7..3a5f81a66d34 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -30,6 +30,7 @@
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
+#include <linux/nospec.h>
#include "vhost.h"
@@ -1387,6 +1388,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
if (idx >= d->nvqs)
return -ENOBUFS;
+ idx = array_index_nospec(idx, d->nvqs);
vq = d->vqs[idx];
mutex_lock(&vq->mutex);
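
array_index_nospec() clamps an index that has already passed its bounds check so it cannot be used out of range under speculation (Spectre v1). A kernel-style sketch of the usual check-then-clamp pattern (structure and names invented):

/*
 * Kernel-style sketch of the Spectre-v1 pattern: bounds check first, then
 * clamp the index with array_index_nospec() before dereferencing.
 */
#include <linux/nospec.h>
#include <linux/err.h>
#include <linux/errno.h>

struct example_dev {
	unsigned int nvqs;
	void **vqs;
};

static void *example_get_vq(struct example_dev *d, unsigned int idx)
{
	if (idx >= d->nvqs)
		return ERR_PTR(-ENOBUFS);

	/* No-op architecturally; prevents speculative use of a bad idx. */
	idx = array_index_nospec(idx, d->nvqs);
	return d->vqs[idx];
}
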
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 591a13a59787..e413f54208f4 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2,6 +2,12 @@
# fbdev configuration
#
+config FB_CMDLINE
+ bool
+
+config FB_NOTIFY
+ bool
+
menuconfig FB
tristate "Support for frame buffer devices"
select FB_CMDLINE
@@ -41,7 +47,6 @@ menuconfig FB
config FIRMWARE_EDID
bool "Enable firmware EDID"
depends on FB
- default n
---help---
This enables access to the EDID transferred from the firmware.
On the i386, this is from the Video BIOS. Enable this if DDC/I2C
@@ -54,23 +59,15 @@ config FIRMWARE_EDID
combination with certain motherboards and monitors are known to
suffer from this problem.
-config FB_CMDLINE
- bool
-
-config FB_NOTIFY
- bool
-
config FB_DDC
tristate
depends on FB
select I2C_ALGOBIT
select I2C
- default n
config FB_BOOT_VESA_SUPPORT
bool
depends on FB
- default n
---help---
If true, at least one selected framebuffer driver can take advantage
of VESA video modes set at an early boot stage via the vga= parameter.
@@ -78,7 +75,6 @@ config FB_BOOT_VESA_SUPPORT
config FB_CFB_FILLRECT
tristate
depends on FB
- default n
---help---
Include the cfb_fillrect function for generic software rectangle
filling. This is used by drivers that don't provide their own
@@ -87,7 +83,6 @@ config FB_CFB_FILLRECT
config FB_CFB_COPYAREA
tristate
depends on FB
- default n
---help---
Include the cfb_copyarea function for generic software area copying.
This is used by drivers that don't provide their own (accelerated)
@@ -96,7 +91,6 @@ config FB_CFB_COPYAREA
config FB_CFB_IMAGEBLIT
tristate
depends on FB
- default n
---help---
Include the cfb_imageblit function for generic software image
blitting. This is used by drivers that don't provide their own
@@ -105,7 +99,6 @@ config FB_CFB_IMAGEBLIT
config FB_CFB_REV_PIXELS_IN_BYTE
bool
depends on FB
- default n
---help---
Allow generic frame-buffer functions to work on displays with 1, 2
and 4 bits per pixel depths which has opposite order of pixels in
@@ -114,7 +107,6 @@ config FB_CFB_REV_PIXELS_IN_BYTE
config FB_SYS_FILLRECT
tristate
depends on FB
- default n
---help---
Include the sys_fillrect function for generic software rectangle
filling. This is used by drivers that don't provide their own
@@ -123,7 +115,6 @@ config FB_SYS_FILLRECT
config FB_SYS_COPYAREA
tristate
depends on FB
- default n
---help---
Include the sys_copyarea function for generic software area copying.
This is used by drivers that don't provide their own (accelerated)
@@ -132,7 +123,6 @@ config FB_SYS_COPYAREA
config FB_SYS_IMAGEBLIT
tristate
depends on FB
- default n
---help---
Include the sys_imageblit function for generic software image
blitting. This is used by drivers that don't provide their own
@@ -141,7 +131,6 @@ config FB_SYS_IMAGEBLIT
config FB_PROVIDE_GET_FB_UNMAPPED_AREA
bool
depends on FB
- default n
---help---
Allow generic frame-buffer to provide get_fb_unmapped_area
function.
@@ -173,7 +162,6 @@ endchoice
config FB_SYS_FOPS
tristate
depends on FB
- default n
config FB_DEFERRED_IO
bool
@@ -187,7 +175,6 @@ config FB_HECUBA
config FB_SVGALIB
tristate
depends on FB
- default n
---help---
Common utility functions useful to fbdev drivers of VGA-based
cards.
@@ -195,19 +182,16 @@ config FB_SVGALIB
config FB_MACMODES
tristate
depends on FB
- default n
config FB_BACKLIGHT
bool
depends on FB
select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
- default n
config FB_MODE_HELPERS
bool "Enable Video Mode Handling Helpers"
depends on FB
- default n
---help---
This enables functions for handling video modes using the
Generalized Timing Formula and the EDID parser. A few drivers rely
@@ -218,7 +202,6 @@ config FB_MODE_HELPERS
config FB_TILEBLITTING
bool "Enable Tile Blitting Support"
depends on FB
- default n
---help---
This enables tile blitting. Tile blitting is a drawing technique
where the screen is divided into rectangular sections (tiles), whereas
@@ -329,16 +312,9 @@ config FB_ACORN
hardware found in Acorn RISC PCs and other ARM-based machines. If
unsure, say N.
-config FB_CLPS711X_OLD
- tristate
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
-
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
- select FB_CLPS711X_OLD if ARCH_CLPS711X && !ARCH_MULTIPLATFORM
select BACKLIGHT_LCD_SUPPORT
select FB_MODE_HELPERS
select FB_SYS_FILLRECT
@@ -936,7 +912,6 @@ config FB_NVIDIA_I2C
config FB_NVIDIA_DEBUG
bool "Lots of debug output"
depends on FB_NVIDIA
- default n
help
Say Y here if you want the nVidia driver to output all sorts
of debugging information to provide to the maintainer when
@@ -983,7 +958,6 @@ config FB_RIVA_I2C
config FB_RIVA_DEBUG
bool "Lots of debug output"
depends on FB_RIVA
- default n
help
Say Y here if you want the Riva driver to output all sorts
of debugging information to provide to the maintainer when
@@ -1266,7 +1240,6 @@ config FB_RADEON_BACKLIGHT
config FB_RADEON_DEBUG
bool "Lots of debug output from Radeon driver"
depends on FB_RADEON
- default n
help
Say Y here if you want the Radeon driver to output all sorts
of debugging information to provide to the maintainer when
@@ -1399,7 +1372,6 @@ config FB_SAVAGE_I2C
config FB_SAVAGE_ACCEL
bool "Enable Console Acceleration"
depends on FB_SAVAGE
- default n
help
This option will compile in console acceleration support. If
the resulting framebuffer console has bothersome glitches, then
@@ -1456,8 +1428,6 @@ if FB_VIA
config FB_VIA_DIRECT_PROCFS
bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
- depends on FB_VIA
- default n
help
Allow direct hardware access to some output registers via procfs.
This is dangerous but may provide the only chance to get the
@@ -1466,8 +1436,6 @@ config FB_VIA_DIRECT_PROCFS
config FB_VIA_X_COMPATIBILITY
bool "X server compatibility"
- depends on FB_VIA
- default n
help
This option reduces the functionality (power saving, ...) of the
framebuffer to avoid negative impact on the OpenChrome X server.
@@ -1692,7 +1660,6 @@ config FB_WM8505
config FB_WMT_GE_ROPS
bool "VT8500/WM8xxx accelerated raster ops support"
depends on (FB = y) && (FB_VT8500 || FB_WM8505)
- default n
help
This adds support for accelerated raster operations on the
VIA VT8500 and Wondermedia 85xx series SoCs.
@@ -1802,17 +1769,14 @@ config FB_PXA
config FB_PXA_OVERLAY
bool "Support PXA27x/PXA3xx Overlay(s) as framebuffer"
- default n
depends on FB_PXA && (PXA27x || PXA3xx)
config FB_PXA_SMARTPANEL
bool "PXA Smartpanel LCD support"
- default n
depends on FB_PXA
config FB_PXA_PARAMETERS
bool "PXA LCD command line parameters"
- default n
depends on FB_PXA
---help---
Enable the use of kernel command line or module parameters
@@ -1850,7 +1814,6 @@ config FB_MBX
config FB_MBX_DEBUG
bool "Enable debugging info via debugfs"
depends on FB_MBX && DEBUG_FS
- default n
---help---
Enable this if you want debugging information using the debug
filesystem (debugfs)
@@ -2240,7 +2203,7 @@ config FB_MX3
config FB_BROADSHEET
tristate "E-Ink Broadsheet/Epson S1D13521 controller support"
- depends on FB
+ depends on FB && (ARCH_PXA || COMPILE_TEST)
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -2308,10 +2271,6 @@ config FB_SIMPLE
Configuration re: surface address, size, and format must be provided
through device tree, or plain old platform data.
-source "drivers/video/fbdev/omap/Kconfig"
-source "drivers/video/fbdev/omap2/Kconfig"
-source "drivers/video/fbdev/mmp/Kconfig"
-
config FB_SSD1307
tristate "Solomon SSD1307 framebuffer support"
depends on FB && I2C
@@ -2341,3 +2300,7 @@ config FB_SM712
This driver is also available as a module. The module will be
called sm712fb. If you want to compile it as a module, say M
here and read <file:Documentation/kbuild/modules.txt>.
+
+source "drivers/video/fbdev/omap/Kconfig"
+source "drivers/video/fbdev/omap2/Kconfig"
+source "drivers/video/fbdev/mmp/Kconfig"
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 13c900320c2c..846b0c9ea9db 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o
obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o
obj-$(CONFIG_FB_ARC) += arcfb.o
obj-$(CONFIG_FB_CLPS711X) += clps711x-fb.o
-obj-$(CONFIG_FB_CLPS711X_OLD) += clps711xfb.o
obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
obj-$(CONFIG_FB_GRVGA) += grvga.o
obj-$(CONFIG_FB_PM2) += pm2fb.o
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 7e87d0d61658..a48741aab240 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -419,6 +419,8 @@ static int arcfb_ioctl(struct fb_info *info,
schedule();
finish_wait(&arcfb_waitq, &wait);
}
+ /* fall through */
+
case FBIO_GETCONTROL2:
{
unsigned char ctl2;
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 076d24afbd72..4ed55e6bbb84 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <video/of_videomode.h>
#include <video/of_display_timing.h>
#include <linux/regulator/consumer.h>
#include <video/videomode.h>
@@ -1028,11 +1029,11 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
struct device *dev = &sinfo->pdev->dev;
struct device_node *np =dev->of_node;
struct device_node *display_np;
- struct device_node *timings_np;
- struct display_timings *timings;
struct atmel_lcdfb_power_ctrl_gpio *og;
bool is_gpio_power = false;
+ struct fb_videomode fb_vm;
struct gpio_desc *gpiod;
+ struct videomode vm;
int ret = -ENOENT;
int i;
@@ -1105,44 +1106,18 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
pdata->lcdcon_pol_negative = of_property_read_bool(display_np, "atmel,lcdcon-backlight-inverted");
- timings = of_get_display_timings(display_np);
- if (!timings) {
- dev_err(dev, "failed to get display timings\n");
- ret = -EINVAL;
+ ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE);
+ if (ret) {
+ dev_err(dev, "failed to get videomode from DT\n");
goto put_display_node;
}
- timings_np = of_get_child_by_name(display_np, "display-timings");
- if (!timings_np) {
- dev_err(dev, "failed to find display-timings node\n");
- ret = -ENODEV;
+ ret = fb_videomode_from_videomode(&vm, &fb_vm);
+ if (ret < 0)
goto put_display_node;
- }
- for (i = 0; i < of_get_child_count(timings_np); i++) {
- struct videomode vm;
- struct fb_videomode fb_vm;
-
- ret = videomode_from_timings(timings, &vm, i);
- if (ret < 0)
- goto put_timings_node;
- ret = fb_videomode_from_videomode(&vm, &fb_vm);
- if (ret < 0)
- goto put_timings_node;
-
- fb_add_videomode(&fb_vm, &info->modelist);
- }
-
- /*
- * FIXME: Make sure we are not referencing any fields in display_np
- * and timings_np and drop our references to them before returning to
- * avoid leaking the nodes on probe deferral and driver unbind.
- */
-
- return 0;
+ fb_add_videomode(&fb_vm, &info->modelist);
-put_timings_node:
- of_node_put(timings_np);
put_display_node:
of_node_put(display_np);
return ret;
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index d09bab3bf224..e5a347c58180 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -147,6 +147,7 @@ struct atyfb_par {
u16 pci_id;
u32 accel_flags;
int blitter_may_be_busy;
+ unsigned fifo_space;
int asleep;
int lock_blank;
unsigned long res_start;
@@ -346,10 +347,13 @@ extern int aty_init_cursor(struct fb_info *info);
* Hardware acceleration
*/
-static inline void wait_for_fifo(u16 entries, const struct atyfb_par *par)
+static inline void wait_for_fifo(u16 entries, struct atyfb_par *par)
{
- while ((aty_ld_le32(FIFO_STAT, par) & 0xffff) >
- ((u32) (0x8000 >> entries)));
+ unsigned fifo_space = par->fifo_space;
+ while (entries > fifo_space) {
+ fifo_space = 16 - fls(aty_ld_le32(FIFO_STAT, par) & 0xffff);
+ }
+ par->fifo_space = fifo_space - entries;
}
static inline void wait_for_idle(struct atyfb_par *par)
@@ -359,7 +363,7 @@ static inline void wait_for_idle(struct atyfb_par *par)
par->blitter_may_be_busy = 0;
}
-extern void aty_reset_engine(const struct atyfb_par *par);
+extern void aty_reset_engine(struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
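
The reworked wait_for_fifo() converts FIFO_STAT into a count of free entries with 16 - fls(status) and caches the leftover headroom in par->fifo_space, so the register is only polled when the cache runs dry. A standalone restatement of that arithmetic (fls() emulated with a compiler builtin; values invented):

/*
 * Model of the FIFO-space arithmetic: the hardware sets one status bit per
 * occupied slot, so the number of free entries is 16 minus the position of
 * the highest set bit. Unlike real hardware, the "register" here is a
 * constant, so callers must pass a status word with enough free slots.
 */
#include <stdio.h>

static unsigned int fls16(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int fifo_space;	/* cached headroom, as in par->fifo_space */

static void wait_for_fifo_model(unsigned int entries, unsigned int fifo_stat)
{
	unsigned int space = fifo_space;

	/* Only poll the (simulated) register when the cache runs dry. */
	while (entries > space)
		space = 16 - fls16(fifo_stat & 0xffff);

	fifo_space = space - entries;
}

int main(void)
{
	wait_for_fifo_model(4, 0x0800);	/* fls() == 12 -> 4 slots free */
	printf("cached space after reserving 4: %u\n", fifo_space);
	return 0;
}
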
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 05111e90f168..b6fe103df145 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -1480,24 +1480,28 @@ static int atyfb_set_par(struct fb_info *info)
base = 0x2000;
printk("debug atyfb: Mach64 non-shadow register values:");
for (i = 0; i < 256; i = i+4) {
- if (i % 16 == 0)
- printk("\ndebug atyfb: 0x%04X: ", base + i);
- printk(" %08X", aty_ld_le32(i, par));
+ if (i % 16 == 0) {
+ pr_cont("\n");
+ printk("debug atyfb: 0x%04X: ", base + i);
+ }
+ pr_cont(" %08X", aty_ld_le32(i, par));
}
- printk("\n\n");
+ pr_cont("\n\n");
#ifdef CONFIG_FB_ATY_CT
/* PLL registers */
base = 0x00;
printk("debug atyfb: Mach64 PLL register values:");
for (i = 0; i < 64; i++) {
- if (i % 16 == 0)
- printk("\ndebug atyfb: 0x%02X: ", base + i);
+ if (i % 16 == 0) {
+ pr_cont("\n");
+ printk("debug atyfb: 0x%02X: ", base + i);
+ }
if (i % 4 == 0)
- printk(" ");
- printk("%02X", aty_ld_pll_ct(i, par));
+ pr_cont(" ");
+ pr_cont("%02X", aty_ld_pll_ct(i, par));
}
- printk("\n\n");
+ pr_cont("\n\n");
#endif /* CONFIG_FB_ATY_CT */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
@@ -1509,19 +1513,19 @@ static int atyfb_set_par(struct fb_info *info)
for (i = 0; i <= POWER_MANAGEMENT; i++) {
if (i == EXT_VERT_STRETCH)
continue;
- printk("\ndebug atyfb: 0x%04X: ",
+ pr_cont("\ndebug atyfb: 0x%04X: ",
lt_lcd_regs[i]);
- printk(" %08X", aty_ld_lcd(i, par));
+ pr_cont(" %08X", aty_ld_lcd(i, par));
}
} else {
for (i = 0; i < 64; i++) {
if (i % 4 == 0)
- printk("\ndebug atyfb: 0x%02X: ",
+ pr_cont("\ndebug atyfb: 0x%02X: ",
base + i);
- printk(" %08X", aty_ld_lcd(i, par));
+ pr_cont(" %08X", aty_ld_lcd(i, par));
}
}
- printk("\n\n");
+ pr_cont("\n\n");
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
@@ -2597,8 +2601,8 @@ static int aty_init(struct fb_info *info)
aty_ld_le32(DSP_ON_OFF, par),
aty_ld_le32(CLOCK_CNTL, par));
for (i = 0; i < 40; i++)
- printk(" %02x", aty_ld_pll_ct(i, par));
- printk("\n");
+ pr_cont(" %02x", aty_ld_pll_ct(i, par));
+ pr_cont("\n");
}
#endif
if (par->pll_ops->init_pll)
diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
index 2541a0e0de76..e4b2c89baee2 100644
--- a/drivers/video/fbdev/aty/mach64_accel.c
+++ b/drivers/video/fbdev/aty/mach64_accel.c
@@ -37,7 +37,7 @@ static u32 rotation24bpp(u32 dx, u32 direction)
return ((rotation << 8) | DST_24_ROTATION_ENABLE);
}
-void aty_reset_engine(const struct atyfb_par *par)
+void aty_reset_engine(struct atyfb_par *par)
{
/* reset engine */
aty_st_le32(GEN_TEST_CNTL,
@@ -50,6 +50,8 @@ void aty_reset_engine(const struct atyfb_par *par)
/* HOST errors */
aty_st_le32(BUS_CNTL,
aty_ld_le32(BUS_CNTL, par) | BUS_HOST_ERR_ACK | BUS_FIFO_ERR_ACK, par);
+
+ par->fifo_space = 0;
}
static void reset_GTC_3D_engine(const struct atyfb_par *par)
@@ -127,7 +129,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
/* set host attributes */
wait_for_fifo(13, par);
- aty_st_le32(HOST_CNTL, 0, par);
+ aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
/* set pattern attributes */
aty_st_le32(PAT_REG0, 0, par);
@@ -233,7 +235,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
rotation = rotation24bpp(dx, direction);
}
- wait_for_fifo(4, par);
+ wait_for_fifo(5, par);
+ aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);
aty_st_le32(SRC_Y_X, (sx << 16) | sy, par);
aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par);
@@ -269,7 +272,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT);
}
- wait_for_fifo(3, par);
+ wait_for_fifo(4, par);
+ aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
aty_st_le32(DP_FRGD_CLR, color, par);
aty_st_le32(DP_SRC,
BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE,
@@ -284,7 +288,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width;
- u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix;
+ u32 pix_width, rotation = 0, src, mix;
if (par->asleep)
return;
@@ -296,8 +300,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
- host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
+ pix_width = par->crtc.dp_pix_width;
switch (image->depth) {
case 1:
@@ -345,7 +348,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
* since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit
* this hwaccelerated triple has an issue with not aligned data
*/
- if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
+ if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
pix_width |= DP_HOST_TRIPLE_EN;
}
@@ -370,19 +373,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D;
}
- wait_for_fifo(6, par);
- aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par);
+ wait_for_fifo(5, par);
aty_st_le32(DP_PIX_WIDTH, pix_width, par);
aty_st_le32(DP_MIX, mix, par);
aty_st_le32(DP_SRC, src, par);
- aty_st_le32(HOST_CNTL, host_cntl, par);
+ aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par);
draw_rect(dx, dy, width, image->height, par);
src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
/* manual triple each pixel */
- if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
+ if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
int inbit, outbit, mult24, byte_id_in_dword, width;
u8 *pbitmapin = (u8*)image->data, *pbitmapout;
u32 hostdword;
@@ -415,7 +417,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
}
}
wait_for_fifo(1, par);
- aty_st_le32(HOST_DATA0, hostdword, par);
+ aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par);
}
} else {
u32 *pbitmap, dwords = (src_bytes + 3) / 4;
@@ -424,8 +426,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
}
}
-
- /* restore pix_width */
- wait_for_fifo(1, par);
- aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
}
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index 8de88b129b62..9af54c2368fd 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -355,9 +355,7 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void cg14_init_fix(struct fb_info *info, int linebytes,
struct device_node *dp)
{
- const char *name = dp->name;
-
- strlcpy(info->fix.id, name, sizeof(info->fix.id));
+ snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 6c334260cf53..1bd95b02f3aa 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -246,7 +246,7 @@ static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void cg3_init_fix(struct fb_info *info, int linebytes,
struct device_node *dp)
{
- strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
+ snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/clps711xfb.c b/drivers/video/fbdev/clps711xfb.c
deleted file mode 100644
index 7693aea8fb23..000000000000
--- a/drivers/video/fbdev/clps711xfb.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * linux/drivers/video/clps711xfb.c
- *
- * Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Framebuffer driver for the CLPS7111 and EP7212 processors.
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <linux/uaccess.h>
-
-struct fb_info *cfb;
-
-#define CMAP_MAX_SIZE 16
-
-/*
- * LCD AC Prescale. This comes from the LCD panel manufacturers specifications.
- * This determines how many clocks + 1 of CL1 before the M signal toggles.
- * The number of lines on the display must not be divisible by this number.
- */
-static unsigned int lcd_ac_prescale = 13;
-
-/*
- * Set a single color register. Return != 0 for invalid regno.
- */
-static int
-clps7111fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- unsigned int level, mask, shift, pal;
-
- if (regno >= (1 << info->var.bits_per_pixel))
- return 1;
-
- /* gray = 0.30*R + 0.58*G + 0.11*B */
- level = (red * 77 + green * 151 + blue * 28) >> 20;
-
- /*
- * On an LCD, a high value is dark, while a low value is light.
- * So we invert the level.
- *
- * This isn't true on all machines, so we only do it on EDB7211.
- * --rmk
- */
- if (machine_is_edb7211()) {
- level = 15 - level;
- }
-
- shift = 4 * (regno & 7);
- level <<= shift;
- mask = 15 << shift;
- level &= mask;
-
- regno = regno < 8 ? PALLSW : PALMSW;
-
- pal = clps_readl(regno);
- pal = (pal & ~mask) | level;
- clps_writel(pal, regno);
-
- return 0;
-}
-
-/*
- * Validate the purposed mode.
- */
-static int
-clps7111fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- var->transp.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->red.msb_right = 0;
- var->red.offset = 0;
- var->red.length = var->bits_per_pixel;
- var->green = var->red;
- var->blue = var->red;
-
- if (var->bits_per_pixel > 4)
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * Set the hardware state.
- */
-static int
-clps7111fb_set_par(struct fb_info *info)
-{
- unsigned int lcdcon, syscon, pixclock;
-
- switch (info->var.bits_per_pixel) {
- case 1:
- info->fix.visual = FB_VISUAL_MONO01;
- break;
- case 2:
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- break;
- case 4:
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- break;
- }
-
- info->fix.line_length = info->var.xres_virtual * info->var.bits_per_pixel / 8;
-
- lcdcon = (info->var.xres_virtual * info->var.yres_virtual * info->var.bits_per_pixel) / 128 - 1;
- lcdcon |= ((info->var.xres_virtual / 16) - 1) << 13;
- lcdcon |= lcd_ac_prescale << 25;
-
- /*
- * Calculate pixel prescale value from the pixclock. This is:
- * 36.864MHz / pixclock_mhz - 1.
- * However, pixclock is in picoseconds, so this ends up being:
- * 36864000 * pixclock_ps / 10^12 - 1
- * and this will overflow the 32-bit math. We perform this as
- * (9 * 4096000 == 36864000):
- * pixclock_ps * 9 * (4096000 / 10^12) - 1
- */
- pixclock = 9 * info->var.pixclock / 244140 - 1;
- lcdcon |= pixclock << 19;
-
- if (info->var.bits_per_pixel == 4)
- lcdcon |= LCDCON_GSMD;
- if (info->var.bits_per_pixel >= 2)
- lcdcon |= LCDCON_GSEN;
-
- /*
- * LCDCON must only be changed while the LCD is disabled
- */
- syscon = clps_readl(SYSCON1);
- clps_writel(syscon & ~SYSCON1_LCDEN, SYSCON1);
- clps_writel(lcdcon, LCDCON);
- clps_writel(syscon | SYSCON1_LCDEN, SYSCON1);
- return 0;
-}
-
-static int clps7111fb_blank(int blank, struct fb_info *info)
-{
- /* Enable/Disable LCD controller. */
- if (blank)
- clps_writel(clps_readl(SYSCON1) & ~SYSCON1_LCDEN, SYSCON1);
- else
- clps_writel(clps_readl(SYSCON1) | SYSCON1_LCDEN, SYSCON1);
-
- return 0;
-}
-
-static struct fb_ops clps7111fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = clps7111fb_check_var,
- .fb_set_par = clps7111fb_set_par,
- .fb_setcolreg = clps7111fb_setcolreg,
- .fb_blank = clps7111fb_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
-static void clps711x_guess_lcd_params(struct fb_info *info)
-{
- unsigned int lcdcon, syscon, size;
- unsigned long phys_base = PAGE_OFFSET;
- void *virt_base = (void *)PAGE_OFFSET;
-
- info->var.xres_virtual = 640;
- info->var.yres_virtual = 240;
- info->var.bits_per_pixel = 4;
- info->var.activate = FB_ACTIVATE_NOW;
- info->var.height = -1;
- info->var.width = -1;
- info->var.pixclock = 93006; /* 10.752MHz pixel clock */
-
- /*
- * If the LCD controller is already running, decode the values
- * in LCDCON to xres/yres/bpp/pixclock/acprescale
- */
- syscon = clps_readl(SYSCON1);
- if (syscon & SYSCON1_LCDEN) {
- lcdcon = clps_readl(LCDCON);
-
- /*
- * Decode GSMD and GSEN bits to bits per pixel
- */
- switch (lcdcon & (LCDCON_GSMD | LCDCON_GSEN)) {
- case LCDCON_GSMD | LCDCON_GSEN:
- info->var.bits_per_pixel = 4;
- break;
-
- case LCDCON_GSEN:
- info->var.bits_per_pixel = 2;
- break;
-
- default:
- info->var.bits_per_pixel = 1;
- break;
- }
-
- /*
- * Decode xres/yres
- */
- info->var.xres_virtual = (((lcdcon >> 13) & 0x3f) + 1) * 16;
- info->var.yres_virtual = (((lcdcon & 0x1fff) + 1) * 128) /
- (info->var.xres_virtual *
- info->var.bits_per_pixel);
-
- /*
- * Calculate pixclock
- */
- info->var.pixclock = (((lcdcon >> 19) & 0x3f) + 1) * 244140 / 9;
-
- /*
- * Grab AC prescale
- */
- lcd_ac_prescale = (lcdcon >> 25) & 0x1f;
- }
-
- info->var.xres = info->var.xres_virtual;
- info->var.yres = info->var.yres_virtual;
- info->var.grayscale = info->var.bits_per_pixel > 1;
-
- size = info->var.xres * info->var.yres * info->var.bits_per_pixel / 8;
-
- /*
- * Might be worth checking to see if we can use the on-board
- * RAM if size here...
- * CLPS7110 - no on-board SRAM
- * EP7212 - 38400 bytes
- */
- if (size <= 38400) {
- printk(KERN_INFO "CLPS711xFB: could use on-board SRAM?\n");
- }
-
- if ((syscon & SYSCON1_LCDEN) == 0) {
- /*
- * The display isn't running. Ensure that
- * the display memory is empty.
- */
- memset(virt_base, 0, size);
- }
-
- info->screen_base = virt_base;
- info->fix.smem_start = phys_base;
- info->fix.smem_len = PAGE_ALIGN(size);
- info->fix.type = FB_TYPE_PACKED_PIXELS;
-}
-
-static int clps711x_fb_probe(struct platform_device *pdev)
-{
- int err = -ENOMEM;
-
- if (fb_get_options("clps711xfb", NULL))
- return -ENODEV;
-
- cfb = kzalloc(sizeof(*cfb), GFP_KERNEL);
- if (!cfb)
- goto out;
-
- strcpy(cfb->fix.id, "clps711x");
-
- cfb->fbops = &clps7111fb_ops;
- cfb->flags = FBINFO_DEFAULT;
-
- clps711x_guess_lcd_params(cfb);
-
- fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0);
-
- err = register_framebuffer(cfb);
-
-out: return err;
-}
-
-static int clps711x_fb_remove(struct platform_device *pdev)
-{
- unregister_framebuffer(cfb);
- kfree(cfb);
-
- return 0;
-}
-
-static struct platform_driver clps711x_fb_driver = {
- .driver = {
- .name = "video-clps711x",
- },
- .probe = clps711x_fb_probe,
- .remove = clps711x_fb_remove,
-};
-module_platform_driver(clps711x_fb_driver);
-
-MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
-MODULE_DESCRIPTION("CLPS711X framebuffer driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 852d86c1c527..dd3128990776 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1480,8 +1480,8 @@ int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb,
if (ret)
return ret;
- pr_debug("%pOF: got %dx%d display mode from %s\n",
- np, vm.hactive, vm.vactive, np->name);
+ pr_debug("%pOF: got %dx%d display mode\n",
+ np, vm.hactive, vm.vactive);
dump_fb_videomode(fb);
return 0;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index ecdcf358ad5e..901ca4ed10e9 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1473,7 +1473,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dp = pci_device_to_OF_node(pdev);
if(dp)
- printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name);
+ printk(KERN_INFO "%s: OF name %pOFn\n",__func__, dp);
else if (IS_ENABLED(CONFIG_OF))
printk(KERN_ERR "imsttfb: no OF node for pci device\n");
diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c
index 71862188f528..446ac3364bad 100644
--- a/drivers/video/fbdev/leo.c
+++ b/drivers/video/fbdev/leo.c
@@ -434,7 +434,7 @@ static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
static void
leo_init_fix(struct fb_info *info, struct device_node *dp)
{
- strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
+ snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/fbdev/mmp/hw/Kconfig b/drivers/video/fbdev/mmp/hw/Kconfig
index c735d133895c..fcb711143fb2 100644
--- a/drivers/video/fbdev/mmp/hw/Kconfig
+++ b/drivers/video/fbdev/mmp/hw/Kconfig
@@ -3,7 +3,6 @@ if MMP_DISP
config MMP_DISP_CONTROLLER
bool "mmp display controller hw support"
depends on CPU_PXA910 || CPU_MMP2
- default n
help
Marvell MMP display hw controller support
this controller is used on Marvell PXA910 and
diff --git a/drivers/video/fbdev/mmp/panel/Kconfig b/drivers/video/fbdev/mmp/panel/Kconfig
index 808890f7064b..f58558795f39 100644
--- a/drivers/video/fbdev/mmp/panel/Kconfig
+++ b/drivers/video/fbdev/mmp/panel/Kconfig
@@ -2,6 +2,5 @@
config MMP_PANEL_TPOHVGA
bool "tpohvga panel TJ032MD01BW support"
depends on SPI_MASTER
- default n
help
tpohvga panel support
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 77c0a2f45b3b..31f769d67195 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -419,9 +419,13 @@ static void __init offb_init_fb(const char *name,
var = &info->var;
info->par = par;
- strcpy(fix->id, "OFfb ");
- strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb "));
- fix->id[sizeof(fix->id) - 1] = '\0';
+ if (name) {
+ strcpy(fix->id, "OFfb ");
+ strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb "));
+ fix->id[sizeof(fix->id) - 1] = '\0';
+ } else
+ snprintf(fix->id, sizeof(fix->id), "OFfb %pOFn", dp);
+
var->xres = var->xres_virtual = width;
var->yres = var->yres_virtual = height;
@@ -644,7 +648,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
/* kludge for valkyrie */
if (strcmp(dp->name, "valkyrie") == 0)
address += 0x1000;
- offb_init_fb(no_real_node ? "bootx" : dp->name,
+ offb_init_fb(no_real_node ? "bootx" : NULL,
width, height, depth, pitch, address,
foreign_endian, no_real_node ? NULL : dp);
}
diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c
index e8c748a0dfe2..cddbd00cbf9f 100644
--- a/drivers/video/fbdev/omap/lcd_ams_delta.c
+++ b/drivers/video/fbdev/omap/lcd_ams_delta.c
@@ -24,11 +24,10 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
-#include <linux/gpio.h>
#include <mach/hardware.h>
-#include <mach/board-ams-delta.h>
#include "omapfb.h"
@@ -41,6 +40,8 @@
/* LCD class device section */
static int ams_delta_lcd;
+static struct gpio_desc *gpiod_vblen;
+static struct gpio_desc *gpiod_ndisp;
static int ams_delta_lcd_set_power(struct lcd_device *dev, int power)
{
@@ -99,41 +100,17 @@ static struct lcd_ops ams_delta_lcd_ops = {
/* omapfb panel section */
-static const struct gpio _gpios[] = {
- {
- .gpio = AMS_DELTA_GPIO_PIN_LCD_VBLEN,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "lcd_vblen",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_LCD_NDISP,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "lcd_ndisp",
- },
-};
-
-static int ams_delta_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- return gpio_request_array(_gpios, ARRAY_SIZE(_gpios));
-}
-
-static void ams_delta_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free_array(_gpios, ARRAY_SIZE(_gpios));
-}
-
static int ams_delta_panel_enable(struct lcd_panel *panel)
{
- gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 1);
- gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 1);
+ gpiod_set_value(gpiod_ndisp, 1);
+ gpiod_set_value(gpiod_vblen, 1);
return 0;
}
static void ams_delta_panel_disable(struct lcd_panel *panel)
{
- gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 0);
- gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 0);
+ gpiod_set_value(gpiod_vblen, 0);
+ gpiod_set_value(gpiod_ndisp, 0);
}
static struct lcd_panel ams_delta_panel = {
@@ -154,8 +131,6 @@ static struct lcd_panel ams_delta_panel = {
.pcd = 0,
.acb = 37,
- .init = ams_delta_panel_init,
- .cleanup = ams_delta_panel_cleanup,
.enable = ams_delta_panel_enable,
.disable = ams_delta_panel_disable,
};
@@ -166,9 +141,23 @@ static struct lcd_panel ams_delta_panel = {
static int ams_delta_panel_probe(struct platform_device *pdev)
{
struct lcd_device *lcd_device = NULL;
-#ifdef CONFIG_LCD_CLASS_DEVICE
int ret;
+ gpiod_vblen = devm_gpiod_get(&pdev->dev, "vblen", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_vblen)) {
+ ret = PTR_ERR(gpiod_vblen);
+ dev_err(&pdev->dev, "VBLEN GPIO request failed (%d)\n", ret);
+ return ret;
+ }
+
+ gpiod_ndisp = devm_gpiod_get(&pdev->dev, "ndisp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod_ndisp)) {
+ ret = PTR_ERR(gpiod_ndisp);
+ dev_err(&pdev->dev, "NDISP GPIO request failed (%d)\n", ret);
+ return ret;
+ }
+
+#ifdef CONFIG_LCD_CLASS_DEVICE
lcd_device = lcd_device_register("omapfb", &pdev->dev, NULL,
&ams_delta_lcd_ops);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
index 6d0bb27e4f85..356b89b378d4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
@@ -10,7 +10,6 @@ config FB_OMAP2_DSS
config FB_OMAP2_DSS_DEBUG
bool "Debug support"
- default n
help
This enables printing of debug messages. Alternatively, debug messages
can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
@@ -19,7 +18,6 @@ config FB_OMAP2_DSS_DEBUG
config FB_OMAP2_DSS_DEBUGFS
bool "Debugfs filesystem support"
depends on DEBUG_FS
- default n
help
This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables
querying about clock configuration and register configuration of dss,
@@ -28,7 +26,6 @@ config FB_OMAP2_DSS_DEBUGFS
config FB_OMAP2_DSS_COLLECT_IRQ_STATS
bool "Collect DSS IRQ statistics"
depends on FB_OMAP2_DSS_DEBUGFS
- default n
help
Collect DSS IRQ statistics, printable via debugfs.
@@ -45,7 +42,6 @@ config FB_OMAP2_DSS_DPI
config FB_OMAP2_DSS_RFBI
bool "RFBI support"
depends on BROKEN
- default n
help
MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas
Instrument's terminology).
@@ -73,7 +69,6 @@ config FB_OMAP4_DSS_HDMI
config FB_OMAP5_DSS_HDMI
bool "HDMI support for OMAP5"
- default n
select FB_OMAP2_DSS_HDMI_COMMON
help
HDMI Interface for OMAP5 and similar cores. This adds the High
@@ -82,7 +77,6 @@ config FB_OMAP5_DSS_HDMI
config FB_OMAP2_DSS_SDI
bool "SDI support"
- default n
help
SDI (Serial Display Interface) support.
@@ -91,7 +85,6 @@ config FB_OMAP2_DSS_SDI
config FB_OMAP2_DSS_DSI
bool "DSI support"
- default n
help
MIPI DSI (Display Serial Interface) support.
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 64de5cda541d..c4283e9e95af 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -239,7 +239,7 @@ static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_node *dp)
{
- strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
+ snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index d059d04c63ac..e31340fad3c7 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -405,9 +405,6 @@ static int pxa168fb_set_par(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
struct fb_videomode mode;
u32 x;
- struct pxa168fb_mach_info *mi;
-
- mi = dev_get_platdata(fbi->dev);
/*
* Set additional mode info.
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a436d44f1b7f..01a7110e61a7 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -106,11 +106,11 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
struct fbtype __user *f = (struct fbtype __user *) arg;
if (put_user(type, &f->fb_type) ||
- __put_user(info->var.yres, &f->fb_height) ||
- __put_user(info->var.xres, &f->fb_width) ||
- __put_user(fb_depth, &f->fb_depth) ||
- __put_user(0, &f->fb_cmsize) ||
- __put_user(fb_size, &f->fb_cmsize))
+ put_user(info->var.yres, &f->fb_height) ||
+ put_user(info->var.xres, &f->fb_width) ||
+ put_user(fb_depth, &f->fb_depth) ||
+ put_user(0, &f->fb_cmsize) ||
+ put_user(fb_size, &f->fb_cmsize))
return -EFAULT;
return 0;
}
@@ -125,10 +125,10 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
unsigned int index, count, i;
if (get_user(index, &c->index) ||
- __get_user(count, &c->count) ||
- __get_user(ured, &c->red) ||
- __get_user(ugreen, &c->green) ||
- __get_user(ublue, &c->blue))
+ get_user(count, &c->count) ||
+ get_user(ured, &c->red) ||
+ get_user(ugreen, &c->green) ||
+ get_user(ublue, &c->blue))
return -EFAULT;
cmap.len = 1;
@@ -165,13 +165,13 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
u8 red, green, blue;
if (get_user(index, &c->index) ||
- __get_user(count, &c->count) ||
- __get_user(ured, &c->red) ||
- __get_user(ugreen, &c->green) ||
- __get_user(ublue, &c->blue))
+ get_user(count, &c->count) ||
+ get_user(ured, &c->red) ||
+ get_user(ugreen, &c->green) ||
+ get_user(ublue, &c->blue))
return -EFAULT;
- if (index + count > cmap->len)
+ if (index > cmap->len || count > cmap->len - index)
return -EINVAL;
for (i = 0; i < count; i++) {
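
The new colormap bounds check tests index and count separately because the old form, index + count > cmap->len, can wrap around in unsigned arithmetic and accept a huge count. A standalone demonstration of the difference (values invented):

/*
 * Demonstration of the overflow the reworked check closes: a huge "count"
 * wraps "index + count" so the naive comparison passes, while the split
 * form cannot wrap.
 */
#include <stdbool.h>
#include <stdio.h>

static bool naive_rejects(unsigned int index, unsigned int count,
			  unsigned int len)
{
	return index + count > len;
}

static bool safe_rejects(unsigned int index, unsigned int count,
			 unsigned int len)
{
	return index > len || count > len - index;
}

int main(void)
{
	unsigned int index = 16, count = 0xfffffff0u, len = 256;

	/* index + count wraps to 0, so the naive check fails to reject. */
	printf("naive rejects: %d\n", naive_rejects(index, count, len));
	printf("safe  rejects: %d\n", safe_rejects(index, count, len));
	return 0;
}
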
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 27a2b72e50e8..a8fb41f1a258 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -848,9 +848,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime)
SiS_DDC2Delay(SiS_Pr, 0x4000);
}
- } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* ||
- (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) ||
- (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */
+ } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */
if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) {
PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36);
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 6439231f2db2..4061a20cfe24 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -667,9 +667,9 @@ static int ssd1307fb_probe(struct i2c_client *client,
if (par->reset) {
/* Reset the screen */
- gpiod_set_value(par->reset, 0);
+ gpiod_set_value_cansleep(par->reset, 0);
udelay(4);
- gpiod_set_value(par->reset, 1);
+ gpiod_set_value_cansleep(par->reset, 1);
udelay(4);
}
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index afbd6101c78e..070026a7e55a 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user)
dlfb->fb_count++;
- kref_get(&dlfb->kref);
-
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
@@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user)
return 0;
}
-/*
- * Called when all client interfaces to start transactions have been disabled,
- * and all references to our device instance (dlfb_data) are released.
- * Every transaction must have a reference, so we know are fully spun down
- */
-static void dlfb_free(struct kref *kref)
+static void dlfb_ops_destroy(struct fb_info *info)
{
- struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
+ struct dlfb_data *dlfb = info->par;
+
+ if (info->cmap.len != 0)
+ fb_dealloc_cmap(&info->cmap);
+ if (info->monspecs.modedb)
+ fb_destroy_modedb(info->monspecs.modedb);
+ vfree(info->screen_base);
+
+ fb_destroy_modelist(&info->modelist);
while (!list_empty(&dlfb->deferred_free)) {
struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
@@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref)
}
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
+ usb_put_dev(dlfb->udev);
kfree(dlfb);
-}
-
-static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
-{
- struct fb_info *info = dlfb->info;
-
- if (info) {
- unregister_framebuffer(info);
-
- if (info->cmap.len != 0)
- fb_dealloc_cmap(&info->cmap);
- if (info->monspecs.modedb)
- fb_destroy_modedb(info->monspecs.modedb);
- vfree(info->screen_base);
-
- fb_destroy_modelist(&info->modelist);
-
- dlfb->info = NULL;
-
- /* Assume info structure is freed after this point */
- framebuffer_release(info);
- }
- /* ref taken in probe() as part of registering framebfufer */
- kref_put(&dlfb->kref, dlfb_free);
+ /* Assume info structure is freed after this point */
+ framebuffer_release(info);
}
-static void dlfb_free_framebuffer_work(struct work_struct *work)
-{
- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
- free_framebuffer_work.work);
- dlfb_free_framebuffer(dlfb);
-}
/*
* Assumes caller is holding info->lock mutex (for open and release at least)
*/
@@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
dlfb->fb_count--;
- /* We can't free fb_info here - fbmem will touch it when we return */
- if (dlfb->virtualized && (dlfb->fb_count == 0))
- schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
-
if ((dlfb->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
@@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
- kref_put(&dlfb->kref, dlfb_free);
-
return 0;
}
@@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = {
.fb_blank = dlfb_ops_blank,
.fb_check_var = dlfb_ops_check_var,
.fb_set_par = dlfb_ops_set_par,
+ .fb_destroy = dlfb_ops_destroy,
};
@@ -1615,12 +1584,13 @@ success:
return true;
}
-static void dlfb_init_framebuffer_work(struct work_struct *work);
-
static int dlfb_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ int i;
+ const struct device_attribute *attr;
struct dlfb_data *dlfb;
+ struct fb_info *info;
int retval = -ENOMEM;
struct usb_device *usbdev = interface_to_usbdev(intf);
@@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
goto error;
}
- kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
INIT_LIST_HEAD(&dlfb->deferred_free);
- dlfb->udev = usbdev;
+ dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
dev_dbg(&intf->dev, "console enable=%d\n", console);
@@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf,
}
- if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
- retval = -ENOMEM;
- dev_err(&intf->dev, "unable to allocate urb list\n");
- goto error;
- }
-
- kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
-
- /* We don't register a new USB class. Our client interface is dlfbev */
-
- /* Workitem keep things fast & simple during USB enumeration */
- INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
- dlfb_init_framebuffer_work);
- schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
-
- return 0;
-
-error:
- if (dlfb) {
-
- kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
-
- /* dev has been deallocated. Do not dereference */
- }
-
- return retval;
-}
-
-static void dlfb_init_framebuffer_work(struct work_struct *work)
-{
- int i, retval;
- struct fb_info *info;
- const struct device_attribute *attr;
- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
- init_framebuffer_work.work);
-
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
if (!info) {
@@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dlfb->ops = dlfb_ops;
info->fbops = &dlfb->ops;
+ INIT_LIST_HEAD(&info->modelist);
+
+ if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ retval = -ENOMEM;
+ dev_err(&intf->dev, "unable to allocate urb list\n");
+ goto error;
+ }
+
+ /* We don't register a new USB class. Our client interface is dlfbev */
+
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
dev_err(info->device, "cmap allocation failed: %d\n", retval);
goto error;
}
- INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
- dlfb_free_framebuffer_work);
-
- INIT_LIST_HEAD(&info->modelist);
-
retval = dlfb_setup_modes(dlfb, info, NULL, 0);
if (retval != 0) {
dev_err(info->device,
@@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dev_name(info->dev), info->var.xres, info->var.yres,
((dlfb->backing_buffer) ?
info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
- return;
+ return 0;
error:
- dlfb_free_framebuffer(dlfb);
+ if (dlfb->info) {
+ dlfb_ops_destroy(dlfb->info);
+ } else if (dlfb) {
+ usb_put_dev(dlfb->udev);
+ kfree(dlfb);
+ }
+ return retval;
}
static void dlfb_usb_disconnect(struct usb_interface *intf)
@@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
device_remove_file(info->dev, &fb_device_attrs[i]);
device_remove_bin_file(info->dev, &edid_attr);
- unlink_framebuffer(info);
}
- usb_set_intfdata(intf, NULL);
- dlfb->udev = NULL;
-
- /* if clients still have us open, will be freed on last close */
- if (dlfb->fb_count == 0)
- schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
-
- /* release reference taken by kref_init in probe() */
- kref_put(&dlfb->kref, dlfb_free);
-
- /* consider dlfb_data freed */
+ unregister_framebuffer(info);
}
static struct usb_driver dlfb_driver = {
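The udlfb rework above replaces the kref/delayed-work teardown with the fbdev core's own lifetime hooks: probe takes a reference on the USB device with usb_get_dev(), and everything is released from the fb_ops .fb_destroy callback once the framebuffer is torn down. A minimal sketch of that pairing, assuming a hypothetical driver-private struct that carries the usb_device pointer:

#include <linux/fb.h>
#include <linux/usb.h>
#include <linux/module.h>

struct example_par {			/* hypothetical per-device data */
	struct usb_device *udev;
};

static void example_fb_destroy(struct fb_info *info)
{
	struct example_par *par = info->par;

	usb_put_dev(par->udev);		/* balances usb_get_dev() taken in probe */
	framebuffer_release(info);	/* frees fb_info (and par allocated with it) */
}

static struct fb_ops example_fb_ops = {
	.owner	    = THIS_MODULE,
	.fb_destroy = example_fb_destroy,
	/* read/write/blit handlers elided */
};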
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 5244e93ceafc..c2e7aa103fa5 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -170,7 +170,7 @@ struct display_timings *of_get_display_timings(const struct device_node *np)
goto entryfail;
}
- pr_debug("%pOF: using %s as default timing\n", np, entry->name);
+ pr_debug("%pOF: using %pOFn as default timing\n", np, entry);
native_mode = entry;
diff --git a/drivers/video/vgastate.c b/drivers/video/vgastate.c
index 548c751d2415..122fb3c3ec9d 100644
--- a/drivers/video/vgastate.c
+++ b/drivers/video/vgastate.c
@@ -455,7 +455,7 @@ int save_vga(struct vgastate *state)
return 0;
}
-int restore_vga (struct vgastate *state)
+int restore_vga(struct vgastate *state)
{
if (state->vidstate == NULL)
return 1;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index d1c1f6283729..728ecd1eea30 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -41,13 +41,34 @@
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
+#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
+ __GFP_NOMEMALLOC)
+/* The order of free page blocks to report to host */
+#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1)
+/* The size of a free page block in bytes */
+#define VIRTIO_BALLOON_FREE_PAGE_SIZE \
+ (1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT))
+
#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
#endif
+enum virtio_balloon_vq {
+ VIRTIO_BALLOON_VQ_INFLATE,
+ VIRTIO_BALLOON_VQ_DEFLATE,
+ VIRTIO_BALLOON_VQ_STATS,
+ VIRTIO_BALLOON_VQ_FREE_PAGE,
+ VIRTIO_BALLOON_VQ_MAX
+};
+
struct virtio_balloon {
struct virtio_device *vdev;
- struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
+ struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
+
+ /* Balloon's own wq for cpu-intensive work items */
+ struct workqueue_struct *balloon_wq;
+ /* The free page reporting work item submitted to the balloon wq */
+ struct work_struct report_free_page_work;
/* The balloon servicing is delegated to a freezable workqueue. */
struct work_struct update_balloon_stats_work;
@@ -57,6 +78,18 @@ struct virtio_balloon {
spinlock_t stop_update_lock;
bool stop_update;
+ /* The list of allocated free pages, waiting to be given back to mm */
+ struct list_head free_page_list;
+ spinlock_t free_page_list_lock;
+ /* The number of free page blocks on the above list */
+ unsigned long num_free_page_blocks;
+ /* The cmd id received from host */
+ u32 cmd_id_received;
+ /* The cmd id that is actively in use */
+ __virtio32 cmd_id_active;
+ /* Buffer to store the stop sign */
+ __virtio32 cmd_id_stop;
+
/* Waiting for host to ack the pages we released. */
wait_queue_head_t acked;
@@ -320,17 +353,6 @@ static void stats_handle_request(struct virtio_balloon *vb)
virtqueue_kick(vq);
}
-static void virtballoon_changed(struct virtio_device *vdev)
-{
- struct virtio_balloon *vb = vdev->priv;
- unsigned long flags;
-
- spin_lock_irqsave(&vb->stop_update_lock, flags);
- if (!vb->stop_update)
- queue_work(system_freezable_wq, &vb->update_balloon_size_work);
- spin_unlock_irqrestore(&vb->stop_update_lock, flags);
-}
-
static inline s64 towards_target(struct virtio_balloon *vb)
{
s64 target;
@@ -347,6 +369,60 @@ static inline s64 towards_target(struct virtio_balloon *vb)
return target - vb->num_pages;
}
+/* Gives back @num_to_return blocks of free pages to mm. */
+static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
+ unsigned long num_to_return)
+{
+ struct page *page;
+ unsigned long num_returned;
+
+ spin_lock_irq(&vb->free_page_list_lock);
+ for (num_returned = 0; num_returned < num_to_return; num_returned++) {
+ page = balloon_page_pop(&vb->free_page_list);
+ if (!page)
+ break;
+ free_pages((unsigned long)page_address(page),
+ VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ }
+ vb->num_free_page_blocks -= num_returned;
+ spin_unlock_irq(&vb->free_page_list_lock);
+
+ return num_returned;
+}
+
+static void virtballoon_changed(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+ unsigned long flags;
+ s64 diff = towards_target(vb);
+
+ if (diff) {
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
+ if (!vb->stop_update)
+ queue_work(system_freezable_wq,
+ &vb->update_balloon_size_work);
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+ virtio_cread(vdev, struct virtio_balloon_config,
+ free_page_report_cmd_id, &vb->cmd_id_received);
+ if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
+ /* Pass ULONG_MAX to give back all the free pages */
+ return_free_pages_to_mm(vb, ULONG_MAX);
+ } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
+ vb->cmd_id_received !=
+ virtio32_to_cpu(vdev, vb->cmd_id_active)) {
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
+ if (!vb->stop_update) {
+ queue_work(vb->balloon_wq,
+ &vb->report_free_page_work);
+ }
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
+ }
+ }
+}
+
static void update_balloon_size(struct virtio_balloon *vb)
{
u32 actual = vb->num_pages;
@@ -389,26 +465,44 @@ static void update_balloon_size_func(struct work_struct *work)
static int init_vqs(struct virtio_balloon *vb)
{
- struct virtqueue *vqs[3];
- vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
- static const char * const names[] = { "inflate", "deflate", "stats" };
- int err, nvqs;
+ struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
+ vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
+ const char *names[VIRTIO_BALLOON_VQ_MAX];
+ int err;
/*
- * We expect two virtqueues: inflate and deflate, and
- * optionally stat.
+ * Inflateq and deflateq are used unconditionally. The names[]
+ * will be NULL if the related feature is not enabled, which will
+ * cause no allocation for the corresponding virtqueue in find_vqs.
*/
- nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
- err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
+ callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
+ names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
+ callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
+ names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
+ names[VIRTIO_BALLOON_VQ_STATS] = NULL;
+ names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+ names[VIRTIO_BALLOON_VQ_STATS] = "stats";
+ callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
+ }
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+ names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
+ callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+ }
+
+ err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
+ vqs, callbacks, names, NULL, NULL);
if (err)
return err;
- vb->inflate_vq = vqs[0];
- vb->deflate_vq = vqs[1];
+ vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
+ vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
struct scatterlist sg;
unsigned int num_stats;
- vb->stats_vq = vqs[2];
+ vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];
/*
* Prime this virtqueue with one buffer so the hypervisor can
@@ -426,9 +520,145 @@ static int init_vqs(struct virtio_balloon *vb)
}
virtqueue_kick(vb->stats_vq);
}
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
+
+ return 0;
+}
+
+static int send_cmd_id_start(struct virtio_balloon *vb)
+{
+ struct scatterlist sg;
+ struct virtqueue *vq = vb->free_page_vq;
+ int err, unused;
+
+ /* Detach all the used buffers from the vq */
+ while (virtqueue_get_buf(vq, &unused))
+ ;
+
+ vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
+ sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
+ err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
+ if (!err)
+ virtqueue_kick(vq);
+ return err;
+}
+
+static int send_cmd_id_stop(struct virtio_balloon *vb)
+{
+ struct scatterlist sg;
+ struct virtqueue *vq = vb->free_page_vq;
+ int err, unused;
+
+ /* Detach all the used buffers from the vq */
+ while (virtqueue_get_buf(vq, &unused))
+ ;
+
+ sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
+ err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
+ if (!err)
+ virtqueue_kick(vq);
+ return err;
+}
+
+static int get_free_page_and_send(struct virtio_balloon *vb)
+{
+ struct virtqueue *vq = vb->free_page_vq;
+ struct page *page;
+ struct scatterlist sg;
+ int err, unused;
+ void *p;
+
+ /* Detach all the used buffers from the vq */
+ while (virtqueue_get_buf(vq, &unused))
+ ;
+
+ page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
+ VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ /*
+ * When the allocation returns NULL, it indicates that we have got all
+ * the possible free pages, so return -EINTR to stop.
+ */
+ if (!page)
+ return -EINTR;
+
+ p = page_address(page);
+ sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
+ /* There is always 1 entry reserved for the cmd id to use. */
+ if (vq->num_free > 1) {
+ err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
+ if (unlikely(err)) {
+ free_pages((unsigned long)p,
+ VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ return err;
+ }
+ virtqueue_kick(vq);
+ spin_lock_irq(&vb->free_page_list_lock);
+ balloon_page_push(&vb->free_page_list, page);
+ vb->num_free_page_blocks++;
+ spin_unlock_irq(&vb->free_page_list_lock);
+ } else {
+ /*
+ * The vq has no available entry to add this page block, so
+ * just free it.
+ */
+ free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ }
+
+ return 0;
+}
+
+static int send_free_pages(struct virtio_balloon *vb)
+{
+ int err;
+ u32 cmd_id_active;
+
+ while (1) {
+ /*
+ * If a stop id or a new cmd id was just received from host,
+ * stop the reporting.
+ */
+ cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
+ if (cmd_id_active != vb->cmd_id_received)
+ break;
+
+ /*
+ * The free page blocks are allocated and sent to host one by
+ * one.
+ */
+ err = get_free_page_and_send(vb);
+ if (err == -EINTR)
+ break;
+ else if (unlikely(err))
+ return err;
+ }
+
return 0;
}
+static void report_free_page_func(struct work_struct *work)
+{
+ int err;
+ struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
+ report_free_page_work);
+ struct device *dev = &vb->vdev->dev;
+
+ /* Start by sending the received cmd id to host with an outbuf. */
+ err = send_cmd_id_start(vb);
+ if (unlikely(err))
+ dev_err(dev, "Failed to send a start id, err = %d\n", err);
+
+ err = send_free_pages(vb);
+ if (unlikely(err))
+ dev_err(dev, "Failed to send a free page, err = %d\n", err);
+
+ /* End by sending a stop id to host with an outbuf. */
+ err = send_cmd_id_stop(vb);
+ if (unlikely(err))
+ dev_err(dev, "Failed to send a stop id, err = %d\n", err);
+}
+
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -512,14 +742,23 @@ static struct file_system_type balloon_fs = {
#endif /* CONFIG_BALLOON_COMPACTION */
-static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
- struct shrink_control *sc)
+static unsigned long shrink_free_pages(struct virtio_balloon *vb,
+ unsigned long pages_to_free)
{
- unsigned long pages_to_free, pages_freed = 0;
- struct virtio_balloon *vb = container_of(shrinker,
- struct virtio_balloon, shrinker);
+ unsigned long blocks_to_free, blocks_freed;
- pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+ pages_to_free = round_up(pages_to_free,
+ 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER);
+ blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+ blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);
+
+ return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+}
+
+static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
+ unsigned long pages_to_free)
+{
+ unsigned long pages_freed = 0;
/*
* One invocation of leak_balloon can deflate at most
@@ -527,12 +766,33 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
* multiple times to deflate pages till reaching pages_to_free.
*/
while (vb->num_pages && pages_to_free) {
+ pages_freed += leak_balloon(vb, pages_to_free) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
pages_to_free -= pages_freed;
- pages_freed += leak_balloon(vb, pages_to_free);
}
update_balloon_size(vb);
- return pages_freed / VIRTIO_BALLOON_PAGES_PER_PAGE;
+ return pages_freed;
+}
+
+static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ unsigned long pages_to_free, pages_freed = 0;
+ struct virtio_balloon *vb = container_of(shrinker,
+ struct virtio_balloon, shrinker);
+
+ pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ pages_freed = shrink_free_pages(vb, pages_to_free);
+
+ if (pages_freed >= pages_to_free)
+ return pages_freed;
+
+ pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);
+
+ return pages_freed;
}
static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
@@ -540,8 +800,12 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
{
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
+ unsigned long count;
- return vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
+ count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
+ count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+
+ return count;
}
static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
@@ -561,6 +825,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
+ __u32 poison_val;
int err;
if (!vdev->config->get) {
@@ -604,6 +869,36 @@ static int virtballoon_probe(struct virtio_device *vdev)
}
vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+ /*
+ * There is always one entry reserved for cmd id, so the ring
+ * size needs to be at least two to report free page hints.
+ */
+ if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
+ err = -ENOSPC;
+ goto out_del_vqs;
+ }
+ vb->balloon_wq = alloc_workqueue("balloon-wq",
+ WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
+ if (!vb->balloon_wq) {
+ err = -ENOMEM;
+ goto out_del_vqs;
+ }
+ INIT_WORK(&vb->report_free_page_work, report_free_page_func);
+ vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
+ vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
+ VIRTIO_BALLOON_CMD_ID_STOP);
+ vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
+ VIRTIO_BALLOON_CMD_ID_STOP);
+ vb->num_free_page_blocks = 0;
+ spin_lock_init(&vb->free_page_list_lock);
+ INIT_LIST_HEAD(&vb->free_page_list);
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
+ memset(&poison_val, PAGE_POISON, sizeof(poison_val));
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config,
+ poison_val, &poison_val);
+ }
+ }
/*
* We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
* shrinker needs to be registered to relieve memory pressure.
@@ -611,7 +906,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
err = virtio_balloon_register_shrinker(vb);
if (err)
- goto out_del_vqs;
+ goto out_del_balloon_wq;
}
virtio_device_ready(vdev);
@@ -619,6 +914,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
virtballoon_changed(vdev);
return 0;
+out_del_balloon_wq:
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ destroy_workqueue(vb->balloon_wq);
out_del_vqs:
vdev->config->del_vqs(vdev);
out_free_vb:
@@ -652,6 +950,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
cancel_work_sync(&vb->update_balloon_size_work);
cancel_work_sync(&vb->update_balloon_stats_work);
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+ cancel_work_sync(&vb->report_free_page_work);
+ destroy_workqueue(vb->balloon_wq);
+ }
+
remove_common(vb);
#ifdef CONFIG_BALLOON_COMPACTION
if (vb->vb_dev_info.inode)
@@ -695,6 +998,9 @@ static int virtballoon_restore(struct virtio_device *vdev)
static int virtballoon_validate(struct virtio_device *vdev)
{
+ if (!page_poisoning_enabled())
+ __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
+
__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
return 0;
}
@@ -703,6 +1009,8 @@ static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
+ VIRTIO_BALLOON_F_FREE_PAGE_HINT,
+ VIRTIO_BALLOON_F_PAGE_POISON,
};
static struct virtio_driver virtio_balloon_driver = {
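The free-page-hint additions report memory in MAX_ORDER - 1 blocks, and the shrinker path converts the page count it is asked to drop into whole blocks. A small standalone sketch of that arithmetic, assuming the common x86-64 configuration (MAX_ORDER = 11, 4 KiB pages); the numbers are illustrative only:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define MAX_ORDER	11			/* assumption: x86-64 default */
#define FREE_PAGE_ORDER	(MAX_ORDER - 1)		/* order-10 blocks */

/* round up to a power-of-two multiple, like the kernel helper */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long pages_to_free = 1500;	/* what the shrinker asked for */
	unsigned long pages, blocks;

	pages  = round_up(pages_to_free, 1UL << FREE_PAGE_ORDER);
	blocks = pages >> FREE_PAGE_ORDER;

	/* 1500 pages round up to 2048 pages, i.e. 2 blocks of 4 MiB each */
	printf("%lu pages -> %lu blocks (%lu KiB per block)\n",
	       pages_to_free, blocks,
	       (1UL << (FREE_PAGE_ORDER + PAGE_SHIFT)) / 1024);
	return 0;
}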
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 90d387b50ab7..815b9e9bb975 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -12,7 +12,6 @@ config XEN_BALLOON
config XEN_SELFBALLOONING
bool "Dynamically self-balloon kernel memory to target"
depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
- default n
help
Self-ballooning dynamically balloons available kernel memory driven
by the current usage of anonymous memory ("committed AS") and
@@ -27,7 +26,6 @@ config XEN_SELFBALLOONING
config XEN_BALLOON_MEMORY_HOTPLUG
bool "Memory hotplug support for Xen balloon driver"
- default n
depends on XEN_BALLOON && MEMORY_HOTPLUG
help
Memory hotplug support for Xen balloon driver allows expanding memory
@@ -86,7 +84,7 @@ config XEN_SCRUB_PAGES_DEFAULT
help
Scrub pages before returning them to the system for reuse by
other domains. This makes sure that any confidential data
- is not accidentally visible to other domains. Is it more
+ is not accidentally visible to other domains. It is more
secure, but slightly less efficient. This can be controlled with
xen_scrub_pages=0 parameter and
/sys/devices/system/xen_memory/xen_memory0/scrub_pages.
@@ -105,8 +103,7 @@ config XEN_DEV_EVTCHN
config XEN_BACKEND
bool "Backend driver support"
- depends on XEN_DOM0
- default y
+ default XEN_DOM0
help
Support for backend device drivers that provide I/O services
to other virtual machines.
@@ -227,7 +224,6 @@ config XEN_PCIDEV_BACKEND
config XEN_PVCALLS_FRONTEND
tristate "XEN PV Calls frontend driver"
depends on INET && XEN
- default n
select XEN_XENBUS_FRONTEND
help
Experimental frontend for the Xen PV Calls protocol
@@ -238,7 +234,6 @@ config XEN_PVCALLS_FRONTEND
config XEN_PVCALLS_BACKEND
bool "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
- default n
help
Experimental backend for the Xen PV Calls protocol
(https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
@@ -264,7 +259,6 @@ config XEN_PRIVCMD
config XEN_STUB
bool "Xen stub drivers"
depends on XEN && X86_64 && BROKEN
- default n
help
Allow kernel to install stub drivers, to reserve space for Xen drivers,
i.e. memory hotplug and cpu hotplug, and to block native drivers loaded,
@@ -275,7 +269,6 @@ config XEN_STUB
config XEN_ACPI_HOTPLUG_MEMORY
tristate "Xen ACPI memory hotplug"
depends on XEN_DOM0 && XEN_STUB && ACPI
- default n
help
This is Xen ACPI memory hotplug.
@@ -287,7 +280,6 @@ config XEN_ACPI_HOTPLUG_CPU
tristate "Xen ACPI cpu hotplug"
depends on XEN_DOM0 && XEN_STUB && ACPI
select ACPI_CONTAINER
- default n
help
Xen ACPI cpu enumerating and hotplugging
@@ -316,7 +308,6 @@ config XEN_ACPI_PROCESSOR
config XEN_MCE_LOG
bool "Xen platform mcelog"
depends on XEN_DOM0 && X86_64 && X86_MCE
- default n
help
Allow kernel fetching MCE error from Xen platform and
converting it into Linux mcelog format for mcelog tools
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb256036f..fdfc64f5acea 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -44,7 +44,7 @@
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
@@ -395,7 +395,10 @@ static enum bp_state reserve_additional_memory(void)
* callers drop the mutex before trying again.
*/
mutex_unlock(&balloon_mutex);
+ /* add_memory_resource() requires the device_hotplug lock */
+ lock_device_hotplug();
rc = add_memory_resource(nid, resource, memhp_auto_online);
+ unlock_device_hotplug();
mutex_lock(&balloon_mutex);
if (rc) {
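add_memory_resource() now expects its caller to hold the device hotplug lock, which is why the balloon path brackets the call with lock_device_hotplug()/unlock_device_hotplug(). A minimal sketch of the calling convention, with the nid, resource and online policy assumed to be set up by the caller:

#include <linux/device.h>
#include <linux/memory_hotplug.h>

static int example_hotplug_add(int nid, struct resource *resource, bool online)
{
	int rc;

	lock_device_hotplug();		/* required by add_memory_resource() */
	rc = add_memory_resource(nid, resource, online);
	unlock_device_hotplug();
	return rc;
}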
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index e6c1934734b7..93194f3e7540 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 84575baceebc..f15f89df1f36 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -33,7 +33,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index b1092fbefa63..2e5d845b5091 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -137,13 +137,13 @@ static void pvcalls_conn_back_read(void *opaque)
if (masked_prod < masked_cons) {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = wanted;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted);
+ iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
} else {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = array_size - masked_prod;
vec[1].iov_base = data->in;
vec[1].iov_len = wanted - vec[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted);
+ iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
}
atomic_set(&map->read, 0);
@@ -195,13 +195,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = size;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size);
+ iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
} else {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
vec[1].iov_base = data->out;
vec[1].iov_len = size - vec[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size);
+ iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
}
atomic_set(&map->write, 0);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f5c1af4ce9ab..2a7f545bd0b5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -35,7 +35,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
@@ -217,7 +217,8 @@ retry:
* Get IO TLB memory from any location.
*/
if (early)
- xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+ xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
+ PAGE_SIZE);
else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
@@ -247,7 +248,8 @@ retry:
xen_io_tlb_nslabs);
if (rc) {
if (early)
- free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+ memblock_free(__pa(xen_io_tlb_start),
+ PAGE_ALIGN(bytes));
else {
free_pages((unsigned long)xen_io_tlb_start, order);
xen_io_tlb_start = NULL;
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 63c1494a8d73..2acbfe104e46 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -76,12 +76,15 @@ static void watch_target(struct xenbus_watch *watch,
if (!watch_fired) {
watch_fired = true;
- err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
- &static_max);
- if (err != 1)
- static_max = new_target;
- else
+
+ if ((xenbus_scanf(XBT_NIL, "memory", "static-max",
+ "%llu", &static_max) == 1) ||
+ (xenbus_scanf(XBT_NIL, "memory", "memory_static_max",
+ "%llu", &static_max) == 1))
static_max >>= PAGE_SHIFT - 10;
+ else
+ static_max = new_target;
+
target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
: static_max - balloon_stats.target_pages;
}
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 55988b8418ee..5165aa82bf7d 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -68,7 +68,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mman.h>
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index a1c17000129b..e17ca8156171 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -278,10 +278,8 @@ static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
dev_err(&dev->dev, "%s\n", printf_buffer);
path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
- if (!path_buffer ||
- xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
- dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
- dev->nodename, printf_buffer);
+ if (path_buffer)
+ xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);
kfree(printf_buffer);
kfree(path_buffer);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e1cbdfdb7c68..0bcbcc20f769 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -65,7 +65,7 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
if (retval == 0)
return retval;
- iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
+ iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);
retval = p9_client_read(fid, page_offset(page), &to, &err);
if (err) {
@@ -175,7 +175,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
bvec.bv_page = page;
bvec.bv_offset = 0;
bvec.bv_len = len;
- iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);
+ iov_iter_bvec(&from, WRITE, &bvec, 1, len);
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index cb6c4031af55..00745147329d 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -123,7 +123,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
if (rdir->tail == rdir->head) {
struct iov_iter to;
int n;
- iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
+ iov_iter_kvec(&to, READ, &kvec, 1, buflen);
n = p9_client_read(file->private_data, ctx->pos, &to,
&err);
if (err)
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 352abc39e891..ac8ff8ca4c11 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -32,7 +32,7 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
struct iov_iter to;
int err;
- iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);
+ iov_iter_kvec(&to, READ, &kvec, 1, buffer_size);
attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
if (IS_ERR(attr_fid)) {
@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
struct iov_iter from;
int retval, err;
- iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
+ iov_iter_kvec(&from, WRITE, &kvec, 1, value_len);
p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
name, value_len, flags);
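The 9p and pvcalls hunks track the iov_iter constructor change: the iterator type is now implied by the constructor (iov_iter_kvec(), iov_iter_bvec()), so callers pass only the data direction instead of OR-ing in ITER_KVEC/ITER_BVEC. A minimal kernel-style sketch of the new form, filling a kernel buffer through a single kvec; the function name is illustrative:

#include <linux/uio.h>

/* Copy @len bytes from @src into @buf via an iov_iter over one kvec.  The
 * ITER_KVEC tag is implied by iov_iter_kvec(); only the direction is given
 * (READ here, meaning data flows into the iterator's buffer).
 */
static size_t example_fill_kvec(void *buf, size_t len, const void *src)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = len };
	struct iov_iter to;

	iov_iter_kvec(&to, READ, &kvec, 1, len);
	return copy_to_iter(src, len, &to);
}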
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
index ebba3b18e5da..701aaa9b1899 100644
--- a/fs/afs/Kconfig
+++ b/fs/afs/Kconfig
@@ -27,3 +27,15 @@ config AFS_FSCACHE
help
Say Y here if you want AFS data to be cached locally on disk through
the generic filesystem cache manager
+
+config AFS_DEBUG_CURSOR
+ bool "AFS server cursor debugging"
+ depends on AFS_FS
+ help
+ Say Y here to cause the contents of a server cursor to be dumped to
+ the dmesg log if the server rotation algorithm fails to successfully
+ contact a server.
+
+ See <file:Documentation/filesystems/afs.txt> for more information.
+
+ If unsure, say N.
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 546874057bd3..0738e2bf5193 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -17,6 +17,7 @@ kafs-y := \
file.o \
flock.o \
fsclient.o \
+ fs_probe.o \
inode.o \
main.o \
misc.o \
@@ -29,9 +30,13 @@ kafs-y := \
super.o \
netdevices.o \
vlclient.o \
+ vl_list.o \
+ vl_probe.o \
+ vl_rotate.o \
volume.o \
write.o \
- xattr.o
+ xattr.o \
+ yfsclient.o
kafs-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_AFS_FS) := kafs.o
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 55a756c60746..967db336d11a 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -64,19 +64,25 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
/*
* Parse a text string consisting of delimited addresses.
*/
-struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
- char delim,
- unsigned short service,
- unsigned short port)
+struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net,
+ const char *text, size_t len,
+ char delim,
+ unsigned short service,
+ unsigned short port)
{
+ struct afs_vlserver_list *vllist;
struct afs_addr_list *alist;
const char *p, *end = text + len;
+ const char *problem;
unsigned int nr = 0;
+ int ret = -ENOMEM;
_enter("%*.*s,%c", (int)len, (int)len, text, delim);
- if (!len)
+ if (!len) {
+ _leave(" = -EDESTADDRREQ [empty]");
return ERR_PTR(-EDESTADDRREQ);
+ }
if (delim == ':' && (memchr(text, ',', len) || !memchr(text, '.', len)))
delim = ',';
@@ -84,18 +90,24 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
/* Count the addresses */
p = text;
do {
- if (!*p)
- return ERR_PTR(-EINVAL);
+ if (!*p) {
+ problem = "nul";
+ goto inval;
+ }
if (*p == delim)
continue;
nr++;
if (*p == '[') {
p++;
- if (p == end)
- return ERR_PTR(-EINVAL);
+ if (p == end) {
+ problem = "brace1";
+ goto inval;
+ }
p = memchr(p, ']', end - p);
- if (!p)
- return ERR_PTR(-EINVAL);
+ if (!p) {
+ problem = "brace2";
+ goto inval;
+ }
p++;
if (p >= end)
break;
@@ -109,10 +121,19 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
_debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES);
- alist = afs_alloc_addrlist(nr, service, port);
- if (!alist)
+ vllist = afs_alloc_vlserver_list(1);
+ if (!vllist)
return ERR_PTR(-ENOMEM);
+ vllist->nr_servers = 1;
+ vllist->servers[0].server = afs_alloc_vlserver("<dummy>", 7, AFS_VL_PORT);
+ if (!vllist->servers[0].server)
+ goto error_vl;
+
+ alist = afs_alloc_addrlist(nr, service, AFS_VL_PORT);
+ if (!alist)
+ goto error;
+
/* Extract the addresses */
p = text;
do {
@@ -135,17 +156,21 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
break;
}
- if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop))
+ if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) {
family = AF_INET;
- else if (in6_pton(p, q - p, (u8 *)x, -1, &stop))
+ } else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) {
family = AF_INET6;
- else
+ } else {
+ problem = "family";
goto bad_address;
+ }
- if (stop != q)
+ p = q;
+ if (stop != p) {
+ problem = "nostop";
goto bad_address;
+ }
- p = q;
if (q < end && *q == ']')
p++;
@@ -154,18 +179,23 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
/* Port number specification "+1234" */
xport = 0;
p++;
- if (p >= end || !isdigit(*p))
+ if (p >= end || !isdigit(*p)) {
+ problem = "port";
goto bad_address;
+ }
do {
xport *= 10;
xport += *p - '0';
- if (xport > 65535)
+ if (xport > 65535) {
+ problem = "pval";
goto bad_address;
+ }
p++;
} while (p < end && isdigit(*p));
} else if (*p == delim) {
p++;
} else {
+ problem = "weird";
goto bad_address;
}
}
@@ -177,12 +207,23 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
} while (p < end);
+ rcu_assign_pointer(vllist->servers[0].server->addresses, alist);
_leave(" = [nr %u]", alist->nr_addrs);
- return alist;
+ return vllist;
-bad_address:
- kfree(alist);
+inval:
+ _leave(" = -EINVAL [%s %zu %*.*s]",
+ problem, p - text, (int)len, (int)len, text);
return ERR_PTR(-EINVAL);
+bad_address:
+ _leave(" = -EINVAL [%s %zu %*.*s]",
+ problem, p - text, (int)len, (int)len, text);
+ ret = -EINVAL;
+error:
+ afs_put_addrlist(alist);
+error_vl:
+ afs_put_vlserverlist(net, vllist);
+ return ERR_PTR(ret);
}
/*
@@ -201,30 +242,34 @@ static int afs_cmp_addr_list(const struct afs_addr_list *a1,
/*
* Perform a DNS query for VL servers and build a up an address list.
*/
-struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
+struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
{
- struct afs_addr_list *alist;
- char *vllist = NULL;
+ struct afs_vlserver_list *vllist;
+ char *result = NULL;
int ret;
_enter("%s", cell->name);
- ret = dns_query("afsdb", cell->name, cell->name_len,
- "", &vllist, _expiry);
- if (ret < 0)
+ ret = dns_query("afsdb", cell->name, cell->name_len, "srv=1",
+ &result, _expiry);
+ if (ret < 0) {
+ _leave(" = %d [dns]", ret);
return ERR_PTR(ret);
-
- alist = afs_parse_text_addrs(vllist, strlen(vllist), ',',
- VL_SERVICE, AFS_VL_PORT);
- if (IS_ERR(alist)) {
- kfree(vllist);
- if (alist != ERR_PTR(-ENOMEM))
- pr_err("Failed to parse DNS data\n");
- return alist;
}
- kfree(vllist);
- return alist;
+ if (*_expiry == 0)
+ *_expiry = ktime_get_real_seconds() + 60;
+
+ if (ret > 1 && result[0] == 0)
+ vllist = afs_extract_vlserver_list(cell, result, ret);
+ else
+ vllist = afs_parse_text_addrs(cell->net, result, ret, ',',
+ VL_SERVICE, AFS_VL_PORT);
+ kfree(result);
+ if (IS_ERR(vllist) && vllist != ERR_PTR(-ENOMEM))
+ pr_err("Failed to parse DNS data %ld\n", PTR_ERR(vllist));
+
+ return vllist;
}
/*
@@ -258,6 +303,8 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
srx = &alist->addrs[i];
+ srx->srx_family = AF_RXRPC;
+ srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin);
srx->transport.sin.sin_family = AF_INET;
srx->transport.sin.sin_port = htons(port);
@@ -296,6 +343,8 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
srx = &alist->addrs[i];
+ srx->srx_family = AF_RXRPC;
+ srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin6);
srx->transport.sin6.sin6_family = AF_INET6;
srx->transport.sin6.sin6_port = htons(port);
@@ -308,25 +357,33 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
*/
bool afs_iterate_addresses(struct afs_addr_cursor *ac)
{
- _enter("%hu+%hd", ac->start, (short)ac->index);
+ unsigned long set, failed;
+ int index;
if (!ac->alist)
return false;
- if (ac->begun) {
- ac->index++;
- if (ac->index == ac->alist->nr_addrs)
- ac->index = 0;
+ set = ac->alist->responded;
+ failed = ac->alist->failed;
+ _enter("%lx-%lx-%lx,%d", set, failed, ac->tried, ac->index);
- if (ac->index == ac->start) {
- ac->error = -EDESTADDRREQ;
- return false;
- }
- }
+ ac->nr_iterations++;
+
+ set &= ~(failed | ac->tried);
+
+ if (!set)
+ return false;
- ac->begun = true;
+ index = READ_ONCE(ac->alist->preferred);
+ if (test_bit(index, &set))
+ goto selected;
+
+ index = __ffs(set);
+
+selected:
+ ac->index = index;
+ set_bit(index, &ac->tried);
ac->responded = false;
- ac->addr = &ac->alist->addrs[ac->index];
return true;
}
@@ -339,53 +396,13 @@ int afs_end_cursor(struct afs_addr_cursor *ac)
alist = ac->alist;
if (alist) {
- if (ac->responded && ac->index != ac->start)
- WRITE_ONCE(alist->index, ac->index);
+ if (ac->responded &&
+ ac->index != alist->preferred &&
+ test_bit(ac->alist->preferred, &ac->tried))
+ WRITE_ONCE(alist->preferred, ac->index);
afs_put_addrlist(alist);
+ ac->alist = NULL;
}
- ac->addr = NULL;
- ac->alist = NULL;
- ac->begun = false;
return ac->error;
}
-
-/*
- * Set the address cursor for iterating over VL servers.
- */
-int afs_set_vl_cursor(struct afs_addr_cursor *ac, struct afs_cell *cell)
-{
- struct afs_addr_list *alist;
- int ret;
-
- if (!rcu_access_pointer(cell->vl_addrs)) {
- ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET,
- TASK_INTERRUPTIBLE);
- if (ret < 0)
- return ret;
-
- if (!rcu_access_pointer(cell->vl_addrs) &&
- ktime_get_real_seconds() < cell->dns_expiry)
- return cell->error;
- }
-
- read_lock(&cell->vl_addrs_lock);
- alist = rcu_dereference_protected(cell->vl_addrs,
- lockdep_is_held(&cell->vl_addrs_lock));
- if (alist->nr_addrs > 0)
- afs_get_addrlist(alist);
- else
- alist = NULL;
- read_unlock(&cell->vl_addrs_lock);
-
- if (!alist)
- return -EDESTADDRREQ;
-
- ac->alist = alist;
- ac->addr = NULL;
- ac->start = READ_ONCE(alist->index);
- ac->index = ac->start;
- ac->error = 0;
- ac->begun = false;
- return 0;
-}
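The rewritten afs_iterate_addresses() picks the next endpoint with bit arithmetic: start from the addresses that responded, mask off those that failed or were already tried on this cursor, then take the remembered preferred slot if it is still a candidate and the lowest set bit otherwise. A small standalone sketch of that selection step with made-up bitmaps (it uses userspace ffs() where the kernel uses __ffs()):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* candidates = responded & ~(failed | tried); use the preferred slot if it
 * is still a candidate, otherwise the lowest-numbered remaining candidate.
 * Returns -1 when nothing is left to try.
 */
static int pick_addr(unsigned long responded, unsigned long failed,
		     unsigned long *tried, int preferred)
{
	unsigned long set = responded & ~(failed | *tried);

	if (!set)
		return -1;
	if (!(set & (1UL << preferred)))
		preferred = ffs((int)set) - 1;	/* lowest set bit */
	*tried |= 1UL << preferred;
	return preferred;
}

int main(void)
{
	unsigned long tried = 0;
	int idx;

	/* addresses 0, 1, 3 responded; address 1 failed; prefer index 3 */
	while ((idx = pick_addr(0x0b, 0x02, &tried, 3)) >= 0)
		printf("trying address %d\n", idx);	/* prints 3, then 0 */
	return 0;
}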
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b4ff1f7ae4ab..d12ffb457e47 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -23,9 +23,9 @@
#define AFSPATHMAX 1024 /* Maximum length of a pathname plus NUL */
#define AFSOPAQUEMAX 1024 /* Maximum length of an opaque field */
-typedef unsigned afs_volid_t;
-typedef unsigned afs_vnodeid_t;
-typedef unsigned long long afs_dataversion_t;
+typedef u64 afs_volid_t;
+typedef u64 afs_vnodeid_t;
+typedef u64 afs_dataversion_t;
typedef enum {
AFSVL_RWVOL, /* read/write volume */
@@ -52,8 +52,9 @@ typedef enum {
*/
struct afs_fid {
afs_volid_t vid; /* volume ID */
- afs_vnodeid_t vnode; /* file index within volume */
- unsigned unique; /* unique ID number (file index version) */
+ afs_vnodeid_t vnode; /* Lower 64-bits of file index within volume */
+ u32 vnode_hi; /* Upper 32-bits of file index */
+ u32 unique; /* unique ID number (file index version) */
};
/*
@@ -67,14 +68,14 @@ typedef enum {
} afs_callback_type_t;
struct afs_callback {
+ time64_t expires_at; /* Time at which expires */
unsigned version; /* Callback version */
- unsigned expiry; /* Time at which expires */
afs_callback_type_t type; /* Type of callback */
};
struct afs_callback_break {
struct afs_fid fid; /* File identifier */
- struct afs_callback cb; /* Callback details */
+ //struct afs_callback cb; /* Callback details */
};
#define AFSCBMAX 50 /* maximum callbacks transferred per bulk op */
@@ -129,19 +130,18 @@ typedef u32 afs_access_t;
struct afs_file_status {
u64 size; /* file size */
afs_dataversion_t data_version; /* current data version */
- time_t mtime_client; /* last time client changed data */
- time_t mtime_server; /* last time server changed data */
- unsigned abort_code; /* Abort if bulk-fetching this failed */
-
- afs_file_type_t type; /* file type */
- unsigned nlink; /* link count */
- u32 author; /* author ID */
- u32 owner; /* owner ID */
- u32 group; /* group ID */
+ struct timespec64 mtime_client; /* Last time client changed data */
+ struct timespec64 mtime_server; /* Last time server changed data */
+ s64 author; /* author ID */
+ s64 owner; /* owner ID */
+ s64 group; /* group ID */
afs_access_t caller_access; /* access rights for authenticated caller */
afs_access_t anon_access; /* access rights for unauthenticated caller */
umode_t mode; /* UNIX mode */
+ afs_file_type_t type; /* file type */
+ u32 nlink; /* link count */
s32 lock_count; /* file lock count (0=UNLK -1=WRLCK +ve=#RDLCK */
+ u32 abort_code; /* Abort if bulk-fetching this failed */
};
/*
@@ -158,25 +158,27 @@ struct afs_file_status {
* AFS volume synchronisation information
*/
struct afs_volsync {
- time_t creation; /* volume creation time */
+ time64_t creation; /* volume creation time */
};
/*
* AFS volume status record
*/
struct afs_volume_status {
- u32 vid; /* volume ID */
- u32 parent_id; /* parent volume ID */
+ afs_volid_t vid; /* volume ID */
+ afs_volid_t parent_id; /* parent volume ID */
u8 online; /* true if volume currently online and available */
u8 in_service; /* true if volume currently in service */
u8 blessed; /* same as in_service */
u8 needs_salvage; /* true if consistency checking required */
u32 type; /* volume type (afs_voltype_t) */
- u32 min_quota; /* minimum space set aside (blocks) */
- u32 max_quota; /* maximum space this volume may occupy (blocks) */
- u32 blocks_in_use; /* space this volume currently occupies (blocks) */
- u32 part_blocks_avail; /* space available in volume's partition */
- u32 part_max_blocks; /* size of volume's partition */
+ u64 min_quota; /* minimum space set aside (blocks) */
+ u64 max_quota; /* maximum space this volume may occupy (blocks) */
+ u64 blocks_in_use; /* space this volume currently occupies (blocks) */
+ u64 part_blocks_avail; /* space available in volume's partition */
+ u64 part_max_blocks; /* size of volume's partition */
+ s64 vol_copy_date;
+ s64 vol_backup_date;
};
#define AFS_BLOCK_SIZE 1024
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
index b1c31ec4523a..f6d0a21e8052 100644
--- a/fs/afs/cache.c
+++ b/fs/afs/cache.c
@@ -49,7 +49,7 @@ static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
struct afs_vnode *vnode = cookie_netfs_data;
struct afs_vnode_cache_aux aux;
- _enter("{%x,%x,%llx},%p,%u",
+ _enter("{%llx,%x,%llx},%p,%u",
vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
buffer, buflen);
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 5f261fbf2182..1c7955f5cdaf 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -210,12 +210,10 @@ void afs_init_callback_state(struct afs_server *server)
/*
* actually break a callback
*/
-void afs_break_callback(struct afs_vnode *vnode)
+void __afs_break_callback(struct afs_vnode *vnode)
{
_enter("");
- write_seqlock(&vnode->cb_lock);
-
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
vnode->cb_break++;
@@ -230,7 +228,12 @@ void afs_break_callback(struct afs_vnode *vnode)
afs_lock_may_be_available(vnode);
spin_unlock(&vnode->lock);
}
+}
+void afs_break_callback(struct afs_vnode *vnode)
+{
+ write_seqlock(&vnode->cb_lock);
+ __afs_break_callback(vnode);
write_sequnlock(&vnode->cb_lock);
}
@@ -310,14 +313,10 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
/* TODO: Sort the callback break list by volume ID */
for (; count > 0; callbacks++, count--) {
- _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
+ _debug("- Fid { vl=%08llx n=%llu u=%u }",
callbacks->fid.vid,
callbacks->fid.vnode,
- callbacks->fid.unique,
- callbacks->cb.version,
- callbacks->cb.expiry,
- callbacks->cb.type
- );
+ callbacks->fid.unique);
afs_break_one_callback(server, &callbacks->fid);
}
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 6127f0fcd62c..cf445dbd5f2e 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -20,6 +20,8 @@
#include "internal.h"
static unsigned __read_mostly afs_cell_gc_delay = 10;
+static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
+static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static void afs_manage_cell(struct work_struct *);
@@ -119,7 +121,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
*/
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
- const char *vllist)
+ const char *addresses)
{
struct afs_cell *cell;
int i, ret;
@@ -134,7 +136,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
return ERR_PTR(-EINVAL);
- _enter("%*.*s,%s", namelen, namelen, name, vllist);
+ _enter("%*.*s,%s", namelen, namelen, name, addresses);
cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
if (!cell) {
@@ -153,23 +155,26 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
(1 << AFS_CELL_FL_NO_LOOKUP_YET));
INIT_LIST_HEAD(&cell->proc_volumes);
rwlock_init(&cell->proc_lock);
- rwlock_init(&cell->vl_addrs_lock);
+ rwlock_init(&cell->vl_servers_lock);
/* Fill in the VL server list if we were given a list of addresses to
* use.
*/
- if (vllist) {
- struct afs_addr_list *alist;
-
- alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
- VL_SERVICE, AFS_VL_PORT);
- if (IS_ERR(alist)) {
- ret = PTR_ERR(alist);
+ if (addresses) {
+ struct afs_vlserver_list *vllist;
+
+ vllist = afs_parse_text_addrs(net,
+ addresses, strlen(addresses), ':',
+ VL_SERVICE, AFS_VL_PORT);
+ if (IS_ERR(vllist)) {
+ ret = PTR_ERR(vllist);
goto parse_failed;
}
- rcu_assign_pointer(cell->vl_addrs, alist);
+ rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_expiry = TIME64_MAX;
+ } else {
+ cell->dns_expiry = ktime_get_real_seconds();
}
_leave(" = %p", cell);
@@ -356,26 +361,40 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
*/
static void afs_update_cell(struct afs_cell *cell)
{
- struct afs_addr_list *alist, *old;
- time64_t now, expiry;
+ struct afs_vlserver_list *vllist, *old;
+ unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
+ unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
+ time64_t now, expiry = 0;
_enter("%s", cell->name);
- alist = afs_dns_query(cell, &expiry);
- if (IS_ERR(alist)) {
- switch (PTR_ERR(alist)) {
+ vllist = afs_dns_query(cell, &expiry);
+
+ now = ktime_get_real_seconds();
+ if (min_ttl > max_ttl)
+ max_ttl = min_ttl;
+ if (expiry < now + min_ttl)
+ expiry = now + min_ttl;
+ else if (expiry > now + max_ttl)
+ expiry = now + max_ttl;
+
+ if (IS_ERR(vllist)) {
+ switch (PTR_ERR(vllist)) {
case -ENODATA:
- /* The DNS said that the cell does not exist */
+ case -EDESTADDRREQ:
+ /* The DNS said that the cell does not exist or there
+ * weren't any addresses to be had.
+ */
set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
- cell->dns_expiry = ktime_get_real_seconds() + 61;
+ cell->dns_expiry = expiry;
break;
case -EAGAIN:
case -ECONNREFUSED:
default:
set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
- cell->dns_expiry = ktime_get_real_seconds() + 10;
+ cell->dns_expiry = now + 10;
break;
}
@@ -387,12 +406,12 @@ static void afs_update_cell(struct afs_cell *cell)
/* Exclusion on changing vl_addrs is achieved by a
* non-reentrant work item.
*/
- old = rcu_dereference_protected(cell->vl_addrs, true);
- rcu_assign_pointer(cell->vl_addrs, alist);
+ old = rcu_dereference_protected(cell->vl_servers, true);
+ rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_expiry = expiry;
if (old)
- afs_put_addrlist(old);
+ afs_put_vlserverlist(cell->net, old);
}
if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
@@ -414,7 +433,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
ASSERTCMP(atomic_read(&cell->usage), ==, 0);
- afs_put_addrlist(rcu_access_pointer(cell->vl_addrs));
+ afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
key_put(cell->anonymous_key);
kfree(cell);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 9e51d6fe7e8f..8ee5972893ed 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -16,6 +16,7 @@
#include <linux/ip.h>
#include "internal.h"
#include "afs_cm.h"
+#include "protocol_yfs.h"
static int afs_deliver_cb_init_call_back_state(struct afs_call *);
static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
@@ -30,6 +31,8 @@ static void SRXAFSCB_Probe(struct work_struct *);
static void SRXAFSCB_ProbeUuid(struct work_struct *);
static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
+static int afs_deliver_yfs_cb_callback(struct afs_call *);
+
#define CM_NAME(name) \
const char afs_SRXCB##name##_name[] __tracepoint_string = \
"CB." #name
@@ -101,12 +104,25 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
};
/*
+ * YFS CB.CallBack operation type
+ */
+static CM_NAME(YFS_CallBack);
+static const struct afs_call_type afs_SRXYFSCB_CallBack = {
+ .name = afs_SRXCBYFS_CallBack_name,
+ .deliver = afs_deliver_yfs_cb_callback,
+ .destructor = afs_cm_destructor,
+ .work = SRXAFSCB_CallBack,
+};
+
+/*
* route an incoming cache manager call
* - return T if supported, F if not
*/
bool afs_cm_incoming_call(struct afs_call *call)
{
- _enter("{CB.OP %u}", call->operation_ID);
+ _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);
+
+ call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall);
switch (call->operation_ID) {
case CBCallBack:
@@ -127,12 +143,102 @@ bool afs_cm_incoming_call(struct afs_call *call)
case CBTellMeAboutYourself:
call->type = &afs_SRXCBTellMeAboutYourself;
return true;
+ case YFSCBCallBack:
+ if (call->service_id != YFS_CM_SERVICE)
+ return false;
+ call->type = &afs_SRXYFSCB_CallBack;
+ return true;
default:
return false;
}
}
/*
+ * Record a probe to the cache manager from a server.
+ */
+static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
+{
+ _enter("");
+
+ if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) &&
+ !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) {
+ if (server->cm_epoch == call->epoch)
+ return 0;
+
+ if (!server->probe.said_rebooted) {
+ pr_notice("kAFS: FS rebooted %pU\n", &server->uuid);
+ server->probe.said_rebooted = true;
+ }
+ }
+
+ spin_lock(&server->probe_lock);
+
+ if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
+ server->cm_epoch = call->epoch;
+ server->probe.cm_epoch = call->epoch;
+ goto out;
+ }
+
+ if (server->probe.cm_probed &&
+ call->epoch != server->probe.cm_epoch &&
+ !server->probe.said_inconsistent) {
+ pr_notice("kAFS: FS endpoints inconsistent %pU\n",
+ &server->uuid);
+ server->probe.said_inconsistent = true;
+ }
+
+ if (!server->probe.cm_probed || call->epoch == server->cm_epoch)
+ server->probe.cm_epoch = server->cm_epoch;
+
+out:
+ server->probe.cm_probed = true;
+ spin_unlock(&server->probe_lock);
+ return 0;
+}
+
+/*
+ * Find the server record by peer address and record a probe to the cache
+ * manager from a server.
+ */
+static int afs_find_cm_server_by_peer(struct afs_call *call)
+{
+ struct sockaddr_rxrpc srx;
+ struct afs_server *server;
+
+ rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
+
+ server = afs_find_server(call->net, &srx);
+ if (!server) {
+ trace_afs_cm_no_server(call, &srx);
+ return 0;
+ }
+
+ call->cm_server = server;
+ return afs_record_cm_probe(call, server);
+}
+
+/*
+ * Find the server record by server UUID and record a probe to the cache
+ * manager from a server.
+ */
+static int afs_find_cm_server_by_uuid(struct afs_call *call,
+ struct afs_uuid *uuid)
+{
+ struct afs_server *server;
+
+ rcu_read_lock();
+ server = afs_find_server_by_uuid(call->net, call->request);
+ rcu_read_unlock();
+ if (!server) {
+ trace_afs_cm_no_server_u(call, call->request);
+ return 0;
+ }
+
+ call->cm_server = server;
+ return afs_record_cm_probe(call, server);
+}
+
+/*
* Clean up a cache manager call.
*/
static void afs_cm_destructor(struct afs_call *call)
@@ -168,7 +274,6 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
static int afs_deliver_cb_callback(struct afs_call *call)
{
struct afs_callback_break *cb;
- struct sockaddr_rxrpc srx;
__be32 *bp;
int ret, loop;
@@ -176,32 +281,32 @@ static int afs_deliver_cb_callback(struct afs_call *call)
switch (call->unmarshall) {
case 0:
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
/* extract the FID array and its count in two steps */
case 1:
_debug("extract FID count");
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
if (call->count > AFSCBMAX)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_cb_fid_count);
call->buffer = kmalloc(array3_size(call->count, 3, 4),
GFP_KERNEL);
if (!call->buffer)
return -ENOMEM;
- call->offset = 0;
+ afs_extract_to_buf(call, call->count * 3 * 4);
call->unmarshall++;
case 2:
_debug("extract FID array");
- ret = afs_extract_data(call, call->buffer,
- call->count * 3 * 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -218,59 +323,46 @@ static int afs_deliver_cb_callback(struct afs_call *call)
cb->fid.vid = ntohl(*bp++);
cb->fid.vnode = ntohl(*bp++);
cb->fid.unique = ntohl(*bp++);
- cb->cb.type = AFSCM_CB_UNTYPED;
}
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
/* extract the callback array and its count in two steps */
case 3:
_debug("extract CB count");
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
call->count2 = ntohl(call->tmp);
_debug("CB count: %u", call->count2);
if (call->count2 != call->count && call->count2 != 0)
- return afs_protocol_error(call, -EBADMSG);
- call->offset = 0;
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_cb_count);
+ call->_iter = &call->iter;
+ iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
call->unmarshall++;
case 4:
- _debug("extract CB array");
- ret = afs_extract_data(call, call->buffer,
- call->count2 * 3 * 4, false);
+ _debug("extract discard %zu/%u",
+ iov_iter_count(&call->iter), call->count2 * 3 * 4);
+
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
- _debug("unmarshall CB array");
- cb = call->request;
- bp = call->buffer;
- for (loop = call->count2; loop > 0; loop--, cb++) {
- cb->cb.version = ntohl(*bp++);
- cb->cb.expiry = ntohl(*bp++);
- cb->cb.type = ntohl(*bp++);
- }
-
- call->offset = 0;
call->unmarshall++;
case 5:
break;
}
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
- return -EIO;
+ return afs_io_error(call, afs_io_error_cm_reply);
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
- call->cm_server = afs_find_server(call->net, &srx);
- if (!call->cm_server)
- trace_afs_cm_no_server(call, &srx);
-
- return afs_queue_call_work(call);
+ return afs_find_cm_server_by_peer(call);
}
/*
@@ -294,24 +386,18 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
*/
static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
- struct sockaddr_rxrpc srx;
int ret;
_enter("");
- rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
-
- ret = afs_extract_data(call, NULL, 0, false);
+ afs_extract_discard(call, 0);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- call->cm_server = afs_find_server(call->net, &srx);
- if (!call->cm_server)
- trace_afs_cm_no_server(call, &srx);
-
- return afs_queue_call_work(call);
+ return afs_find_cm_server_by_peer(call);
}
/*
@@ -330,16 +416,15 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
switch (call->unmarshall) {
case 0:
- call->offset = 0;
call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
if (!call->buffer)
return -ENOMEM;
+ afs_extract_to_buf(call, 11 * sizeof(__be32));
call->unmarshall++;
case 1:
_debug("extract UUID");
- ret = afs_extract_data(call, call->buffer,
- 11 * sizeof(__be32), false);
+ ret = afs_extract_data(call, false);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
@@ -362,7 +447,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
for (loop = 0; loop < 6; loop++)
r->node[loop] = ntohl(b[loop + 5]);
- call->offset = 0;
call->unmarshall++;
case 2:
@@ -370,17 +454,11 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
}
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
- return -EIO;
+ return afs_io_error(call, afs_io_error_cm_reply);
/* we'll need the file server record as that tells us which set of
* vnodes to operate upon */
- rcu_read_lock();
- call->cm_server = afs_find_server_by_uuid(call->net, call->request);
- rcu_read_unlock();
- if (!call->cm_server)
- trace_afs_cm_no_server_u(call, call->request);
-
- return afs_queue_call_work(call);
+ return afs_find_cm_server_by_uuid(call, call->request);
}
/*
@@ -405,14 +483,14 @@ static int afs_deliver_cb_probe(struct afs_call *call)
_enter("");
- ret = afs_extract_data(call, NULL, 0, false);
+ afs_extract_discard(call, 0);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
- return -EIO;
-
- return afs_queue_call_work(call);
+ return afs_io_error(call, afs_io_error_cm_reply);
+ return afs_find_cm_server_by_peer(call);
}
/*
@@ -453,16 +531,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
switch (call->unmarshall) {
case 0:
- call->offset = 0;
call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
if (!call->buffer)
return -ENOMEM;
+ afs_extract_to_buf(call, 11 * sizeof(__be32));
call->unmarshall++;
case 1:
_debug("extract UUID");
- ret = afs_extract_data(call, call->buffer,
- 11 * sizeof(__be32), false);
+ ret = afs_extract_data(call, false);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
@@ -485,7 +562,6 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
for (loop = 0; loop < 6; loop++)
r->node[loop] = ntohl(b[loop + 5]);
- call->offset = 0;
call->unmarshall++;
case 2:
@@ -493,9 +569,8 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
}
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
- return -EIO;
-
- return afs_queue_call_work(call);
+ return afs_io_error(call, afs_io_error_cm_reply);
+ return afs_find_cm_server_by_uuid(call, call->request);
}
/*
@@ -570,12 +645,88 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
_enter("");
- ret = afs_extract_data(call, NULL, 0, false);
+ afs_extract_discard(call, 0);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
- return -EIO;
+ return afs_io_error(call, afs_io_error_cm_reply);
+ return afs_find_cm_server_by_peer(call);
+}
+
+/*
+ * deliver request data to a YFS CB.CallBack call
+ */
+static int afs_deliver_yfs_cb_callback(struct afs_call *call)
+{
+ struct afs_callback_break *cb;
+ struct yfs_xdr_YFSFid *bp;
+ size_t size;
+ int ret, loop;
+
+ _enter("{%u}", call->unmarshall);
+
+ switch (call->unmarshall) {
+ case 0:
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ /* extract the FID array and its count in two steps */
+ case 1:
+ _debug("extract FID count");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ call->count = ntohl(call->tmp);
+ _debug("FID count: %u", call->count);
+ if (call->count > YFSCBMAX)
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_cb_fid_count);
+
+ size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid));
+ call->buffer = kmalloc(size, GFP_KERNEL);
+ if (!call->buffer)
+ return -ENOMEM;
+ afs_extract_to_buf(call, size);
+ call->unmarshall++;
+
+ case 2:
+ _debug("extract FID array");
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+ _debug("unmarshall FID array");
+ call->request = kcalloc(call->count,
+ sizeof(struct afs_callback_break),
+ GFP_KERNEL);
+ if (!call->request)
+ return -ENOMEM;
+
+ cb = call->request;
+ bp = call->buffer;
+ for (loop = call->count; loop > 0; loop--, cb++) {
+ cb->fid.vid = xdr_to_u64(bp->volume);
+ cb->fid.vnode = xdr_to_u64(bp->vnode.lo);
+ cb->fid.vnode_hi = ntohl(bp->vnode.hi);
+ cb->fid.unique = ntohl(bp->vnode.unique);
+ bp++;
+ }
+
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ case 3:
+ break;
+ }
+
+ if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
+ return afs_io_error(call, afs_io_error_cm_reply);
- return afs_queue_call_work(call);
+ /* We'll need the file server record as that tells us which set of
+ * vnodes to operate upon.
+ */
+ return afs_find_cm_server_by_peer(call);
}
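
The cache-manager delivery routines in the hunk above are resumable state machines: rxrpc may hand the request body over in fragments, so each switch case records how far unmarshalling has progressed, asks afs_extract_data() for the next piece and returns if more data is still to come. A counted array such as the FID list is therefore pulled in two steps, first the count, then count * 3 * 4 bytes of FIDs. The standalone userspace sketch below models that pattern only; it is not part of the patch, and the demo_* names are invented for illustration.

/* Standalone userspace model (not kernel code) of the count-then-array
 * extraction pattern used by afs_deliver_cb_callback() above.  The demo_*
 * names are invented for illustration.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_fid { uint32_t vid, vnode, unique; };

struct demo_call {
	int		unmarshall;		/* which phase we are in */
	uint32_t	count;			/* number of FIDs announced */
	size_t		need, have;		/* bytes wanted/got this phase */
	uint8_t		buf[16 * 12];		/* cf. call->buffer */
	struct demo_fid	fids[16];
};

/* Copy in as much of the current phase as the fragment provides; return 1
 * when the phase is complete, 0 if we must wait for more data (the kernel
 * signals that by afs_extract_data() returning -EAGAIN). */
static int demo_extract(struct demo_call *c, const uint8_t **p, size_t *len)
{
	size_t n = c->need - c->have;

	if (n > *len)
		n = *len;
	memcpy(c->buf + c->have, *p, n);
	c->have += n;
	*p += n;
	*len -= n;
	return c->have == c->need;
}

/* Called once per arriving fragment, like the ->deliver handlers above. */
static int demo_deliver(struct demo_call *c, const uint8_t *p, size_t len)
{
	switch (c->unmarshall) {
	case 0:					/* set up for the FID count */
		c->need = 4;
		c->have = 0;
		c->unmarshall++;
		/* Fall through */
	case 1:					/* extract the FID count */
		if (!demo_extract(c, &p, &len))
			return 0;
		memcpy(&c->count, c->buf, 4);
		c->count = ntohl(c->count);
		if (c->count > 16)
			return -1;		/* cf. afs_protocol_error() */
		c->need = c->count * 3 * 4;
		c->have = 0;
		c->unmarshall++;
		/* Fall through */
	case 2:					/* extract the FID array */
		if (!demo_extract(c, &p, &len))
			return 0;
		for (uint32_t i = 0; i < c->count; i++) {
			uint32_t w[3];

			memcpy(w, c->buf + i * 12, sizeof(w));
			c->fids[i].vid    = ntohl(w[0]);
			c->fids[i].vnode  = ntohl(w[1]);
			c->fids[i].unique = ntohl(w[2]);
		}
		c->unmarshall++;
	}
	return 1;				/* fully unmarshalled */
}

int main(void)
{
	uint32_t msg[7] = { htonl(2),
			    htonl(0x11), htonl(0x22), htonl(0x33),
			    htonl(0x44), htonl(0x55), htonl(0x66) };
	struct demo_call c = { .unmarshall = 0 };
	const uint8_t *p = (const uint8_t *)msg;

	/* Deliver the 28-byte body in two fragments, as rxrpc might. */
	printf("fragment 1: %d\n", demo_deliver(&c, p, 10));
	printf("fragment 2: %d\n", demo_deliver(&c, p + 10, sizeof(msg) - 10));
	printf("fid[1] = %x:%x:%x\n",
	       c.fids[1].vid, c.fids[1].vnode, c.fids[1].unique);
	return 0;
}

Splitting the delivery across two fragments, as main() does here, is exactly the situation the unmarshall counter exists to survive.
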
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 855bf2b79fed..43dea3b00c29 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -138,6 +138,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
ntohs(dbuf->blocks[tmp].hdr.magic));
trace_afs_dir_check_failed(dvnode, off, i_size);
kunmap(page);
+ trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
goto error;
}
@@ -190,9 +191,11 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
retry:
i_size = i_size_read(&dvnode->vfs_inode);
if (i_size < 2048)
- return ERR_PTR(-EIO);
- if (i_size > 2048 * 1024)
+ return ERR_PTR(afs_bad(dvnode, afs_file_error_dir_small));
+ if (i_size > 2048 * 1024) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
return ERR_PTR(-EFBIG);
+ }
_enter("%llu", i_size);
@@ -315,7 +318,8 @@ content_has_grown:
/*
* deal with one block in an AFS directory
*/
-static int afs_dir_iterate_block(struct dir_context *ctx,
+static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+ struct dir_context *ctx,
union afs_xdr_dir_block *block,
unsigned blkoff)
{
@@ -365,7 +369,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
" (len %u/%zu)",
blkoff / sizeof(union afs_xdr_dir_block),
offset, next, tmp, nlen);
- return -EIO;
+ return afs_bad(dvnode, afs_file_error_dir_over_end);
}
if (!(block->hdr.bitmap[next / 8] &
(1 << (next % 8)))) {
@@ -373,7 +377,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
" %u unmarked extension (len %u/%zu)",
blkoff / sizeof(union afs_xdr_dir_block),
offset, next, tmp, nlen);
- return -EIO;
+ return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
}
_debug("ENT[%zu.%u]: ext %u/%zu",
@@ -442,7 +446,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
*/
page = req->pages[blkoff / PAGE_SIZE];
if (!page) {
- ret = -EIO;
+ ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
break;
}
mark_page_accessed(page);
@@ -455,7 +459,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
do {
dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
sizeof(union afs_xdr_dir_block)];
- ret = afs_dir_iterate_block(ctx, dblock, blkoff);
+ ret = afs_dir_iterate_block(dvnode, ctx, dblock, blkoff);
if (ret != 1) {
kunmap(page);
goto out;
@@ -548,7 +552,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
}
*fid = cookie.fid;
- _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique);
+ _leave(" = 0 { vn=%llu u=%u }", fid->vnode, fid->unique);
return 0;
}
@@ -826,7 +830,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
struct key *key;
int ret;
- _enter("{%x:%u},%p{%pd},",
+ _enter("{%llx:%llu},%p{%pd},",
dvnode->fid.vid, dvnode->fid.vnode, dentry, dentry);
ASSERTCMP(d_inode(dentry), ==, NULL);
@@ -896,7 +900,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
- _enter("{v={%x:%u} n=%pd fl=%lx},",
+ _enter("{v={%llx:%llu} n=%pd fl=%lx},",
vnode->fid.vid, vnode->fid.vnode, dentry,
vnode->flags);
} else {
@@ -965,7 +969,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
/* if the vnode ID has changed, then the dirent points to a
* different file */
if (fid.vnode != vnode->fid.vnode) {
- _debug("%pd: dirent changed [%u != %u]",
+ _debug("%pd: dirent changed [%llu != %llu]",
dentry, fid.vnode,
vnode->fid.vnode);
goto not_found;
@@ -1085,6 +1089,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+ afs_vnode_commit_status(fc, vnode, 0);
d_add(new_dentry, inode);
}
@@ -1104,7 +1109,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
mode |= S_IFDIR;
- _enter("{%x:%u},{%pd},%ho",
+ _enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
key = afs_request_key(dvnode->volume->cell);
@@ -1169,12 +1174,12 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
static int afs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct afs_fs_cursor fc;
- struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
struct key *key;
u64 data_version = dvnode->status.data_version;
int ret;
- _enter("{%x:%u},{%pd}",
+ _enter("{%llx:%llu},{%pd}",
dvnode->fid.vid, dvnode->fid.vnode, dentry);
key = afs_request_key(dvnode->volume->cell);
@@ -1183,11 +1188,19 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
goto error;
}
+ /* Try to make sure we have a callback promise on the victim. */
+ if (d_really_is_positive(dentry)) {
+ vnode = AFS_FS_I(d_inode(dentry));
+ ret = afs_validate(vnode, key);
+ if (ret < 0)
+ goto error_key;
+ }
+
ret = -ERESTARTSYS;
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_remove(&fc, dentry->d_name.name, true,
+ afs_fs_remove(&fc, vnode, dentry->d_name.name, true,
data_version);
}
@@ -1201,6 +1214,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
}
}
+error_key:
key_put(key);
error:
return ret;
@@ -1231,7 +1245,9 @@ static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
if (d_really_is_positive(dentry)) {
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- if (dir_valid) {
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ /* Already done */
+ } else if (dir_valid) {
drop_nlink(&vnode->vfs_inode);
if (vnode->vfs_inode.i_nlink == 0) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
@@ -1260,13 +1276,13 @@ static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
static int afs_unlink(struct inode *dir, struct dentry *dentry)
{
struct afs_fs_cursor fc;
- struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
+ struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
struct key *key;
unsigned long d_version = (unsigned long)dentry->d_fsdata;
u64 data_version = dvnode->status.data_version;
int ret;
- _enter("{%x:%u},{%pd}",
+ _enter("{%llx:%llu},{%pd}",
dvnode->fid.vid, dvnode->fid.vnode, dentry);
if (dentry->d_name.len >= AFSNAMEMAX)
@@ -1290,7 +1306,18 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_remove(&fc, dentry->d_name.name, false,
+
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
+ !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
+ yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
+ data_version);
+ if (fc.ac.error != -ECONNABORTED ||
+ fc.ac.abort_code != RXGEN_OPCODE)
+ continue;
+ set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
+ }
+
+ afs_fs_remove(&fc, vnode, dentry->d_name.name, false,
data_version);
}
@@ -1330,7 +1357,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
mode |= S_IFREG;
- _enter("{%x:%u},{%pd},%ho,",
+ _enter("{%llx:%llu},{%pd},%ho,",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
ret = -ENAMETOOLONG;
@@ -1393,7 +1420,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
dvnode = AFS_FS_I(dir);
data_version = dvnode->status.data_version;
- _enter("{%x:%u},{%x:%u},{%pd}",
+ _enter("{%llx:%llu},{%llx:%llu},{%pd}",
vnode->fid.vid, vnode->fid.vnode,
dvnode->fid.vid, dvnode->fid.vnode,
dentry);
@@ -1464,7 +1491,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
u64 data_version = dvnode->status.data_version;
int ret;
- _enter("{%x:%u},{%pd},%s",
+ _enter("{%llx:%llu},{%pd},%s",
dvnode->fid.vid, dvnode->fid.vnode, dentry,
content);
@@ -1540,7 +1567,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
orig_data_version = orig_dvnode->status.data_version;
new_data_version = new_dvnode->status.data_version;
- _enter("{%x:%u},{%x:%u},{%x:%u},{%pd}",
+ _enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}",
orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
vnode->fid.vid, vnode->fid.vnode,
new_dvnode->fid.vid, new_dvnode->fid.vnode,
@@ -1607,7 +1634,7 @@ static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
{
struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
- _enter("{{%x:%u}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+ _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
set_page_private(page, 0);
ClearPagePrivate(page);
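
One behavioural change in the afs_unlink() hunk above: when the server has been identified as a YFS server, the client first tries the newer YFS.RemoveFile2 RPC and only falls back to the plain remove when the call is aborted with RXGEN_OPCODE, setting AFS_SERVER_FL_NO_RM2 so the unsupported opcode is not retried against that server. A much simplified userspace model of that try-then-remember-the-fallback pattern, with stand-in names and error values, could look like this:

/* Simplified model (not kernel code) of the RemoveFile2 fallback above.
 * DEMO_RXGEN_OPCODE and the demo_* names stand in for the kernel ones. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_RXGEN_OPCODE 8	/* stand-in for the rx "unknown op" abort */

struct demo_server {
	bool is_yfs;		/* cf. AFS_SERVER_FL_IS_YFS */
	bool no_rm2;		/* cf. AFS_SERVER_FL_NO_RM2 */
};

/* Pretend this server aborts YFS.RemoveFile2 with "unknown opcode". */
static int demo_yfs_remove_file2(struct demo_server *s)
{
	(void)s;
	return -DEMO_RXGEN_OPCODE;
}

static int demo_old_remove(struct demo_server *s)
{
	(void)s;
	return 0;
}

static int demo_unlink(struct demo_server *s)
{
	if (s->is_yfs && !s->no_rm2) {
		int ret = demo_yfs_remove_file2(s);

		if (ret != -DEMO_RXGEN_OPCODE)
			return ret;
		s->no_rm2 = true;	/* don't try RemoveFile2 again */
	}
	return demo_old_remove(s);
}

int main(void)
{
	struct demo_server s = { .is_yfs = true };

	printf("first unlink: %d (no_rm2=%d)\n", demo_unlink(&s), s.no_rm2);
	printf("second unlink: %d (no_rm2=%d)\n", demo_unlink(&s), s.no_rm2);
	return 0;
}
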
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index f29c6dade7f6..a9ba81ddf154 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -46,7 +46,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
return 0;
}
- ret = dns_query("afsdb", name, len, "", NULL, NULL);
+ ret = dns_query("afsdb", name, len, "srv=1", NULL, NULL);
if (ret == -ENODATA)
ret = -EDESTADDRREQ;
return ret;
@@ -62,7 +62,7 @@ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
struct inode *inode;
int ret = -ENOENT;
- _enter("%p{%pd}, {%x:%u}",
+ _enter("%p{%pd}, {%llx:%llu}",
dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 7d4f26198573..d6bc3f5d784b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -121,7 +121,7 @@ int afs_open(struct inode *inode, struct file *file)
struct key *key;
int ret;
- _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key)) {
@@ -170,7 +170,7 @@ int afs_release(struct inode *inode, struct file *file)
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = file->private_data;
- _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
if ((file->f_mode & FMODE_WRITE))
return vfs_fsync(file, 0);
@@ -228,7 +228,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
struct afs_fs_cursor fc;
int ret;
- _enter("%s{%x:%u.%u},%x,,,",
+ _enter("%s{%llx:%llu.%u},%x,,,",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
@@ -634,7 +634,7 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
unsigned long priv;
- _enter("{{%x:%u}[%lu],%lx},%x",
+ _enter("{{%llx:%llu}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
gfp_flags);
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index dc62d15a964b..0568fd986821 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -29,7 +29,7 @@ static const struct file_lock_operations afs_lock_ops = {
*/
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
- _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}
@@ -76,7 +76,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
struct afs_fs_cursor fc;
int ret;
- _enter("%s{%x:%u.%u},%x,%u",
+ _enter("%s{%llx:%llu.%u},%x,%u",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
@@ -107,7 +107,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
struct afs_fs_cursor fc;
int ret;
- _enter("%s{%x:%u.%u},%x",
+ _enter("%s{%llx:%llu.%u},%x",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
@@ -138,7 +138,7 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
struct afs_fs_cursor fc;
int ret;
- _enter("%s{%x:%u.%u},%x",
+ _enter("%s{%llx:%llu.%u},%x",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
@@ -175,7 +175,7 @@ void afs_lock_work(struct work_struct *work)
struct key *key;
int ret;
- _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
spin_lock(&vnode->lock);
@@ -192,7 +192,7 @@ again:
ret = afs_release_lock(vnode, vnode->lock_key);
if (ret < 0)
printk(KERN_WARNING "AFS:"
- " Failed to release lock on {%x:%x} error %d\n",
+ " Failed to release lock on {%llx:%llx} error %d\n",
vnode->fid.vid, vnode->fid.vnode, ret);
spin_lock(&vnode->lock);
@@ -229,7 +229,7 @@ again:
key_put(key);
if (ret < 0)
- pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n",
+ pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
vnode->fid.vid, vnode->fid.vnode, ret);
spin_lock(&vnode->lock);
@@ -430,7 +430,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
struct key *key = afs_file_key(file);
int ret;
- _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+ _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
/* only whole-file locks are supported */
if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
@@ -582,7 +582,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
int ret;
- _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+ _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
/* Flush all pending writes before doing anything with locks. */
vfs_fsync(file, 0);
@@ -639,7 +639,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
- _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
+ _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
vnode->fid.vid, vnode->fid.vnode, cmd,
fl->fl_type, fl->fl_flags,
(long long) fl->fl_start, (long long) fl->fl_end);
@@ -662,7 +662,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
- _enter("{%x:%u},%d,{t=%x,fl=%x}",
+ _enter("{%llx:%llu},%d,{t=%x,fl=%x}",
vnode->fid.vid, vnode->fid.vnode, cmd,
fl->fl_type, fl->fl_flags);
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
new file mode 100644
index 000000000000..d049cb459742
--- /dev/null
+++ b/fs/afs/fs_probe.c
@@ -0,0 +1,270 @@
+/* AFS fileserver probing
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "afs_fs.h"
+#include "internal.h"
+#include "protocol_yfs.h"
+
+static bool afs_fs_probe_done(struct afs_server *server)
+{
+ if (!atomic_dec_and_test(&server->probe_outstanding))
+ return false;
+
+ wake_up_var(&server->probe_outstanding);
+ clear_bit_unlock(AFS_SERVER_FL_PROBING, &server->flags);
+ wake_up_bit(&server->flags, AFS_SERVER_FL_PROBING);
+ return true;
+}
+
+/*
+ * Process the result of probing a fileserver. This is called after successful
+ * or failed delivery of an FS.GetCapabilities operation.
+ */
+void afs_fileserver_probe_result(struct afs_call *call)
+{
+ struct afs_addr_list *alist = call->alist;
+ struct afs_server *server = call->reply[0];
+ unsigned int server_index = (long)call->reply[1];
+ unsigned int index = call->addr_ix;
+ unsigned int rtt = UINT_MAX;
+ bool have_result = false;
+ u64 _rtt;
+ int ret = call->error;
+
+ _enter("%pU,%u", &server->uuid, index);
+
+ spin_lock(&server->probe_lock);
+
+ switch (ret) {
+ case 0:
+ server->probe.error = 0;
+ goto responded;
+ case -ECONNABORTED:
+ if (!server->probe.responded) {
+ server->probe.abort_code = call->abort_code;
+ server->probe.error = ret;
+ }
+ goto responded;
+ case -ENOMEM:
+ case -ENONET:
+ server->probe.local_failure = true;
+ afs_io_error(call, afs_io_error_fs_probe_fail);
+ goto out;
+ case -ECONNRESET: /* Responded, but call expired. */
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ case -ETIMEDOUT:
+ case -ETIME:
+ default:
+ clear_bit(index, &alist->responded);
+ set_bit(index, &alist->failed);
+ if (!server->probe.responded &&
+ (server->probe.error == 0 ||
+ server->probe.error == -ETIMEDOUT ||
+ server->probe.error == -ETIME))
+ server->probe.error = ret;
+ afs_io_error(call, afs_io_error_fs_probe_fail);
+ goto out;
+ }
+
+responded:
+ set_bit(index, &alist->responded);
+ clear_bit(index, &alist->failed);
+
+ if (call->service_id == YFS_FS_SERVICE) {
+ server->probe.is_yfs = true;
+ set_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+ alist->addrs[index].srx_service = call->service_id;
+ } else {
+ server->probe.not_yfs = true;
+ if (!server->probe.is_yfs) {
+ clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+ alist->addrs[index].srx_service = call->service_id;
+ }
+ }
+
+ /* Get the RTT and scale it to fit into a 32-bit value that represents
+ * over a minute of time so that we can access it with one instruction
+ * on a 32-bit system.
+ */
+ _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
+ _rtt /= 64;
+ rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
+ if (rtt < server->probe.rtt) {
+ server->probe.rtt = rtt;
+ alist->preferred = index;
+ have_result = true;
+ }
+
+ smp_wmb(); /* Set rtt before responded. */
+ server->probe.responded = true;
+ set_bit(AFS_SERVER_FL_PROBED, &server->flags);
+out:
+ spin_unlock(&server->probe_lock);
+
+ _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
+ server_index, index, &alist->addrs[index].transport,
+ (unsigned int)rtt, ret);
+
+ have_result |= afs_fs_probe_done(server);
+ if (have_result) {
+ server->probe.have_result = true;
+ wake_up_var(&server->probe.have_result);
+ wake_up_all(&server->probe_wq);
+ }
+}
+
+/*
+ * Probe all of a fileserver's addresses to find out the best route and to
+ * query its capabilities.
+ */
+static int afs_do_probe_fileserver(struct afs_net *net,
+ struct afs_server *server,
+ struct key *key,
+ unsigned int server_index)
+{
+ struct afs_addr_cursor ac = {
+ .index = 0,
+ };
+ int ret;
+
+ _enter("%pU", &server->uuid);
+
+ read_lock(&server->fs_lock);
+ ac.alist = rcu_dereference_protected(server->addresses,
+ lockdep_is_held(&server->fs_lock));
+ read_unlock(&server->fs_lock);
+
+ atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+ memset(&server->probe, 0, sizeof(server->probe));
+ server->probe.rtt = UINT_MAX;
+
+ for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
+ ret = afs_fs_get_capabilities(net, server, &ac, key, server_index,
+ true);
+ if (ret != -EINPROGRESS) {
+ afs_fs_probe_done(server);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Send off probes to all unprobed servers.
+ */
+int afs_probe_fileservers(struct afs_net *net, struct key *key,
+ struct afs_server_list *list)
+{
+ struct afs_server *server;
+ int i, ret;
+
+ for (i = 0; i < list->nr_servers; i++) {
+ server = list->servers[i].server;
+ if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
+ continue;
+
+ if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags)) {
+ ret = afs_do_probe_fileserver(net, server, key, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Wait for the first as-yet untried fileserver to respond.
+ */
+int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
+{
+ struct wait_queue_entry *waits;
+ struct afs_server *server;
+ unsigned int rtt = UINT_MAX;
+ bool have_responders = false;
+ int pref = -1, i;
+
+ _enter("%u,%lx", slist->nr_servers, untried);
+
+ /* Only wait for servers that have a probe outstanding. */
+ for (i = 0; i < slist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = slist->servers[i].server;
+ if (!test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+ __clear_bit(i, &untried);
+ if (server->probe.responded)
+ have_responders = true;
+ }
+ }
+ if (have_responders || !untried)
+ return 0;
+
+ waits = kmalloc(array_size(slist->nr_servers, sizeof(*waits)), GFP_KERNEL);
+ if (!waits)
+ return -ENOMEM;
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = slist->servers[i].server;
+ init_waitqueue_entry(&waits[i], current);
+ add_wait_queue(&server->probe_wq, &waits[i]);
+ }
+ }
+
+ for (;;) {
+ bool still_probing = false;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ for (i = 0; i < slist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = slist->servers[i].server;
+ if (server->probe.responded)
+ goto stop;
+ if (test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+ still_probing = true;
+ }
+ }
+
+ if (!still_probing || unlikely(signal_pending(current)))
+ goto stop;
+ schedule();
+ }
+
+stop:
+ set_current_state(TASK_RUNNING);
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = slist->servers[i].server;
+ if (server->probe.responded &&
+ server->probe.rtt < rtt) {
+ pref = i;
+ rtt = server->probe.rtt;
+ }
+
+ remove_wait_queue(&server->probe_wq, &waits[i]);
+ }
+ }
+
+ kfree(waits);
+
+ if (pref == -1 && signal_pending(current))
+ return -ERESTARTSYS;
+
+ if (pref >= 0)
+ slist->preferred = pref;
+ return 0;
+}
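
A note on the arithmetic in afs_fileserver_probe_result() above: assuming rxrpc_kernel_get_rtt() reports the round trip time in nanoseconds, dividing by 64 yields units of roughly 64 ns, so the clamped 32-bit value covers about 4.29 billion * 64 ns, or some 275 seconds, which is the "over a minute" the comment mentions; the smallest scaled RTT then becomes the preferred address. The sketch below illustrates that scaling and selection only and is not kernel code.

/* Userspace sketch of the RTT scaling and preferred-address selection
 * performed in afs_fileserver_probe_result() above, assuming nanosecond
 * input from rxrpc_kernel_get_rtt(). */
#include <stdint.h>
#include <stdio.h>

/* Scale a nanosecond RTT into ~64 ns units so it fits a 32-bit field;
 * UINT32_MAX of those units is about 275 seconds. */
static unsigned int scale_rtt(uint64_t rtt_ns)
{
	uint64_t scaled = rtt_ns / 64;

	return scaled > UINT32_MAX ? UINT32_MAX : (unsigned int)scaled;
}

int main(void)
{
	/* Simulated probe RTTs, in nanoseconds, for three addresses. */
	uint64_t rtt_ns[3] = { 1800000, 950000, 2400000 };
	unsigned int best = UINT32_MAX;
	int preferred = -1;

	for (int i = 0; i < 3; i++) {
		unsigned int rtt = scale_rtt(rtt_ns[i]);

		if (rtt < best) {	/* cf. "if (rtt < server->probe.rtt)" */
			best = rtt;
			preferred = i;
		}
	}

	printf("preferred address %d, rtt %u units (~%llu ns)\n",
	       preferred, best, (unsigned long long)best * 64);
	return 0;
}
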
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 50929cb91732..ca08c83168f5 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -17,15 +17,10 @@
#include "internal.h"
#include "afs_fs.h"
#include "xdr_fs.h"
+#include "protocol_yfs.h"
static const struct afs_fid afs_zero_fid;
-/*
- * We need somewhere to discard into in case the server helpfully returns more
- * than we asked for in FS.FetchData{,64}.
- */
-static u8 afs_discard_buffer[64];
-
static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
{
call->cbi = afs_get_cb_interest(cbi);
@@ -75,8 +70,7 @@ void afs_update_inode_from_status(struct afs_vnode *vnode,
struct timespec64 t;
umode_t mode;
- t.tv_sec = status->mtime_client;
- t.tv_nsec = 0;
+ t = status->mtime_client;
vnode->vfs_inode.i_ctime = t;
vnode->vfs_inode.i_mtime = t;
vnode->vfs_inode.i_atime = t;
@@ -96,7 +90,7 @@ void afs_update_inode_from_status(struct afs_vnode *vnode,
if (!(flags & AFS_VNODE_NOT_YET_SET)) {
if (expected_version &&
*expected_version != status->data_version) {
- _debug("vnode modified %llx on {%x:%u} [exp %llx]",
+ _debug("vnode modified %llx on {%llx:%llu} [exp %llx]",
(unsigned long long) status->data_version,
vnode->fid.vid, vnode->fid.vnode,
(unsigned long long) *expected_version);
@@ -170,7 +164,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
if (type != status->type &&
vnode &&
!test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
- pr_warning("Vnode %x:%x:%x changed type %u to %u\n",
+ pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
@@ -200,8 +194,10 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
EXTRACT_M(mode);
EXTRACT_M(group);
- status->mtime_client = ntohl(xdr->mtime_client);
- status->mtime_server = ntohl(xdr->mtime_server);
+ status->mtime_client.tv_sec = ntohl(xdr->mtime_client);
+ status->mtime_client.tv_nsec = 0;
+ status->mtime_server.tv_sec = ntohl(xdr->mtime_server);
+ status->mtime_server.tv_nsec = 0;
status->lock_count = ntohl(xdr->lock_count);
size = (u64)ntohl(xdr->size_lo);
@@ -233,7 +229,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
bad:
xdr_dump_bad(*_bp);
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
}
/*
@@ -273,7 +269,7 @@ static void xdr_decode_AFSCallBack(struct afs_call *call,
write_seqlock(&vnode->cb_lock);
- if (call->cb_break == afs_cb_break_sum(vnode, cbi)) {
+ if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) {
vnode->cb_version = ntohl(*bp++);
cb_expiry = ntohl(*bp++);
vnode->cb_type = ntohl(*bp++);
@@ -293,13 +289,19 @@ static void xdr_decode_AFSCallBack(struct afs_call *call,
*_bp = bp;
}
-static void xdr_decode_AFSCallBack_raw(const __be32 **_bp,
+static ktime_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
+{
+ return ktime_add_ns(call->reply_time, expiry * NSEC_PER_SEC);
+}
+
+static void xdr_decode_AFSCallBack_raw(struct afs_call *call,
+ const __be32 **_bp,
struct afs_callback *cb)
{
const __be32 *bp = *_bp;
cb->version = ntohl(*bp++);
- cb->expiry = ntohl(*bp++);
+ cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++));
cb->type = ntohl(*bp++);
*_bp = bp;
}
@@ -311,14 +313,18 @@ static void xdr_decode_AFSVolSync(const __be32 **_bp,
struct afs_volsync *volsync)
{
const __be32 *bp = *_bp;
+ u32 creation;
- volsync->creation = ntohl(*bp++);
+ creation = ntohl(*bp++);
bp++; /* spare2 */
bp++; /* spare3 */
bp++; /* spare4 */
bp++; /* spare5 */
bp++; /* spare6 */
*_bp = bp;
+
+ if (volsync)
+ volsync->creation = creation;
}
/*
@@ -379,6 +385,8 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
vs->blocks_in_use = ntohl(*bp++);
vs->part_blocks_avail = ntohl(*bp++);
vs->part_max_blocks = ntohl(*bp++);
+ vs->vol_copy_date = 0;
+ vs->vol_backup_date = 0;
*_bp = bp;
}
@@ -395,16 +403,16 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
if (ret < 0)
return ret;
- _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- if (afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
xdr_decode_AFSCallBack(call, vnode, &bp);
- if (call->reply[1])
- xdr_decode_AFSVolSync(&bp, call->reply[1]);
+ xdr_decode_AFSVolSync(&bp, call->reply[1]);
_leave(" = 0 [done]");
return 0;
@@ -431,7 +439,10 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- _enter(",%x,{%x:%u},,",
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_fetch_file_status(fc, volsync, new_inode);
+
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode,
@@ -445,6 +456,7 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
call->reply[0] = vnode;
call->reply[1] = volsync;
call->expected_version = new_inode ? 1 : vnode->status.data_version;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -468,139 +480,117 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
struct afs_read *req = call->reply[2];
const __be32 *bp;
unsigned int size;
- void *buffer;
int ret;
- _enter("{%u,%zu/%u;%llu/%llu}",
- call->unmarshall, call->offset, call->count,
- req->remain, req->actual_len);
+ _enter("{%u,%zu/%llu}",
+ call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
switch (call->unmarshall) {
case 0:
req->actual_len = 0;
- call->offset = 0;
+ req->index = 0;
+ req->offset = req->pos & (PAGE_SIZE - 1);
call->unmarshall++;
- if (call->operation_ID != FSFETCHDATA64) {
- call->unmarshall++;
- goto no_msw;
+ if (call->operation_ID == FSFETCHDATA64) {
+ afs_extract_to_tmp64(call);
+ } else {
+ call->tmp_u = htonl(0);
+ afs_extract_to_tmp(call);
}
- /* extract the upper part of the returned data length of an
- * FSFETCHDATA64 op (which should always be 0 using this
- * client) */
- case 1:
- _debug("extract data length (MSW)");
- ret = afs_extract_data(call, &call->tmp, 4, true);
- if (ret < 0)
- return ret;
-
- req->actual_len = ntohl(call->tmp);
- req->actual_len <<= 32;
- call->offset = 0;
- call->unmarshall++;
-
- no_msw:
/* extract the returned data length */
- case 2:
+ case 1:
_debug("extract data length");
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
- req->actual_len |= ntohl(call->tmp);
+ req->actual_len = be64_to_cpu(call->tmp64);
_debug("DATA length: %llu", req->actual_len);
-
- req->remain = req->actual_len;
- call->offset = req->pos & (PAGE_SIZE - 1);
- req->index = 0;
- if (req->actual_len == 0)
+ req->remain = min(req->len, req->actual_len);
+ if (req->remain == 0)
goto no_more_data;
+
call->unmarshall++;
begin_page:
ASSERTCMP(req->index, <, req->nr_pages);
- if (req->remain > PAGE_SIZE - call->offset)
- size = PAGE_SIZE - call->offset;
+ if (req->remain > PAGE_SIZE - req->offset)
+ size = PAGE_SIZE - req->offset;
else
size = req->remain;
- call->count = call->offset + size;
- ASSERTCMP(call->count, <=, PAGE_SIZE);
- req->remain -= size;
+ call->bvec[0].bv_len = size;
+ call->bvec[0].bv_offset = req->offset;
+ call->bvec[0].bv_page = req->pages[req->index];
+ iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+ ASSERTCMP(size, <=, PAGE_SIZE);
/* extract the returned data */
- case 3:
- _debug("extract data %llu/%llu %zu/%u",
- req->remain, req->actual_len, call->offset, call->count);
+ case 2:
+ _debug("extract data %zu/%llu",
+ iov_iter_count(&call->iter), req->remain);
- buffer = kmap(req->pages[req->index]);
- ret = afs_extract_data(call, buffer, call->count, true);
- kunmap(req->pages[req->index]);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
- if (call->offset == PAGE_SIZE) {
+ req->remain -= call->bvec[0].bv_len;
+ req->offset += call->bvec[0].bv_len;
+ ASSERTCMP(req->offset, <=, PAGE_SIZE);
+ if (req->offset == PAGE_SIZE) {
+ req->offset = 0;
if (req->page_done)
req->page_done(call, req);
req->index++;
- if (req->remain > 0) {
- call->offset = 0;
- if (req->index >= req->nr_pages) {
- call->unmarshall = 4;
- goto begin_discard;
- }
+ if (req->remain > 0)
goto begin_page;
- }
}
- goto no_more_data;
+
+ ASSERTCMP(req->remain, ==, 0);
+ if (req->actual_len <= req->len)
+ goto no_more_data;
/* Discard any excess data the server gave us */
- begin_discard:
- case 4:
- size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
- call->count = size;
- _debug("extract discard %llu/%llu %zu/%u",
- req->remain, req->actual_len, call->offset, call->count);
-
- call->offset = 0;
- ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
- req->remain -= call->offset;
+ iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
+ call->unmarshall = 3;
+ case 3:
+ _debug("extract discard %zu/%llu",
+ iov_iter_count(&call->iter), req->actual_len - req->len);
+
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
- if (req->remain > 0)
- goto begin_discard;
no_more_data:
- call->offset = 0;
- call->unmarshall = 5;
+ call->unmarshall = 4;
+ afs_extract_to_buf(call, (21 + 3 + 6) * 4);
/* extract the metadata */
- case 5:
- ret = afs_extract_data(call, call->buffer,
- (21 + 3 + 6) * 4, false);
+ case 4:
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
bp = call->buffer;
- if (afs_decode_status(call, &bp, &vnode->status, vnode,
- &vnode->status.data_version, req) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &vnode->status.data_version, req);
+ if (ret < 0)
+ return ret;
xdr_decode_AFSCallBack(call, vnode, &bp);
- if (call->reply[1])
- xdr_decode_AFSVolSync(&bp, call->reply[1]);
+ xdr_decode_AFSVolSync(&bp, call->reply[1]);
- call->offset = 0;
call->unmarshall++;
- case 6:
+ case 5:
break;
}
for (; req->index < req->nr_pages; req->index++) {
- if (call->count < PAGE_SIZE)
+ if (req->offset < PAGE_SIZE)
zero_user_segment(req->pages[req->index],
- call->count, PAGE_SIZE);
+ req->offset, PAGE_SIZE);
if (req->page_done)
req->page_done(call, req);
- call->count = 0;
+ req->offset = 0;
}
_leave(" = 0 [done]");
@@ -653,6 +643,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
call->reply[1] = NULL; /* volsync */
call->reply[2] = req;
call->expected_version = vnode->status.data_version;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -682,6 +673,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_fetch_data(fc, req);
+
if (upper_32_bits(req->pos) ||
upper_32_bits(req->len) ||
upper_32_bits(req->pos + req->len))
@@ -698,6 +692,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
call->reply[1] = NULL; /* volsync */
call->reply[2] = req;
call->expected_version = vnode->status.data_version;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -733,11 +728,14 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
xdr_decode_AFSFid(&bp, call->reply[1]);
- if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) < 0 ||
- afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
- xdr_decode_AFSCallBack_raw(&bp, call->reply[3]);
+ ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_AFSCallBack_raw(call, &bp, call->reply[3]);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -778,6 +776,15 @@ int afs_fs_create(struct afs_fs_cursor *fc,
size_t namesz, reqsz, padsz;
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)){
+ if (S_ISDIR(mode))
+ return yfs_fs_make_dir(fc, name, mode, current_data_version,
+ newfid, newstatus, newcb);
+ else
+ return yfs_fs_create_file(fc, name, mode, current_data_version,
+ newfid, newstatus, newcb);
+ }
+
_enter("");
namesz = strlen(name);
@@ -796,6 +803,7 @@ int afs_fs_create(struct afs_fs_cursor *fc,
call->reply[2] = newstatus;
call->reply[3] = newcb;
call->expected_version = current_data_version + 1;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -839,9 +847,10 @@ static int afs_deliver_fs_remove(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- if (afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -868,15 +877,18 @@ static const struct afs_call_type afs_RXFSRemoveDir = {
/*
* remove a file or directory
*/
-int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
- u64 current_data_version)
+int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+ const char *name, bool isdir, u64 current_data_version)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_remove(fc, vnode, name, isdir, current_data_version);
+
_enter("");
namesz = strlen(name);
@@ -890,15 +902,16 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
return -ENOMEM;
call->key = fc->key;
- call->reply[0] = vnode;
+ call->reply[0] = dvnode;
+ call->reply[1] = vnode;
call->expected_version = current_data_version + 1;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(dvnode->fid.vid);
+ *bp++ = htonl(dvnode->fid.vnode);
+ *bp++ = htonl(dvnode->fid.unique);
*bp++ = htonl(namesz);
memcpy(bp, name, namesz);
bp = (void *) bp + namesz;
@@ -908,7 +921,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
}
afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
+ trace_afs_make_fs_call(call, &dvnode->fid);
return afs_make_call(&fc->ac, call, GFP_NOFS, false);
}
@@ -929,10 +942,13 @@ static int afs_deliver_fs_link(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- if (afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL) < 0 ||
- afs_decode_status(call, &bp, &dvnode->status, dvnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ ret = afs_decode_status(call, &bp, &dvnode->status, dvnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -961,6 +977,9 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
size_t namesz, reqsz, padsz;
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_link(fc, vnode, name, current_data_version);
+
_enter("");
namesz = strlen(name);
@@ -1016,10 +1035,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
xdr_decode_AFSFid(&bp, call->reply[1]);
- if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) ||
- afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -1052,6 +1074,10 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
size_t namesz, reqsz, padsz, c_namesz, c_padsz;
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_symlink(fc, name, contents, current_data_version,
+ newfid, newstatus);
+
_enter("");
namesz = strlen(name);
@@ -1122,13 +1148,16 @@ static int afs_deliver_fs_rename(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- if (afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
- if (new_dvnode != orig_dvnode &&
- afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
- &call->expected_version_2, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ if (new_dvnode != orig_dvnode) {
+ ret = afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
+ &call->expected_version_2, NULL);
+ if (ret < 0)
+ return ret;
+ }
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -1161,6 +1190,12 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_rename(fc, orig_name,
+ new_dvnode, new_name,
+ current_orig_data_version,
+ current_new_data_version);
+
_enter("");
o_namesz = strlen(orig_name);
@@ -1231,9 +1266,10 @@ static int afs_deliver_fs_store_data(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- if (afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
afs_pages_written_back(vnode, call);
@@ -1273,7 +1309,7 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- _enter(",%x,{%x:%u},,",
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
call = afs_alloc_flat_call(net, &afs_RXFSStoreData64,
@@ -1330,7 +1366,10 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
loff_t size, pos, i_size;
__be32 *bp;
- _enter(",%x,{%x:%u},,",
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_store_data(fc, mapping, first, last, offset, to);
+
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
size = (loff_t)to - (loff_t)offset;
@@ -1407,9 +1446,10 @@ static int afs_deliver_fs_store_status(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- if (afs_decode_status(call, &bp, &vnode->status, vnode,
- &call->expected_version, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -1451,7 +1491,7 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- _enter(",%x,{%x:%u},,",
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
@@ -1498,7 +1538,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- _enter(",%x,{%x:%u},,",
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
@@ -1544,10 +1584,13 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_setattr(fc, attr);
+
if (attr->ia_valid & ATTR_SIZE)
return afs_fs_setattr_size(fc, attr);
- _enter(",%x,{%x:%u},,",
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus,
@@ -1581,164 +1624,114 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
{
const __be32 *bp;
char *p;
+ u32 size;
int ret;
_enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
case 0:
- call->offset = 0;
call->unmarshall++;
+ afs_extract_to_buf(call, 12 * 4);
/* extract the returned status record */
case 1:
_debug("extract status");
- ret = afs_extract_data(call, call->buffer,
- 12 * 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
bp = call->buffer;
xdr_decode_AFSFetchVolumeStatus(&bp, call->reply[1]);
- call->offset = 0;
call->unmarshall++;
+ afs_extract_to_tmp(call);
/* extract the volume name length */
case 2:
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG);
- call->offset = 0;
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_volname_len);
+ size = (call->count + 3) & ~3; /* It's padded */
+ afs_extract_begin(call, call->reply[2], size);
call->unmarshall++;
/* extract the volume name */
case 3:
_debug("extract volname");
- if (call->count > 0) {
- ret = afs_extract_data(call, call->reply[2],
- call->count, true);
- if (ret < 0)
- return ret;
- }
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
p = call->reply[2];
p[call->count] = 0;
_debug("volname '%s'", p);
-
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
- /* extract the volume name padding */
- if ((call->count & 3) == 0) {
- call->unmarshall++;
- goto no_volname_padding;
- }
- call->count = 4 - (call->count & 3);
-
- case 4:
- ret = afs_extract_data(call, call->buffer,
- call->count, true);
- if (ret < 0)
- return ret;
-
- call->offset = 0;
- call->unmarshall++;
- no_volname_padding:
-
/* extract the offline message length */
- case 5:
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ case 4:
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG);
- call->offset = 0;
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_offline_msg_len);
+ size = (call->count + 3) & ~3; /* It's padded */
+ afs_extract_begin(call, call->reply[2], size);
call->unmarshall++;
/* extract the offline message */
- case 6:
+ case 5:
_debug("extract offline");
- if (call->count > 0) {
- ret = afs_extract_data(call, call->reply[2],
- call->count, true);
- if (ret < 0)
- return ret;
- }
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
p = call->reply[2];
p[call->count] = 0;
_debug("offline '%s'", p);
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
- /* extract the offline message padding */
- if ((call->count & 3) == 0) {
- call->unmarshall++;
- goto no_offline_padding;
- }
- call->count = 4 - (call->count & 3);
-
- case 7:
- ret = afs_extract_data(call, call->buffer,
- call->count, true);
- if (ret < 0)
- return ret;
-
- call->offset = 0;
- call->unmarshall++;
- no_offline_padding:
-
/* extract the message of the day length */
- case 8:
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ case 6:
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG);
- call->offset = 0;
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_motd_len);
+ size = (call->count + 3) & ~3; /* It's padded */
+ afs_extract_begin(call, call->reply[2], size);
call->unmarshall++;
/* extract the message of the day */
- case 9:
+ case 7:
_debug("extract motd");
- if (call->count > 0) {
- ret = afs_extract_data(call, call->reply[2],
- call->count, true);
- if (ret < 0)
- return ret;
- }
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
p = call->reply[2];
p[call->count] = 0;
_debug("motd '%s'", p);
- call->offset = 0;
call->unmarshall++;
- /* extract the message of the day padding */
- call->count = (4 - (call->count & 3)) & 3;
-
- case 10:
- ret = afs_extract_data(call, call->buffer,
- call->count, false);
- if (ret < 0)
- return ret;
-
- call->offset = 0;
- call->unmarshall++;
- case 11:
+ case 8:
break;
}
@@ -1778,6 +1771,9 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
__be32 *bp;
void *tmpbuf;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_get_volume_status(fc, vs);
+
_enter("");
tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL);
@@ -1867,6 +1863,9 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_set_lock(fc, type);
+
_enter("");
call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
@@ -1899,6 +1898,9 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_extend_lock(fc);
+
_enter("");
call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
@@ -1930,6 +1932,9 @@ int afs_fs_release_lock(struct afs_fs_cursor *fc)
struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_release_lock(fc);
+
_enter("");
call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
@@ -2004,19 +2009,16 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
u32 count;
int ret;
- _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count);
+ _enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter));
-again:
switch (call->unmarshall) {
case 0:
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
/* Extract the capabilities word count */
case 1:
- ret = afs_extract_data(call, &call->tmp,
- 1 * sizeof(__be32),
- true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -2024,24 +2026,17 @@ again:
call->count = count;
call->count2 = count;
- call->offset = 0;
+ iov_iter_discard(&call->iter, READ, count * sizeof(__be32));
call->unmarshall++;
/* Extract capabilities words */
case 2:
- count = min(call->count, 16U);
- ret = afs_extract_data(call, call->buffer,
- count * sizeof(__be32),
- call->count > 16);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
/* TODO: Examine capabilities */
- call->count -= count;
- if (call->count > 0)
- goto again;
- call->offset = 0;
call->unmarshall++;
break;
}
@@ -2050,6 +2045,14 @@ again:
return 0;
}
+static void afs_destroy_fs_get_capabilities(struct afs_call *call)
+{
+ struct afs_server *server = call->reply[0];
+
+ afs_put_server(call->net, server);
+ afs_flat_call_destructor(call);
+}
+
/*
* FS.GetCapabilities operation type
*/
@@ -2057,7 +2060,8 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
.name = "FS.GetCapabilities",
.op = afs_FS_GetCapabilities,
.deliver = afs_deliver_fs_get_capabilities,
- .destructor = afs_flat_call_destructor,
+ .done = afs_fileserver_probe_result,
+ .destructor = afs_destroy_fs_get_capabilities,
};
/*
@@ -2067,7 +2071,9 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
int afs_fs_get_capabilities(struct afs_net *net,
struct afs_server *server,
struct afs_addr_cursor *ac,
- struct key *key)
+ struct key *key,
+ unsigned int server_index,
+ bool async)
{
struct afs_call *call;
__be32 *bp;
@@ -2079,6 +2085,10 @@ int afs_fs_get_capabilities(struct afs_net *net,
return -ENOMEM;
call->key = key;
+ call->reply[0] = afs_get_server(server);
+ call->reply[1] = (void *)(long)server_index;
+ call->upgrade = true;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -2086,7 +2096,7 @@ int afs_fs_get_capabilities(struct afs_net *net,
/* Can't take a ref on server */
trace_afs_make_fs_call(call, NULL);
- return afs_make_call(ac, call, GFP_NOFS, false);
+ return afs_make_call(ac, call, GFP_NOFS, async);
}
/*
@@ -2097,7 +2107,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
struct afs_file_status *status = call->reply[1];
struct afs_callback *callback = call->reply[2];
struct afs_volsync *volsync = call->reply[3];
- struct afs_vnode *vnode = call->reply[0];
+ struct afs_fid *fid = call->reply[0];
const __be32 *bp;
int ret;
@@ -2105,21 +2115,16 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
if (ret < 0)
return ret;
- _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu}", fid->vid, fid->vnode);
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- afs_decode_status(call, &bp, status, vnode,
- &call->expected_version, NULL);
- callback[call->count].version = ntohl(bp[0]);
- callback[call->count].expiry = ntohl(bp[1]);
- callback[call->count].type = ntohl(bp[2]);
- if (vnode)
- xdr_decode_AFSCallBack(call, vnode, &bp);
- else
- bp += 3;
- if (volsync)
- xdr_decode_AFSVolSync(&bp, volsync);
+ ret = afs_decode_status(call, &bp, status, NULL,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_AFSCallBack_raw(call, &bp, callback);
+ xdr_decode_AFSVolSync(&bp, volsync);
_leave(" = 0 [done]");
return 0;
@@ -2148,7 +2153,10 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
struct afs_call *call;
__be32 *bp;
- _enter(",%x,{%x:%u},,",
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_fetch_status(fc, net, fid, status, callback, volsync);
+
+ _enter(",%x,{%llx:%llu},,",
key_serial(fc->key), fid->vid, fid->vnode);
call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
@@ -2158,11 +2166,12 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
}
call->key = fc->key;
- call->reply[0] = NULL; /* vnode for fid[0] */
+ call->reply[0] = fid;
call->reply[1] = status;
call->reply[2] = callback;
call->reply[3] = volsync;
call->expected_version = 1; /* vnode->status.data_version */
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -2193,38 +2202,40 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
switch (call->unmarshall) {
case 0:
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
/* Extract the file status count and array in two steps */
case 1:
_debug("extract status count");
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
tmp = ntohl(call->tmp);
_debug("status count: %u/%u", tmp, call->count2);
if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_ibulkst_count);
call->count = 0;
call->unmarshall++;
more_counts:
- call->offset = 0;
+ afs_extract_to_buf(call, 21 * sizeof(__be32));
case 2:
_debug("extract status array %u", call->count);
- ret = afs_extract_data(call, call->buffer, 21 * 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
bp = call->buffer;
statuses = call->reply[1];
- if (afs_decode_status(call, &bp, &statuses[call->count],
- call->count == 0 ? vnode : NULL,
- NULL, NULL) < 0)
- return afs_protocol_error(call, -EBADMSG);
+ ret = afs_decode_status(call, &bp, &statuses[call->count],
+ call->count == 0 ? vnode : NULL,
+ NULL, NULL);
+ if (ret < 0)
+ return ret;
call->count++;
if (call->count < call->count2)
@@ -2232,27 +2243,28 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->count = 0;
call->unmarshall++;
- call->offset = 0;
+ afs_extract_to_tmp(call);
/* Extract the callback count and array in two steps */
case 3:
_debug("extract CB count");
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_ibulkst_cb_count);
call->count = 0;
call->unmarshall++;
more_cbs:
- call->offset = 0;
+ afs_extract_to_buf(call, 3 * sizeof(__be32));
case 4:
_debug("extract CB array");
- ret = afs_extract_data(call, call->buffer, 3 * 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -2260,7 +2272,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
bp = call->buffer;
callbacks = call->reply[2];
callbacks[call->count].version = ntohl(bp[0]);
- callbacks[call->count].expiry = ntohl(bp[1]);
+ callbacks[call->count].expires_at = xdr_decode_expiry(call, ntohl(bp[1]));
callbacks[call->count].type = ntohl(bp[2]);
statuses = call->reply[1];
if (call->count == 0 && vnode && statuses[0].abort_code == 0)
@@ -2269,19 +2281,17 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
if (call->count < call->count2)
goto more_cbs;
- call->offset = 0;
+ afs_extract_to_buf(call, 6 * sizeof(__be32));
call->unmarshall++;
case 5:
- ret = afs_extract_data(call, call->buffer, 6 * 4, false);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
bp = call->buffer;
- if (call->reply[3])
- xdr_decode_AFSVolSync(&bp, call->reply[3]);
+ xdr_decode_AFSVolSync(&bp, call->reply[3]);
- call->offset = 0;
call->unmarshall++;
case 6:
@@ -2317,7 +2327,11 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
__be32 *bp;
int i;
- _enter(",%x,{%x:%u},%u",
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+ return yfs_fs_inline_bulk_status(fc, net, fids, statuses, callbacks,
+ nr_fids, volsync);
+
+ _enter(",%x,{%llx:%llu},%u",
key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids);
call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus,
@@ -2334,6 +2348,7 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
call->reply[2] = callbacks;
call->reply[3] = volsync;
call->count2 = nr_fids;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 479b7fdda124..4c6d8e1112c2 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -82,7 +82,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key)
default:
printk("kAFS: AFS vnode with undefined type\n");
read_sequnlock_excl(&vnode->cb_lock);
- return afs_protocol_error(NULL, -EBADMSG);
+ return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type);
}
inode->i_blocks = 0;
@@ -100,7 +100,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
struct afs_fs_cursor fc;
int ret;
- _enter("%s,{%x:%u.%u,S=%lx}",
+ _enter("%s,{%llx:%llu.%u,S=%lx}",
vnode->volume->name,
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
vnode->flags);
@@ -127,9 +127,9 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
int afs_iget5_test(struct inode *inode, void *opaque)
{
struct afs_iget_data *data = opaque;
+ struct afs_vnode *vnode = AFS_FS_I(inode);
- return inode->i_ino == data->fid.vnode &&
- inode->i_generation == data->fid.unique;
+ return memcmp(&vnode->fid, &data->fid, sizeof(data->fid)) == 0;
}
/*
@@ -150,11 +150,14 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
struct afs_iget_data *data = opaque;
struct afs_vnode *vnode = AFS_FS_I(inode);
- inode->i_ino = data->fid.vnode;
- inode->i_generation = data->fid.unique;
vnode->fid = data->fid;
vnode->volume = data->volume;
+ /* YFS supports 96-bit vnode IDs, but Linux only supports
+ * 64-bit inode numbers.
+ */
+ inode->i_ino = data->fid.vnode;
+ inode->i_generation = data->fid.unique;
return 0;
}
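The comment added to afs_iget5_set() notes that YFS vnode IDs are 96 bits wide while Linux inode numbers are only 64-bit, which is why i_ino keeps just the low 64 bits and afs_iget5_test() now compares the whole fid instead. A small sketch of splitting such an ID into 32-bit words, as the fscache key code further down does; the structures and field names here are illustrative only:

#include <assert.h>
#include <stdint.h>

struct yfs_fid_sketch {
        uint64_t vnode;           /* low 64 bits of the vnode ID */
        uint32_t vnode_hi;        /* high 32 bits of the vnode ID */
};

struct cache_key_sketch {
        uint32_t vnode_id;        /* low 32 bits */
        uint32_t vnode_id_ext[2]; /* middle and high 32 bits */
};

static struct cache_key_sketch make_key(const struct yfs_fid_sketch *fid)
{
        struct cache_key_sketch key = {
                .vnode_id     = (uint32_t)fid->vnode,
                .vnode_id_ext = { (uint32_t)(fid->vnode >> 32), fid->vnode_hi },
        };
        return key;
}

int main(void)
{
        struct yfs_fid_sketch fid = { .vnode = 0x0000000200000001ULL, .vnode_hi = 3 };
        struct cache_key_sketch key = make_key(&fid);

        assert(key.vnode_id == 1 && key.vnode_id_ext[0] == 2 && key.vnode_id_ext[1] == 3);
        return 0;
}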
@@ -193,7 +196,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
return ERR_PTR(-ENOMEM);
}
- _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
+ _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
inode, inode->i_ino, data.fid.vid, data.fid.vnode,
data.fid.unique);
@@ -252,8 +255,8 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
key.vnode_id = vnode->fid.vnode;
key.unique = vnode->fid.unique;
- key.vnode_id_ext[0] = 0;
- key.vnode_id_ext[1] = 0;
+ key.vnode_id_ext[0] = vnode->fid.vnode >> 32;
+ key.vnode_id_ext[1] = vnode->fid.vnode_hi;
aux.data_version = vnode->status.data_version;
vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
@@ -277,7 +280,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
struct inode *inode;
int ret;
- _enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique);
+ _enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique);
as = sb->s_fs_info;
data.volume = as->volume;
@@ -289,7 +292,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
return ERR_PTR(-ENOMEM);
}
- _debug("GOT INODE %p { vl=%x vn=%x, u=%x }",
+ _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }",
inode, fid->vid, fid->vnode, fid->unique);
vnode = AFS_FS_I(inode);
@@ -314,11 +317,11 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
* didn't give us a callback) */
vnode->cb_version = 0;
vnode->cb_type = 0;
- vnode->cb_expires_at = 0;
+ vnode->cb_expires_at = ktime_get();
} else {
vnode->cb_version = cb->version;
vnode->cb_type = cb->type;
- vnode->cb_expires_at = cb->expiry;
+ vnode->cb_expires_at = cb->expires_at;
vnode->cb_interest = afs_get_cb_interest(cbi);
set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
@@ -352,7 +355,7 @@ bad_inode:
*/
void afs_zap_data(struct afs_vnode *vnode)
{
- _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
#ifdef CONFIG_AFS_FSCACHE
fscache_invalidate(vnode->cache);
@@ -382,7 +385,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
bool valid = false;
int ret;
- _enter("{v={%x:%u} fl=%lx},%x",
+ _enter("{v={%llx:%llu} fl=%lx},%x",
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
@@ -501,7 +504,7 @@ void afs_evict_inode(struct inode *inode)
vnode = AFS_FS_I(inode);
- _enter("{%x:%u.%d}",
+ _enter("{%llx:%llu.%d}",
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique);
@@ -550,7 +553,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
struct key *key;
int ret;
- _enter("{%x:%u},{n=%pd},%x",
+ _enter("{%llx:%llu},{n=%pd},%x",
vnode->fid.vid, vnode->fid.vnode, dentry,
attr->ia_valid);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 72de1f157d20..5da3b09b7518 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -22,6 +22,7 @@
#include <linux/backing-dev.h>
#include <linux/uuid.h>
#include <linux/mm_types.h>
+#include <linux/dns_resolver.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
@@ -75,10 +76,13 @@ struct afs_addr_list {
u32 version; /* Version */
unsigned char max_addrs;
unsigned char nr_addrs;
- unsigned char index; /* Address currently in use */
+ unsigned char preferred; /* Preferred address */
unsigned char nr_ipv4; /* Number of IPv4 addresses */
+ enum dns_record_source source:8;
+ enum dns_lookup_status status:8;
unsigned long probed; /* Mask of servers that have been probed */
- unsigned long yfs; /* Mask of servers that are YFS */
+ unsigned long failed; /* Mask of addrs that failed locally/ICMP */
+ unsigned long responded; /* Mask of addrs that responded */
struct sockaddr_rxrpc addrs[];
#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
};
@@ -88,6 +92,7 @@ struct afs_addr_list {
*/
struct afs_call {
const struct afs_call_type *type; /* type of call */
+ struct afs_addr_list *alist; /* Address is alist[addr_ix] */
wait_queue_head_t waitq; /* processes awaiting completion */
struct work_struct async_work; /* async I/O processor */
struct work_struct work; /* actual work processor */
@@ -98,16 +103,22 @@ struct afs_call {
struct afs_cb_interest *cbi; /* Callback interest for server used */
void *request; /* request data (first part) */
struct address_space *mapping; /* Pages being written from */
+ struct iov_iter iter; /* Buffer iterator */
+ struct iov_iter *_iter; /* Iterator currently in use */
+ union { /* Convenience for ->iter */
+ struct kvec kvec[1];
+ struct bio_vec bvec[1];
+ };
void *buffer; /* reply receive buffer */
void *reply[4]; /* Where to put the reply */
pgoff_t first; /* first page in mapping to deal with */
pgoff_t last; /* last page in mapping to deal with */
- size_t offset; /* offset into received data store */
atomic_t usage;
enum afs_call_state state;
spinlock_t state_lock;
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
+ u32 epoch;
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
unsigned first_offset; /* offset into mapping[first] */
@@ -117,19 +128,28 @@ struct afs_call {
unsigned count2; /* count used in unmarshalling */
};
unsigned char unmarshall; /* unmarshalling phase */
+ unsigned char addr_ix; /* Address in ->alist */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
bool need_attention; /* T if RxRPC poked us */
bool async; /* T if asynchronous */
bool ret_reply0; /* T if should return reply[0] on success */
bool upgrade; /* T to request service upgrade */
+ bool want_reply_time; /* T if want reply_time */
u16 service_id; /* Actual service ID (after upgrade) */
unsigned int debug_id; /* Trace ID */
u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
- __be32 tmp; /* place to extract temporary data */
+ union { /* place to extract temporary data */
+ struct {
+ __be32 tmp_u;
+ __be32 tmp;
+ } __attribute__((packed));
+ __be64 tmp64;
+ };
afs_dataversion_t expected_version; /* Updated version expected from store */
afs_dataversion_t expected_version_2; /* 2nd updated version expected from store */
+ ktime_t reply_time; /* Time of first reply packet */
};
struct afs_call_type {
@@ -146,6 +166,9 @@ struct afs_call_type {
/* Work function */
void (*work)(struct work_struct *work);
+
+ /* Call done function (gets called immediately on success or failure) */
+ void (*done)(struct afs_call *call);
};
/*
@@ -185,6 +208,7 @@ struct afs_read {
refcount_t usage;
unsigned int index; /* Which page we're reading into */
unsigned int nr_pages;
+ unsigned int offset; /* offset into current page */
void (*page_done)(struct afs_call *, struct afs_read *);
struct page **pages;
struct page *array[];
@@ -343,13 +367,70 @@ struct afs_cell {
rwlock_t proc_lock;
/* VL server list. */
- rwlock_t vl_addrs_lock; /* Lock on vl_addrs */
- struct afs_addr_list __rcu *vl_addrs; /* List of VL servers */
+ rwlock_t vl_servers_lock; /* Lock on vl_servers */
+ struct afs_vlserver_list __rcu *vl_servers;
+
u8 name_len; /* Length of name */
char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */
};
/*
+ * Volume Location server record.
+ */
+struct afs_vlserver {
+ struct rcu_head rcu;
+ struct afs_addr_list __rcu *addresses; /* List of addresses for this VL server */
+ unsigned long flags;
+#define AFS_VLSERVER_FL_PROBED 0 /* The VL server has been probed */
+#define AFS_VLSERVER_FL_PROBING 1 /* VL server is being probed */
+#define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */
+ rwlock_t lock; /* Lock on addresses */
+ atomic_t usage;
+
+ /* Probe state */
+ wait_queue_head_t probe_wq;
+ atomic_t probe_outstanding;
+ spinlock_t probe_lock;
+ struct {
+ unsigned int rtt; /* RTT as ktime/64 */
+ u32 abort_code;
+ short error;
+ bool have_result;
+ bool responded:1;
+ bool is_yfs:1;
+ bool not_yfs:1;
+ bool local_failure:1;
+ } probe;
+
+ u16 port;
+ u16 name_len; /* Length of name */
+ char name[]; /* Server name, case-flattened */
+};
+
+/*
+ * Weighted list of Volume Location servers.
+ */
+struct afs_vlserver_entry {
+ u16 priority; /* Preference (as SRV) */
+ u16 weight; /* Weight (as SRV) */
+ enum dns_record_source source:8;
+ enum dns_lookup_status status:8;
+ struct afs_vlserver *server;
+};
+
+struct afs_vlserver_list {
+ struct rcu_head rcu;
+ atomic_t usage;
+ u8 nr_servers;
+ u8 index; /* Server currently in use */
+ u8 preferred; /* Preferred server */
+ enum dns_record_source source:8;
+ enum dns_lookup_status status:8;
+ rwlock_t lock;
+ struct afs_vlserver_entry servers[];
+};
+
+/*
* Cached VLDB entry.
*
* This is pointed to by cell->vldb_entries, indexed by name.
@@ -403,8 +484,12 @@ struct afs_server {
#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */
#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
+#define AFS_SERVER_FL_IS_YFS 9 /* Server is YFS not AFS */
+#define AFS_SERVER_FL_NO_RM2 10 /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAVE_EPOCH 11 /* ->epoch is valid */
atomic_t usage;
u32 addr_version; /* Address list version */
+ u32 cm_epoch; /* Server RxRPC epoch */
/* file service access */
rwlock_t fs_lock; /* access lock */
@@ -413,6 +498,26 @@ struct afs_server {
struct hlist_head cb_volumes; /* List of volume interests on this server */
unsigned cb_s_break; /* Break-everything counter. */
rwlock_t cb_break_lock; /* Volume finding lock */
+
+ /* Probe state */
+ wait_queue_head_t probe_wq;
+ atomic_t probe_outstanding;
+ spinlock_t probe_lock;
+ struct {
+ unsigned int rtt; /* RTT as ktime/64 */
+ u32 abort_code;
+ u32 cm_epoch;
+ short error;
+ bool have_result;
+ bool responded:1;
+ bool is_yfs:1;
+ bool not_yfs:1;
+ bool local_failure:1;
+ bool no_epoch:1;
+ bool cm_probed:1;
+ bool said_rebooted:1;
+ bool said_inconsistent:1;
+ } probe;
};
/*
@@ -447,8 +552,8 @@ struct afs_server_entry {
struct afs_server_list {
refcount_t usage;
- unsigned short nr_servers;
- unsigned short index; /* Server currently in use */
+ unsigned char nr_servers;
+ unsigned char preferred; /* Preferred server */
unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */
unsigned int seq; /* Set to ->servers_seq when installed */
rwlock_t lock;
@@ -550,6 +655,15 @@ struct afs_vnode {
afs_callback_type_t cb_type; /* type of callback */
};
+static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
+{
+#ifdef CONFIG_AFS_FSCACHE
+ return vnode->cache;
+#else
+ return NULL;
+#endif
+}
+
/*
* cached security record for one user's attempt to access a vnode
*/
@@ -586,13 +700,31 @@ struct afs_interface {
*/
struct afs_addr_cursor {
struct afs_addr_list *alist; /* Current address list (pins ref) */
- struct sockaddr_rxrpc *addr;
+ unsigned long tried; /* Tried addresses */
+ signed char index; /* Current address */
+ bool responded; /* T if the current address responded */
+ unsigned short nr_iterations; /* Number of address iterations */
+ short error;
u32 abort_code;
- unsigned short start; /* Starting point in alist->addrs[] */
- unsigned short index; /* Wrapping offset from start to current addr */
+};
+
+/*
+ * Cursor for iterating over a set of volume location servers.
+ */
+struct afs_vl_cursor {
+ struct afs_addr_cursor ac;
+ struct afs_cell *cell; /* The cell we're querying */
+ struct afs_vlserver_list *server_list; /* Current server list (pins ref) */
+ struct afs_vlserver *server; /* Server on which this resides */
+ struct key *key; /* Key for the server */
+ unsigned long untried; /* Bitmask of untried servers */
+ short index; /* Current server */
short error;
- bool begun; /* T if we've begun iteration */
- bool responded; /* T if the current address responded */
+ unsigned short flags;
+#define AFS_VL_CURSOR_STOP 0x0001 /* Set to cease iteration */
+#define AFS_VL_CURSOR_RETRY 0x0002 /* Set to do a retry */
+#define AFS_VL_CURSOR_RETRIED 0x0004 /* Set if started a retry */
+ unsigned short nr_iterations; /* Number of server iterations */
};
/*
@@ -604,10 +736,11 @@ struct afs_fs_cursor {
struct afs_server_list *server_list; /* Current server list (pins ref) */
struct afs_cb_interest *cbi; /* Server on which this resides (pins ref) */
struct key *key; /* Key for the server */
+ unsigned long untried; /* Bitmask of untried servers */
unsigned int cb_break; /* cb_break + cb_s_break before the call */
unsigned int cb_break_2; /* cb_break + cb_s_break (2nd vnode) */
- unsigned char start; /* Initial index in server list */
- unsigned char index; /* Number of servers tried beyond start */
+ short index; /* Current server */
+ short error;
unsigned short flags;
#define AFS_FS_CURSOR_STOP 0x0001 /* Set to cease iteration */
#define AFS_FS_CURSOR_VBUSY 0x0002 /* Set if seen VBUSY */
@@ -615,6 +748,7 @@ struct afs_fs_cursor {
#define AFS_FS_CURSOR_VNOVOL 0x0008 /* Set if seen VNOVOL */
#define AFS_FS_CURSOR_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */
#define AFS_FS_CURSOR_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
+ unsigned short nr_iterations; /* Number of server iterations */
};
/*
@@ -640,12 +774,12 @@ extern struct afs_addr_list *afs_alloc_addrlist(unsigned int,
unsigned short,
unsigned short);
extern void afs_put_addrlist(struct afs_addr_list *);
-extern struct afs_addr_list *afs_parse_text_addrs(const char *, size_t, char,
- unsigned short, unsigned short);
-extern struct afs_addr_list *afs_dns_query(struct afs_cell *, time64_t *);
+extern struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *,
+ const char *, size_t, char,
+ unsigned short, unsigned short);
+extern struct afs_vlserver_list *afs_dns_query(struct afs_cell *, time64_t *);
extern bool afs_iterate_addresses(struct afs_addr_cursor *);
extern int afs_end_cursor(struct afs_addr_cursor *);
-extern int afs_set_vl_cursor(struct afs_addr_cursor *, struct afs_cell *);
extern void afs_merge_fs_addr4(struct afs_addr_list *, __be32, u16);
extern void afs_merge_fs_addr6(struct afs_addr_list *, __be32 *, u16);
@@ -668,6 +802,7 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
* callback.c
*/
extern void afs_init_callback_state(struct afs_server *);
+extern void __afs_break_callback(struct afs_vnode *);
extern void afs_break_callback(struct afs_vnode *);
extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*);
@@ -688,10 +823,13 @@ static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
}
-static inline unsigned int afs_cb_break_sum(struct afs_vnode *vnode,
- struct afs_cb_interest *cbi)
+static inline bool afs_cb_is_broken(unsigned int cb_break,
+ const struct afs_vnode *vnode,
+ const struct afs_cb_interest *cbi)
{
- return vnode->cb_break + cbi->server->cb_s_break + vnode->volume->cb_v_break;
+ return !cbi || cb_break != (vnode->cb_break +
+ cbi->server->cb_s_break +
+ vnode->volume->cb_v_break);
}
/*
@@ -781,7 +919,7 @@ extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *);
extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t, u64,
struct afs_fid *, struct afs_file_status *, struct afs_callback *);
-extern int afs_fs_remove(struct afs_fs_cursor *, const char *, bool, u64);
+extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64);
extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
struct afs_fid *, struct afs_file_status *);
@@ -797,7 +935,7 @@ extern int afs_fs_release_lock(struct afs_fs_cursor *);
extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *);
extern int afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
- struct afs_addr_cursor *, struct key *);
+ struct afs_addr_cursor *, struct key *, unsigned int, bool);
extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
struct afs_fid *, struct afs_file_status *,
struct afs_callback *, unsigned int,
@@ -807,6 +945,13 @@ extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
struct afs_callback *, struct afs_volsync *);
/*
+ * fs_probe.c
+ */
+extern void afs_fileserver_probe_result(struct afs_call *);
+extern int afs_probe_fileservers(struct afs_net *, struct key *, struct afs_server_list *);
+extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
+
+/*
* inode.c
*/
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool);
@@ -922,7 +1067,6 @@ extern int __net_init afs_open_socket(struct afs_net *);
extern void __net_exit afs_close_socket(struct afs_net *);
extern void afs_charge_preallocation(struct work_struct *);
extern void afs_put_call(struct afs_call *);
-extern int afs_queue_call_work(struct afs_call *);
extern long afs_make_call(struct afs_addr_cursor *, struct afs_call *, gfp_t, bool);
extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
const struct afs_call_type *,
@@ -930,12 +1074,39 @@ extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
extern void afs_flat_call_destructor(struct afs_call *);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
-extern int afs_extract_data(struct afs_call *, void *, size_t, bool);
-extern int afs_protocol_error(struct afs_call *, int);
+extern int afs_extract_data(struct afs_call *, bool);
+extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause);
+
+static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
+{
+ call->kvec[0].iov_base = buf;
+ call->kvec[0].iov_len = size;
+ iov_iter_kvec(&call->iter, READ, call->kvec, 1, size);
+}
+
+static inline void afs_extract_to_tmp(struct afs_call *call)
+{
+ afs_extract_begin(call, &call->tmp, sizeof(call->tmp));
+}
+
+static inline void afs_extract_to_tmp64(struct afs_call *call)
+{
+ afs_extract_begin(call, &call->tmp64, sizeof(call->tmp64));
+}
+
+static inline void afs_extract_discard(struct afs_call *call, size_t size)
+{
+ iov_iter_discard(&call->iter, READ, size);
+}
+
+static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
+{
+ afs_extract_begin(call, call->buffer, size);
+}
static inline int afs_transfer_reply(struct afs_call *call)
{
- return afs_extract_data(call, call->buffer, call->reply_max, false);
+ return afs_extract_data(call, false);
}
static inline bool afs_check_call_state(struct afs_call *call,
@@ -1012,7 +1183,6 @@ extern void afs_put_server(struct afs_net *, struct afs_server *);
extern void afs_manage_servers(struct work_struct *);
extern void afs_servers_timer(struct timer_list *);
extern void __net_exit afs_purge_servers(struct afs_net *);
-extern bool afs_probe_fileserver(struct afs_fs_cursor *);
extern bool afs_check_server_record(struct afs_fs_cursor *, struct afs_server *);
/*
@@ -1039,14 +1209,51 @@ extern void afs_fs_exit(void);
/*
* vlclient.c
*/
-extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *,
- struct afs_addr_cursor *,
- struct key *, const char *, int);
-extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *, struct afs_addr_cursor *,
- struct key *, const uuid_t *);
-extern int afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *);
-extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *, struct afs_addr_cursor *,
- struct key *, const uuid_t *);
+extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *,
+ const char *, int);
+extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uuid_t *);
+extern int afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *,
+ struct afs_vlserver *, unsigned int, bool);
+extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *);
+
+/*
+ * vl_probe.c
+ */
+extern void afs_vlserver_probe_result(struct afs_call *);
+extern int afs_send_vl_probes(struct afs_net *, struct key *, struct afs_vlserver_list *);
+extern int afs_wait_for_vl_probes(struct afs_vlserver_list *, unsigned long);
+
+/*
+ * vl_rotate.c
+ */
+extern bool afs_begin_vlserver_operation(struct afs_vl_cursor *,
+ struct afs_cell *, struct key *);
+extern bool afs_select_vlserver(struct afs_vl_cursor *);
+extern bool afs_select_current_vlserver(struct afs_vl_cursor *);
+extern int afs_end_vlserver_operation(struct afs_vl_cursor *);
+
+/*
+ * vlserver_list.c
+ */
+static inline struct afs_vlserver *afs_get_vlserver(struct afs_vlserver *vlserver)
+{
+ atomic_inc(&vlserver->usage);
+ return vlserver;
+}
+
+static inline struct afs_vlserver_list *afs_get_vlserverlist(struct afs_vlserver_list *vllist)
+{
+ if (vllist)
+ atomic_inc(&vllist->usage);
+ return vllist;
+}
+
+extern struct afs_vlserver *afs_alloc_vlserver(const char *, size_t, unsigned short);
+extern void afs_put_vlserver(struct afs_net *, struct afs_vlserver *);
+extern struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int);
+extern void afs_put_vlserverlist(struct afs_net *, struct afs_vlserver_list *);
+extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
+ const void *, size_t);
/*
* volume.c
@@ -1089,6 +1296,36 @@ extern int afs_launder_page(struct page *);
extern const struct xattr_handler *afs_xattr_handlers[];
extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
+/*
+ * yfsclient.c
+ */
+extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *, bool);
+extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
+extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, u64,
+ struct afs_fid *, struct afs_file_status *, struct afs_callback *);
+extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, u64,
+ struct afs_fid *, struct afs_file_status *, struct afs_callback *);
+extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
+extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64);
+extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
+extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
+ struct afs_fid *, struct afs_file_status *);
+extern int yfs_fs_rename(struct afs_fs_cursor *, const char *,
+ struct afs_vnode *, const char *, u64, u64);
+extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
+ pgoff_t, pgoff_t, unsigned, unsigned);
+extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *);
+extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
+extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t);
+extern int yfs_fs_extend_lock(struct afs_fs_cursor *);
+extern int yfs_fs_release_lock(struct afs_fs_cursor *);
+extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
+ struct afs_fid *, struct afs_file_status *,
+ struct afs_callback *, struct afs_volsync *);
+extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
+ struct afs_fid *, struct afs_file_status *,
+ struct afs_callback *, unsigned int,
+ struct afs_volsync *);
/*
* Miscellaneous inline functions.
@@ -1120,6 +1357,17 @@ static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc,
}
}
+static inline int afs_io_error(struct afs_call *call, enum afs_io_error where)
+{
+ trace_afs_io_error(call->debug_id, -EIO, where);
+ return -EIO;
+}
+
+static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where)
+{
+ trace_afs_file_error(vnode, -EIO, where);
+ return -EIO;
+}
/*****************************************************************************/
/*
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 99fd13500a97..2e51c6994148 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -130,9 +130,10 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
goto error_no_page;
}
- ret = -EIO;
- if (PageError(page))
+ if (PageError(page)) {
+ ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
goto error;
+ }
buf = kmap_atomic(page);
memcpy(devname, buf, size);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 9101f62707af..be2ee3bbd0a9 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -17,6 +17,11 @@
#include <linux/uaccess.h>
#include "internal.h"
+struct afs_vl_seq_net_private {
+ struct seq_net_private seq; /* Must be first */
+ struct afs_vlserver_list *vllist;
+};
+
static inline struct afs_net *afs_seq2net(struct seq_file *m)
{
return afs_net(seq_file_net(m));
@@ -32,16 +37,24 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
*/
static int afs_proc_cells_show(struct seq_file *m, void *v)
{
- struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
+ struct afs_vlserver_list *vllist;
+ struct afs_cell *cell;
if (v == SEQ_START_TOKEN) {
/* display header on line 1 */
- seq_puts(m, "USE NAME\n");
+ seq_puts(m, "USE TTL SV NAME\n");
return 0;
}
+ cell = list_entry(v, struct afs_cell, proc_link);
+ vllist = rcu_dereference(cell->vl_servers);
+
/* display one cell per line on subsequent lines */
- seq_printf(m, "%3u %s\n", atomic_read(&cell->usage), cell->name);
+ seq_printf(m, "%3u %6lld %2u %s\n",
+ atomic_read(&cell->usage),
+ cell->dns_expiry - ktime_get_real_seconds(),
+ vllist ? vllist->nr_servers : 0,
+ cell->name);
return 0;
}
@@ -208,7 +221,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
return 0;
}
- seq_printf(m, "%3d %08x %s\n",
+ seq_printf(m, "%3d %08llx %s\n",
atomic_read(&vol->usage), vol->vid,
afs_vol_types[vol->type]);
@@ -247,61 +260,102 @@ static const struct seq_operations afs_proc_cell_volumes_ops = {
.show = afs_proc_cell_volumes_show,
};
+static const char *const dns_record_sources[NR__dns_record_source + 1] = {
+ [DNS_RECORD_UNAVAILABLE] = "unav",
+ [DNS_RECORD_FROM_CONFIG] = "cfg",
+ [DNS_RECORD_FROM_DNS_A] = "A",
+ [DNS_RECORD_FROM_DNS_AFSDB] = "AFSDB",
+ [DNS_RECORD_FROM_DNS_SRV] = "SRV",
+ [DNS_RECORD_FROM_NSS] = "nss",
+ [NR__dns_record_source] = "[weird]"
+};
+
+static const char *const dns_lookup_statuses[NR__dns_lookup_status + 1] = {
+ [DNS_LOOKUP_NOT_DONE] = "no-lookup",
+ [DNS_LOOKUP_GOOD] = "good",
+ [DNS_LOOKUP_GOOD_WITH_BAD] = "good/bad",
+ [DNS_LOOKUP_BAD] = "bad",
+ [DNS_LOOKUP_GOT_NOT_FOUND] = "not-found",
+ [DNS_LOOKUP_GOT_LOCAL_FAILURE] = "local-failure",
+ [DNS_LOOKUP_GOT_TEMP_FAILURE] = "temp-failure",
+ [DNS_LOOKUP_GOT_NS_FAILURE] = "ns-failure",
+ [NR__dns_lookup_status] = "[weird]"
+};
+
/*
* Display the list of Volume Location servers we're using for a cell.
*/
static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
{
- struct sockaddr_rxrpc *addr = v;
+ const struct afs_vl_seq_net_private *priv = m->private;
+ const struct afs_vlserver_list *vllist = priv->vllist;
+ const struct afs_vlserver_entry *entry;
+ const struct afs_vlserver *vlserver;
+ const struct afs_addr_list *alist;
+ int i;
- /* display header on line 1 */
- if (v == (void *)1) {
- seq_puts(m, "ADDRESS\n");
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(m, "# source %s, status %s\n",
+ dns_record_sources[vllist->source],
+ dns_lookup_statuses[vllist->status]);
return 0;
}
- /* display one cell per line on subsequent lines */
- seq_printf(m, "%pISp\n", &addr->transport);
+ entry = v;
+ vlserver = entry->server;
+ alist = rcu_dereference(vlserver->addresses);
+
+ seq_printf(m, "%s [p=%hu w=%hu s=%s,%s]:\n",
+ vlserver->name, entry->priority, entry->weight,
+ dns_record_sources[alist ? alist->source : entry->source],
+ dns_lookup_statuses[alist ? alist->status : entry->status]);
+ if (alist) {
+ for (i = 0; i < alist->nr_addrs; i++)
+ seq_printf(m, " %c %pISpc\n",
+ alist->preferred == i ? '>' : '-',
+ &alist->addrs[i].transport);
+ }
return 0;
}
static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
__acquires(rcu)
{
- struct afs_addr_list *alist;
+ struct afs_vl_seq_net_private *priv = m->private;
+ struct afs_vlserver_list *vllist;
struct afs_cell *cell = PDE_DATA(file_inode(m->file));
loff_t pos = *_pos;
rcu_read_lock();
- alist = rcu_dereference(cell->vl_addrs);
+ vllist = rcu_dereference(cell->vl_servers);
+ priv->vllist = vllist;
- /* allow for the header line */
- if (!pos)
- return (void *) 1;
- pos--;
+ if (pos < 0)
+ *_pos = pos = 0;
+ if (pos == 0)
+ return SEQ_START_TOKEN;
- if (!alist || pos >= alist->nr_addrs)
+ if (!vllist || pos - 1 >= vllist->nr_servers)
return NULL;
- return alist->addrs + pos;
+ return &vllist->servers[pos - 1];
}
static void *afs_proc_cell_vlservers_next(struct seq_file *m, void *v,
loff_t *_pos)
{
- struct afs_addr_list *alist;
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_vl_seq_net_private *priv = m->private;
+ struct afs_vlserver_list *vllist = priv->vllist;
loff_t pos;
- alist = rcu_dereference(cell->vl_addrs);
-
pos = *_pos;
- (*_pos)++;
- if (!alist || pos >= alist->nr_addrs)
+ pos++;
+ *_pos = pos;
+ if (!vllist || pos - 1 >= vllist->nr_servers)
return NULL;
- return alist->addrs + pos;
+ return &vllist->servers[pos - 1];
}
static void afs_proc_cell_vlservers_stop(struct seq_file *m, void *v)
@@ -337,11 +391,11 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
&server->uuid,
atomic_read(&server->usage),
&alist->addrs[0].transport,
- alist->index == 0 ? "*" : "");
+ alist->preferred == 0 ? "*" : "");
for (i = 1; i < alist->nr_addrs; i++)
seq_printf(m, " %pISpc%s\n",
&alist->addrs[i].transport,
- alist->index == i ? "*" : "");
+ alist->preferred == i ? "*" : "");
return 0;
}
@@ -562,7 +616,7 @@ int afs_proc_cell_setup(struct afs_cell *cell)
if (!proc_create_net_data("vlservers", 0444, dir,
&afs_proc_cell_vlservers_ops,
- sizeof(struct seq_net_private),
+ sizeof(struct afs_vl_seq_net_private),
cell) ||
!proc_create_net_data("volumes", 0444, dir,
&afs_proc_cell_volumes_ops,
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
new file mode 100644
index 000000000000..07bc10f076aa
--- /dev/null
+++ b/fs/afs/protocol_yfs.h
@@ -0,0 +1,163 @@
+/* YFS protocol bits
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define YFS_FS_SERVICE 2500
+#define YFS_CM_SERVICE 2501
+
+#define YFSCBMAX 1024
+
+enum YFS_CM_Operations {
+ YFSCBProbe = 206, /* probe client */
+ YFSCBGetLock = 207, /* get contents of CM lock table */
+ YFSCBXStatsVersion = 209, /* get version of extended statistics */
+ YFSCBGetXStats = 210, /* get contents of extended statistics data */
+ YFSCBInitCallBackState3 = 213, /* initialise callback state, version 3 */
+ YFSCBProbeUuid = 214, /* check the client hasn't rebooted */
+ YFSCBGetServerPrefs = 215,
+ YFSCBGetCellServDV = 216,
+ YFSCBGetLocalCell = 217,
+ YFSCBGetCacheConfig = 218,
+ YFSCBGetCellByNum = 65537,
+ YFSCBTellMeAboutYourself = 65538, /* get client capabilities */
+ YFSCBCallBack = 64204,
+};
+
+enum YFS_FS_Operations {
+ YFSFETCHACL = 64131, /* YFS Fetch file ACL */
+ YFSFETCHSTATUS = 64132, /* YFS Fetch file status */
+ YFSSTOREACL = 64134, /* YFS Store file ACL */
+ YFSSTORESTATUS = 64135, /* YFS Store file status */
+ YFSREMOVEFILE = 64136, /* YFS Remove a file */
+ YFSCREATEFILE = 64137, /* YFS Create a file */
+ YFSRENAME = 64138, /* YFS Rename or move a file or directory */
+ YFSSYMLINK = 64139, /* YFS Create a symbolic link */
+ YFSLINK = 64140, /* YFS Create a hard link */
+ YFSMAKEDIR = 64141, /* YFS Create a directory */
+ YFSREMOVEDIR = 64142, /* YFS Remove a directory */
+ YFSGETVOLUMESTATUS = 64149, /* YFS Get volume status information */
+ YFSSETVOLUMESTATUS = 64150, /* YFS Set volume status information */
+ YFSSETLOCK = 64156, /* YFS Request a file lock */
+ YFSEXTENDLOCK = 64157, /* YFS Extend a file lock */
+ YFSRELEASELOCK = 64158, /* YFS Release a file lock */
+ YFSLOOKUP = 64161, /* YFS lookup file in directory */
+ YFSFLUSHCPS = 64165,
+ YFSFETCHOPAQUEACL = 64168,
+ YFSWHOAMI = 64170,
+ YFSREMOVEACL = 64171,
+ YFSREMOVEFILE2 = 64173,
+ YFSSTOREOPAQUEACL2 = 64174,
+ YFSINLINEBULKSTATUS = 64536, /* YFS Fetch multiple file statuses with errors */
+ YFSFETCHDATA64 = 64537, /* YFS Fetch file data */
+ YFSSTOREDATA64 = 64538, /* YFS Store file data */
+ YFSUPDATESYMLINK = 64540,
+};
+
+struct yfs_xdr_u64 {
+ __be32 msw;
+ __be32 lsw;
+} __packed;
+
+static inline u64 xdr_to_u64(const struct yfs_xdr_u64 x)
+{
+ return ((u64)ntohl(x.msw) << 32) | ntohl(x.lsw);
+}
+
+static inline struct yfs_xdr_u64 u64_to_xdr(const u64 x)
+{
+ return (struct yfs_xdr_u64){ .msw = htonl(x >> 32), .lsw = htonl(x) };
+}
+
+struct yfs_xdr_vnode {
+ struct yfs_xdr_u64 lo;
+ __be32 hi;
+ __be32 unique;
+} __packed;
+
+struct yfs_xdr_YFSFid {
+ struct yfs_xdr_u64 volume;
+ struct yfs_xdr_vnode vnode;
+} __packed;
+
+
+struct yfs_xdr_YFSFetchStatus {
+ __be32 type;
+ __be32 nlink;
+ struct yfs_xdr_u64 size;
+ struct yfs_xdr_u64 data_version;
+ struct yfs_xdr_u64 author;
+ struct yfs_xdr_u64 owner;
+ struct yfs_xdr_u64 group;
+ __be32 mode;
+ __be32 caller_access;
+ __be32 anon_access;
+ struct yfs_xdr_vnode parent;
+ __be32 data_access_protocol;
+ struct yfs_xdr_u64 mtime_client;
+ struct yfs_xdr_u64 mtime_server;
+ __be32 lock_count;
+ __be32 abort_code;
+} __packed;
+
+struct yfs_xdr_YFSCallBack {
+ __be32 version;
+ struct yfs_xdr_u64 expiration_time;
+ __be32 type;
+} __packed;
+
+struct yfs_xdr_YFSStoreStatus {
+ __be32 mask;
+ __be32 mode;
+ struct yfs_xdr_u64 mtime_client;
+ struct yfs_xdr_u64 owner;
+ struct yfs_xdr_u64 group;
+} __packed;
+
+struct yfs_xdr_RPCFlags {
+ __be32 rpc_flags;
+} __packed;
+
+struct yfs_xdr_YFSVolSync {
+ struct yfs_xdr_u64 vol_creation_date;
+ struct yfs_xdr_u64 vol_update_date;
+ struct yfs_xdr_u64 max_quota;
+ struct yfs_xdr_u64 blocks_in_use;
+ struct yfs_xdr_u64 blocks_avail;
+} __packed;
+
+enum yfs_volume_type {
+ yfs_volume_type_ro = 0,
+ yfs_volume_type_rw = 1,
+};
+
+#define yfs_FVSOnline 0x1
+#define yfs_FVSInservice 0x2
+#define yfs_FVSBlessed 0x4
+#define yfs_FVSNeedsSalvage 0x8
+
+struct yfs_xdr_YFSFetchVolumeStatus {
+ struct yfs_xdr_u64 vid;
+ struct yfs_xdr_u64 parent_id;
+ __be32 flags;
+ __be32 type;
+ struct yfs_xdr_u64 max_quota;
+ struct yfs_xdr_u64 blocks_in_use;
+ struct yfs_xdr_u64 part_blocks_avail;
+ struct yfs_xdr_u64 part_max_blocks;
+ struct yfs_xdr_u64 vol_copy_date;
+ struct yfs_xdr_u64 vol_backup_date;
+} __packed;
+
+struct yfs_xdr_YFSStoreVolumeStatus {
+ __be32 mask;
+ struct yfs_xdr_u64 min_quota;
+ struct yfs_xdr_u64 max_quota;
+ struct yfs_xdr_u64 file_quota;
+} __packed;
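protocol_yfs.h above transfers every 64-bit quantity as two big-endian 32-bit words and converts with xdr_to_u64()/u64_to_xdr(). A hedged userspace round-trip of the same split, using the standard htonl()/ntohl() rather than the kernel's types:

#include <arpa/inet.h>  /* htonl, ntohl */
#include <assert.h>
#include <stdint.h>

struct xdr_u64_sketch {
        uint32_t msw;   /* most significant word, big-endian on the wire */
        uint32_t lsw;   /* least significant word, big-endian on the wire */
};

static struct xdr_u64_sketch u64_to_xdr(uint64_t x)
{
        struct xdr_u64_sketch v = {
                .msw = htonl((uint32_t)(x >> 32)),
                .lsw = htonl((uint32_t)x),
        };
        return v;
}

static uint64_t xdr_to_u64(struct xdr_u64_sketch v)
{
        return ((uint64_t)ntohl(v.msw) << 32) | ntohl(v.lsw);
}

int main(void)
{
        uint64_t vid = 0x123456789abcdef0ULL;

        assert(xdr_to_u64(u64_to_xdr(vid)) == vid);
        return 0;
}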
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 1faef56b12bd..00504254c1c2 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -19,14 +19,6 @@
#include "afs_fs.h"
/*
- * Initialise a filesystem server cursor for iterating over FS servers.
- */
-static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode)
-{
- memset(fc, 0, sizeof(*fc));
-}
-
-/*
* Begin an operation on the fileserver.
*
* Fileserver operations are serialised on the server by vnode, so we serialise
@@ -35,13 +27,14 @@ static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode
bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
struct key *key)
{
- afs_init_fs_cursor(fc, vnode);
+ memset(fc, 0, sizeof(*fc));
fc->vnode = vnode;
fc->key = key;
fc->ac.error = SHRT_MAX;
+ fc->error = -EDESTADDRREQ;
if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
- fc->ac.error = -EINTR;
+ fc->error = -EINTR;
fc->flags |= AFS_FS_CURSOR_STOP;
return false;
}
@@ -65,12 +58,15 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
fc->server_list = afs_get_serverlist(vnode->volume->servers);
read_unlock(&vnode->volume->servers_lock);
+ fc->untried = (1UL << fc->server_list->nr_servers) - 1;
+ fc->index = READ_ONCE(fc->server_list->preferred);
+
cbi = vnode->cb_interest;
if (cbi) {
/* See if the vnode's preferred record is still available */
for (i = 0; i < fc->server_list->nr_servers; i++) {
if (fc->server_list->servers[i].cb_interest == cbi) {
- fc->start = i;
+ fc->index = i;
goto found_interest;
}
}
@@ -80,7 +76,7 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
* and have to return an error.
*/
if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- fc->ac.error = -ESTALE;
+ fc->error = -ESTALE;
return false;
}
@@ -94,12 +90,9 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
afs_put_cb_interest(afs_v2net(vnode), cbi);
cbi = NULL;
- } else {
- fc->start = READ_ONCE(fc->server_list->index);
}
found_interest:
- fc->index = fc->start;
return true;
}
@@ -117,7 +110,7 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
default: m = "busy"; break;
}
- pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m);
+ pr_notice("kAFS: Volume %llu '%s' is %s\n", volume->vid, volume->name, m);
}
/*
@@ -127,7 +120,7 @@ static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
{
msleep_interruptible(1000);
if (signal_pending(current)) {
- fc->ac.error = -ERESTARTSYS;
+ fc->error = -ERESTARTSYS;
return false;
}
@@ -143,27 +136,32 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
struct afs_addr_list *alist;
struct afs_server *server;
struct afs_vnode *vnode = fc->vnode;
+ u32 rtt, abort_code;
+ int error = fc->ac.error, i;
- _enter("%u/%u,%u/%u,%d,%d",
- fc->index, fc->start,
- fc->ac.index, fc->ac.start,
- fc->ac.error, fc->ac.abort_code);
+ _enter("%lx[%d],%lx[%d],%d,%d",
+ fc->untried, fc->index,
+ fc->ac.tried, fc->ac.index,
+ error, fc->ac.abort_code);
if (fc->flags & AFS_FS_CURSOR_STOP) {
_leave(" = f [stopped]");
return false;
}
+ fc->nr_iterations++;
+
/* Evaluate the result of the previous operation, if there was one. */
- switch (fc->ac.error) {
+ switch (error) {
case SHRT_MAX:
goto start;
case 0:
default:
/* Success or local failure. Stop. */
+ fc->error = error;
fc->flags |= AFS_FS_CURSOR_STOP;
- _leave(" = f [okay/local %d]", fc->ac.error);
+ _leave(" = f [okay/local %d]", error);
return false;
case -ECONNABORTED:
@@ -178,7 +176,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* - May indicate that the fileserver couldn't attach to the vol.
*/
if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
- fc->ac.error = -EREMOTEIO;
+ fc->error = -EREMOTEIO;
goto next_server;
}
@@ -187,12 +185,12 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
write_unlock(&vnode->volume->servers_lock);
set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- fc->ac.error = afs_check_volume_status(vnode->volume, fc->key);
- if (fc->ac.error < 0)
- goto failed;
+ error = afs_check_volume_status(vnode->volume, fc->key);
+ if (error < 0)
+ goto failed_set_error;
if (test_bit(AFS_VOLUME_DELETED, &vnode->volume->flags)) {
- fc->ac.error = -ENOMEDIUM;
+ fc->error = -ENOMEDIUM;
goto failed;
}
@@ -200,7 +198,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* it's the fileserver having trouble.
*/
if (vnode->volume->servers == fc->server_list) {
- fc->ac.error = -EREMOTEIO;
+ fc->error = -EREMOTEIO;
goto next_server;
}
@@ -215,7 +213,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case VONLINE:
case VDISKFULL:
case VOVERQUOTA:
- fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
+ fc->error = afs_abort_to_error(fc->ac.abort_code);
goto next_server;
case VOFFLINE:
@@ -224,11 +222,11 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
}
if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
- fc->ac.error = -EADV;
+ fc->error = -EADV;
goto failed;
}
if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- fc->ac.error = -ESTALE;
+ fc->error = -ESTALE;
goto failed;
}
goto busy;
@@ -240,7 +238,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* have a file lock we need to maintain.
*/
if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
- fc->ac.error = -EBUSY;
+ fc->error = -EBUSY;
goto failed;
}
if (!test_and_set_bit(AFS_VOLUME_BUSY, &vnode->volume->flags)) {
@@ -269,16 +267,16 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* honour, just in case someone sets up a loop.
*/
if (fc->flags & AFS_FS_CURSOR_VMOVED) {
- fc->ac.error = -EREMOTEIO;
+ fc->error = -EREMOTEIO;
goto failed;
}
fc->flags |= AFS_FS_CURSOR_VMOVED;
set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags);
set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- fc->ac.error = afs_check_volume_status(vnode->volume, fc->key);
- if (fc->ac.error < 0)
- goto failed;
+ error = afs_check_volume_status(vnode->volume, fc->key);
+ if (error < 0)
+ goto failed_set_error;
/* If the server list didn't change, then the VLDB is
* out of sync with the fileservers. This is hopefully
@@ -290,7 +288,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* TODO: Retry a few times with sleeps.
*/
if (vnode->volume->servers == fc->server_list) {
- fc->ac.error = -ENOMEDIUM;
+ fc->error = -ENOMEDIUM;
goto failed;
}
@@ -299,20 +297,25 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
default:
clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags);
clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
- fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
+ fc->error = afs_abort_to_error(fc->ac.abort_code);
goto failed;
}
+ case -ETIMEDOUT:
+ case -ETIME:
+ if (fc->error != -EDESTADDRREQ)
+ goto iterate_address;
+ /* Fall through */
case -ENETUNREACH:
case -EHOSTUNREACH:
case -ECONNREFUSED:
- case -ETIMEDOUT:
- case -ETIME:
_debug("no conn");
+ fc->error = error;
goto iterate_address;
case -ECONNRESET:
_debug("call reset");
+ fc->error = error;
goto failed;
}
@@ -328,15 +331,57 @@ start:
/* See if we need to do an update of the volume record. Note that the
* volume may have moved or even have been deleted.
*/
- fc->ac.error = afs_check_volume_status(vnode->volume, fc->key);
- if (fc->ac.error < 0)
- goto failed;
+ error = afs_check_volume_status(vnode->volume, fc->key);
+ if (error < 0)
+ goto failed_set_error;
if (!afs_start_fs_iteration(fc, vnode))
goto failed;
-use_server:
- _debug("use");
+ _debug("__ VOL %llx __", vnode->volume->vid);
+ error = afs_probe_fileservers(afs_v2net(vnode), fc->key, fc->server_list);
+ if (error < 0)
+ goto failed_set_error;
+
+pick_server:
+ _debug("pick [%lx]", fc->untried);
+
+ error = afs_wait_for_fs_probes(fc->server_list, fc->untried);
+ if (error < 0)
+ goto failed_set_error;
+
+ /* Pick the untried server with the lowest RTT. If we have outstanding
+ * callbacks, we stick with the server we're already using if we can.
+ */
+ if (fc->cbi) {
+ _debug("cbi %u", fc->index);
+ if (test_bit(fc->index, &fc->untried))
+ goto selected_server;
+ afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
+ fc->cbi = NULL;
+ _debug("nocbi");
+ }
+
+ fc->index = -1;
+ rtt = U32_MAX;
+ for (i = 0; i < fc->server_list->nr_servers; i++) {
+ struct afs_server *s = fc->server_list->servers[i].server;
+
+ if (!test_bit(i, &fc->untried) || !s->probe.responded)
+ continue;
+ if (s->probe.rtt < rtt) {
+ fc->index = i;
+ rtt = s->probe.rtt;
+ }
+ }
+
+ if (fc->index == -1)
+ goto no_more_servers;
+
+selected_server:
+ _debug("use %d", fc->index);
+ __clear_bit(fc->index, &fc->untried);
+
/* We're starting on a different fileserver from the list. We need to
* check it, create a callback intercept, find its address list and
* probe its capabilities before we use it.
@@ -354,10 +399,10 @@ use_server:
* break request before we've finished decoding the reply and
* installing the vnode.
*/
- fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list,
- fc->index);
- if (fc->ac.error < 0)
- goto failed;
+ error = afs_register_server_cb_interest(vnode, fc->server_list,
+ fc->index);
+ if (error < 0)
+ goto failed_set_error;
fc->cbi = afs_get_cb_interest(vnode->cb_interest);
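The pick_server block above tracks untried servers in fc->untried and, when there is no callback interest to stick with, chooses the responding server with the lowest probed RTT before clearing its bit so a failing server is not retried. A simplified standalone sketch of that selection loop, with made-up structures standing in for afs_server_list:

#include <stdint.h>
#include <stdio.h>

struct probe_sketch {
        unsigned int rtt;       /* lower is better */
        int responded;          /* did the probe get an answer? */
};

/* Return the index of the best untried, responding server, or -1. */
static int pick_server(const struct probe_sketch *probes, int nr_servers,
                       unsigned long untried)
{
        unsigned int best_rtt = UINT32_MAX;
        int best = -1, i;

        for (i = 0; i < nr_servers; i++) {
                if (!(untried & (1UL << i)) || !probes[i].responded)
                        continue;
                if (probes[i].rtt < best_rtt) {
                        best = i;
                        best_rtt = probes[i].rtt;
                }
        }
        return best;
}

int main(void)
{
        struct probe_sketch probes[] = {
                { .rtt = 120, .responded = 1 },
                { .rtt =  40, .responded = 1 },
                { .rtt =  10, .responded = 0 }, /* fastest, but never answered */
        };
        unsigned long untried = 0x7;            /* all three still untried */

        printf("selected server %d\n", pick_server(probes, 3, untried));
        return 0;
}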
@@ -369,66 +414,88 @@ use_server:
memset(&fc->ac, 0, sizeof(fc->ac));
- /* Probe the current fileserver if we haven't done so yet. */
- if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
- fc->ac.alist = afs_get_addrlist(alist);
-
- if (!afs_probe_fileserver(fc)) {
- switch (fc->ac.error) {
- case -ENOMEM:
- case -ERESTARTSYS:
- case -EINTR:
- goto failed;
- default:
- goto next_server;
- }
- }
- }
-
if (!fc->ac.alist)
fc->ac.alist = alist;
else
afs_put_addrlist(alist);
- fc->ac.start = READ_ONCE(alist->index);
- fc->ac.index = fc->ac.start;
+ fc->ac.index = -1;
iterate_address:
ASSERT(fc->ac.alist);
- _debug("iterate %d/%d", fc->ac.index, fc->ac.alist->nr_addrs);
/* Iterate over the current server's address list to try and find an
* address on which it will respond to us.
*/
if (!afs_iterate_addresses(&fc->ac))
goto next_server;
+ _debug("address [%u] %u/%u", fc->index, fc->ac.index, fc->ac.alist->nr_addrs);
+
_leave(" = t");
return true;
next_server:
_debug("next");
afs_end_cursor(&fc->ac);
- afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
- fc->cbi = NULL;
- fc->index++;
- if (fc->index >= fc->server_list->nr_servers)
- fc->index = 0;
- if (fc->index != fc->start)
- goto use_server;
+ goto pick_server;
+no_more_servers:
/* That's all the servers poked to no good effect. Try again if some
* of them were busy.
*/
if (fc->flags & AFS_FS_CURSOR_VBUSY)
goto restart_from_beginning;
- fc->ac.error = -EDESTADDRREQ;
- goto failed;
+ abort_code = 0;
+ error = -EDESTADDRREQ;
+ for (i = 0; i < fc->server_list->nr_servers; i++) {
+ struct afs_server *s = fc->server_list->servers[i].server;
+ int probe_error = READ_ONCE(s->probe.error);
+
+ switch (probe_error) {
+ case 0:
+ continue;
+ default:
+ if (error == -ETIMEDOUT ||
+ error == -ETIME)
+ continue;
+ case -ETIMEDOUT:
+ case -ETIME:
+ if (error == -ENOMEM ||
+ error == -ENONET)
+ continue;
+ case -ENOMEM:
+ case -ENONET:
+ if (error == -ENETUNREACH)
+ continue;
+ case -ENETUNREACH:
+ if (error == -EHOSTUNREACH)
+ continue;
+ case -EHOSTUNREACH:
+ if (error == -ECONNREFUSED)
+ continue;
+ case -ECONNREFUSED:
+ if (error == -ECONNRESET)
+ continue;
+ case -ECONNRESET: /* Responded, but call expired. */
+ if (error == -ECONNABORTED)
+ continue;
+ case -ECONNABORTED:
+ abort_code = s->probe.abort_code;
+ error = probe_error;
+ continue;
+ }
+ }
+
+ if (error == -ECONNABORTED)
+ error = afs_abort_to_error(abort_code);
+failed_set_error:
+ fc->error = error;
failed:
fc->flags |= AFS_FS_CURSOR_STOP;
afs_end_cursor(&fc->ac);
- _leave(" = f [failed %d]", fc->ac.error);
+ _leave(" = f [failed %d]", fc->error);
return false;
}
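
The fall-through switch above encodes an error-precedence rule: once every server has been poked to no effect, report the most informative of the per-server probe errors instead of a bare -EDESTADDRREQ. A minimal sketch of the same rule with an explicit ranking; afs_error_rank() is a hypothetical helper, not part of the patch.

#include <linux/errno.h>

/* Rank probe errors from least to most informative. */
static int afs_error_rank(int error)
{
	switch (error) {
	case -ECONNABORTED:	return 8;	/* Server sent an abort code */
	case -ECONNRESET:	return 7;	/* Responded, but call expired */
	case -ECONNREFUSED:	return 6;
	case -EHOSTUNREACH:	return 5;
	case -ENETUNREACH:	return 4;
	case -ENOMEM:
	case -ENONET:		return 3;	/* Local resource failure */
	case -ETIMEDOUT:
	case -ETIME:		return 2;
	default:		return 1;	/* Incl. the initial -EDESTADDRREQ */
	}
}

Each failed probe then replaces the running error whenever afs_error_rank(probe_error) >= afs_error_rank(error), which matches what the cascade of continue statements achieves without a lookup table; -ECONNABORTED additionally records the server's abort code so it can be translated via afs_abort_to_error().
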
@@ -442,13 +509,14 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
struct afs_vnode *vnode = fc->vnode;
struct afs_cb_interest *cbi = vnode->cb_interest;
struct afs_addr_list *alist;
+ int error = fc->ac.error;
_enter("");
- switch (fc->ac.error) {
+ switch (error) {
case SHRT_MAX:
if (!cbi) {
- fc->ac.error = -ESTALE;
+ fc->error = -ESTALE;
fc->flags |= AFS_FS_CURSOR_STOP;
return false;
}
@@ -461,25 +529,26 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
afs_get_addrlist(alist);
read_unlock(&cbi->server->fs_lock);
if (!alist) {
- fc->ac.error = -ESTALE;
+ fc->error = -ESTALE;
fc->flags |= AFS_FS_CURSOR_STOP;
return false;
}
memset(&fc->ac, 0, sizeof(fc->ac));
fc->ac.alist = alist;
- fc->ac.start = READ_ONCE(alist->index);
- fc->ac.index = fc->ac.start;
+ fc->ac.index = -1;
goto iterate_address;
case 0:
default:
/* Success or local failure. Stop. */
+ fc->error = error;
fc->flags |= AFS_FS_CURSOR_STOP;
- _leave(" = f [okay/local %d]", fc->ac.error);
+ _leave(" = f [okay/local %d]", error);
return false;
case -ECONNABORTED:
+ fc->error = afs_abort_to_error(fc->ac.abort_code);
fc->flags |= AFS_FS_CURSOR_STOP;
_leave(" = f [abort]");
return false;
@@ -490,6 +559,7 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
case -ETIMEDOUT:
case -ETIME:
_debug("no conn");
+ fc->error = error;
goto iterate_address;
}
@@ -507,12 +577,65 @@ iterate_address:
}
/*
+ * Dump cursor state when the rotation fails with EDESTADDRREQ.
+ */
+static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
+{
+ static int count;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3)
+ return;
+ count++;
+
+ rcu_read_lock();
+
+ pr_notice("EDESTADDR occurred\n");
+ pr_notice("FC: cbb=%x cbb2=%x fl=%hx err=%hd\n",
+ fc->cb_break, fc->cb_break_2, fc->flags, fc->error);
+ pr_notice("FC: ut=%lx ix=%d ni=%u\n",
+ fc->untried, fc->index, fc->nr_iterations);
+
+ if (fc->server_list) {
+ const struct afs_server_list *sl = fc->server_list;
+ pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n",
+ sl->nr_servers, sl->preferred, sl->vnovol_mask);
+ for (i = 0; i < sl->nr_servers; i++) {
+ const struct afs_server *s = sl->servers[i].server;
+ pr_notice("FC: server fl=%lx av=%u %pU\n",
+ s->flags, s->addr_version, &s->uuid);
+ if (s->addresses) {
+ const struct afs_addr_list *a =
+ rcu_dereference(s->addresses);
+ pr_notice("FC: - av=%u nr=%u/%u/%u pr=%u\n",
+ a->version,
+ a->nr_ipv4, a->nr_addrs, a->max_addrs,
+ a->preferred);
+ pr_notice("FC: - pr=%lx R=%lx F=%lx\n",
+ a->probed, a->responded, a->failed);
+ if (a == fc->ac.alist)
+ pr_notice("FC: - current\n");
+ }
+ }
+ }
+
+ pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
+ fc->ac.tried, fc->ac.index, fc->ac.abort_code, fc->ac.error,
+ fc->ac.responded, fc->ac.nr_iterations);
+ rcu_read_unlock();
+}
+
+/*
* Tidy up a filesystem cursor and unlock the vnode.
*/
int afs_end_vnode_operation(struct afs_fs_cursor *fc)
{
struct afs_net *net = afs_v2net(fc->vnode);
- int ret;
+
+ if (fc->error == -EDESTADDRREQ ||
+ fc->error == -ENETUNREACH ||
+ fc->error == -EHOSTUNREACH)
+ afs_dump_edestaddrreq(fc);
mutex_unlock(&fc->vnode->io_lock);
@@ -520,9 +643,8 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc)
afs_put_cb_interest(net, fc->cbi);
afs_put_serverlist(net, fc->server_list);
- ret = fc->ac.error;
- if (ret == -ECONNABORTED)
- afs_abort_to_error(fc->ac.abort_code);
+ if (fc->error == -ECONNABORTED)
+ fc->error = afs_abort_to_error(fc->ac.abort_code);
- return fc->ac.error;
+ return fc->error;
}
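
For orientation, the cursor above is driven from the vnode operation wrappers in roughly this shape; a sketch modelled on callers such as afs_fetch_status() elsewhere in fs/afs, so everything other than the cursor calls is indicative rather than exact.

static int example_fetch_status(struct afs_vnode *vnode, struct key *key)
{
	struct afs_fs_cursor fc;
	int ret = -ERESTARTSYS;

	if (afs_begin_vnode_operation(&fc, vnode, key)) {
		while (afs_select_fileserver(&fc)) {
			/* Each pass retries on the server/address picked above;
			 * the outcome lands in fc.ac.error for the next pass.
			 */
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_fetch_file_status(&fc, NULL, false);
		}

		/* ... commit the returned status to the vnode here ... */
		ret = afs_end_vnode_operation(&fc);
	}

	return ret;
}
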
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 77a83790a31f..59970886690f 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -16,6 +16,7 @@
#include <net/af_rxrpc.h>
#include "internal.h"
#include "afs_cm.h"
+#include "protocol_yfs.h"
struct workqueue_struct *afs_async_calls;
@@ -75,6 +76,18 @@ int afs_open_socket(struct afs_net *net)
if (ret < 0)
goto error_2;
+ srx.srx_service = YFS_CM_SERVICE;
+ ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ if (ret < 0)
+ goto error_2;
+
+ /* Ideally, we'd turn on service upgrade here, but we can't because
+ * OpenAFS is buggy and leaks the userStatus field from packet to
+ * packet and between FS packets and CB packets - so if we try to do an
+ * upgrade on an FS packet, OpenAFS will leak that into the CB packet
+ * it sends back to us.
+ */
+
rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
afs_rx_discard_new_call);
@@ -143,6 +156,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
INIT_WORK(&call->async_work, afs_process_async_call);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
+ call->_iter = &call->iter;
o = atomic_inc_return(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_alloc, 1, o,
@@ -176,6 +190,7 @@ void afs_put_call(struct afs_call *call)
afs_put_server(call->net, call->cm_server);
afs_put_cb_interest(call->net, call->cbi);
+ afs_put_addrlist(call->alist);
kfree(call->request);
trace_afs_call(call, afs_call_trace_free, 0, o,
@@ -189,21 +204,22 @@ void afs_put_call(struct afs_call *call)
}
/*
- * Queue the call for actual work. Returns 0 unconditionally for convenience.
+ * Queue the call for actual work.
*/
-int afs_queue_call_work(struct afs_call *call)
+static void afs_queue_call_work(struct afs_call *call)
{
- int u = atomic_inc_return(&call->usage);
+ if (call->type->work) {
+ int u = atomic_inc_return(&call->usage);
- trace_afs_call(call, afs_call_trace_work, u,
- atomic_read(&call->net->nr_outstanding_calls),
- __builtin_return_address(0));
+ trace_afs_call(call, afs_call_trace_work, u,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
- INIT_WORK(&call->work, call->type->work);
+ INIT_WORK(&call->work, call->type->work);
- if (!queue_work(afs_wq, &call->work))
- afs_put_call(call);
- return 0;
+ if (!queue_work(afs_wq, &call->work))
+ afs_put_call(call);
+ }
}
/*
@@ -233,6 +249,7 @@ struct afs_call *afs_alloc_flat_call(struct afs_net *net,
goto nomem_free;
}
+ afs_extract_to_buf(call, call->reply_max);
call->operation_ID = type->op;
init_waitqueue_head(&call->waitq);
return call;
@@ -286,7 +303,7 @@ static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
offset = 0;
}
- iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+ iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes);
}
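
The iov_iter_kvec()/iov_iter_bvec() changes throughout this file track the concurrent iov_iter rework, in which the iterator type is implied by the constructor and only the data direction is passed. A minimal sketch of the new form, not part of the patch:

static void example_fill_iter(struct msghdr *msg, struct kvec *iov,
			      void *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;

	/* The constructor implies ITER_KVEC; previously WRITE | ITER_KVEC. */
	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 1, len);
}

The kvec array must stay live for as long as the iterator is used, which is why the real callers keep it on their own stack frame.
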
/*
@@ -342,7 +359,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
gfp_t gfp, bool async)
{
- struct sockaddr_rxrpc *srx = ac->addr;
+ struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index];
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
@@ -359,6 +376,8 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
atomic_read(&call->net->nr_outstanding_calls));
call->async = async;
+ call->addr_ix = ac->index;
+ call->alist = afs_get_addrlist(ac->alist);
/* Work out the length we're going to transmit. This is awkward for
* calls such as FS.StoreData where there's an extra injection of data
@@ -390,6 +409,7 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
call->debug_id);
if (IS_ERR(rxcall)) {
ret = PTR_ERR(rxcall);
+ call->error = ret;
goto error_kill_call;
}
@@ -401,8 +421,7 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
- call->request_size);
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_WAITALL | (call->send_pages ? MSG_MORE : 0);
@@ -432,7 +451,7 @@ error_do_abort:
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
} else {
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0);
+ iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0);
rxrpc_kernel_recv_data(call->net->socket, rxcall,
&msg.msg_iter, false,
&call->abort_code, &call->service_id);
@@ -442,6 +461,8 @@ error_do_abort:
call->error = ret;
trace_afs_call_done(call);
error_kill_call:
+ if (call->type->done)
+ call->type->done(call);
afs_put_call(call);
ac->error = ret;
_leave(" = %d", ret);
@@ -466,14 +487,12 @@ static void afs_deliver_to_call(struct afs_call *call)
state == AFS_CALL_SV_AWAIT_ACK
) {
if (state == AFS_CALL_SV_AWAIT_ACK) {
- struct iov_iter iter;
-
- iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0);
+ iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
- call->rxcall, &iter, false,
- &remote_abort,
+ call->rxcall, &call->iter,
+ false, &remote_abort,
&call->service_id);
- trace_afs_recv_data(call, 0, 0, false, ret);
+ trace_afs_receive_data(call, &call->iter, false, ret);
if (ret == -EINPROGRESS || ret == -EAGAIN)
return;
@@ -485,10 +504,17 @@ static void afs_deliver_to_call(struct afs_call *call)
return;
}
+ if (call->want_reply_time &&
+ rxrpc_kernel_get_reply_time(call->net->socket,
+ call->rxcall,
+ &call->reply_time))
+ call->want_reply_time = false;
+
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
switch (ret) {
case 0:
+ afs_queue_call_work(call);
if (state == AFS_CALL_CL_PROC_REPLY) {
if (call->cbi)
set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
@@ -500,7 +526,6 @@ static void afs_deliver_to_call(struct afs_call *call)
case -EINPROGRESS:
case -EAGAIN:
goto out;
- case -EIO:
case -ECONNABORTED:
ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
goto done;
@@ -509,6 +534,10 @@ static void afs_deliver_to_call(struct afs_call *call)
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KIV");
goto local_abort;
+ case -EIO:
+ pr_err("kAFS: Call %u in bad state %u\n",
+ call->debug_id, state);
+ /* Fall through */
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
@@ -517,12 +546,14 @@ static void afs_deliver_to_call(struct afs_call *call)
if (state != AFS_CALL_CL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
- abort_code, -EBADMSG, "KUM");
+ abort_code, ret, "KUM");
goto local_abort;
}
}
done:
+ if (call->type->done)
+ call->type->done(call);
if (state == AFS_CALL_COMPLETE && call->incoming)
afs_put_call(call);
out:
@@ -728,6 +759,7 @@ void afs_charge_preallocation(struct work_struct *work)
call->async = true;
call->state = AFS_CALL_SV_AWAIT_OP_ID;
init_waitqueue_head(&call->waitq);
+ afs_extract_to_tmp(call);
}
if (rxrpc_kernel_charge_accept(net->socket,
@@ -773,18 +805,15 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
{
int ret;
- _enter("{%zu}", call->offset);
-
- ASSERTCMP(call->offset, <, 4);
+ _enter("{%zu}", iov_iter_count(call->_iter));
/* the operation ID forms the first four bytes of the request data */
- ret = afs_extract_data(call, &call->tmp, 4, true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
call->operation_ID = ntohl(call->tmp);
afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST);
- call->offset = 0;
/* ask the cache manager to route the call (it'll change the call type
* if successful) */
@@ -825,7 +854,7 @@ void afs_send_empty_reply(struct afs_call *call)
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
+ iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -864,7 +893,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
iov[0].iov_len = len;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -888,30 +917,19 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
/*
* Extract a piece of data from the received data socket buffers.
*/
-int afs_extract_data(struct afs_call *call, void *buf, size_t count,
- bool want_more)
+int afs_extract_data(struct afs_call *call, bool want_more)
{
struct afs_net *net = call->net;
- struct iov_iter iter;
- struct kvec iov;
+ struct iov_iter *iter = call->_iter;
enum afs_call_state state;
u32 remote_abort = 0;
int ret;
- _enter("{%s,%zu},,%zu,%d",
- call->type->name, call->offset, count, want_more);
-
- ASSERTCMP(call->offset, <=, count);
-
- iov.iov_base = buf + call->offset;
- iov.iov_len = count - call->offset;
- iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset);
+ _enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more);
- ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter,
+ ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
want_more, &remote_abort,
&call->service_id);
- call->offset += (count - call->offset) - iov_iter_count(&iter);
- trace_afs_recv_data(call, count, call->offset, want_more, ret);
if (ret == 0 || ret == -EAGAIN)
return ret;
@@ -926,7 +944,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
break;
case AFS_CALL_COMPLETE:
kdebug("prem complete %d", call->error);
- return -EIO;
+ return afs_io_error(call, afs_io_error_extract);
default:
break;
}
@@ -940,8 +958,9 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
/*
* Log protocol error production.
*/
-noinline int afs_protocol_error(struct afs_call *call, int error)
+noinline int afs_protocol_error(struct afs_call *call, int error,
+ enum afs_eproto_cause cause)
{
- trace_afs_protocol_error(call, error, __builtin_return_address(0));
+ trace_afs_protocol_error(call, error, cause);
return error;
}
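
The reworked extraction API above does away with per-call offset tracking: a deliver routine primes call->_iter with one of the afs_extract_to_*() helpers and then calls afs_extract_data() until the iterator drains. A hypothetical deliver routine in that style, not part of the patch (the vlclient.c hunks later in this diff follow the same shape):

static int example_deliver_counted_words(struct afs_call *call)
{
	int ret;

	switch (call->unmarshall) {
	case 0:
		afs_extract_to_tmp(call);		/* Want one __be32: the count */
		call->unmarshall++;
		/* Fall through */

	case 1:
		ret = afs_extract_data(call, true);	/* More data will follow */
		if (ret < 0)
			return ret;			/* Incl. -EAGAIN if not all here yet */

		call->count = ntohl(call->tmp);
		/* A real routine must bound-check count against call->reply_max. */
		afs_extract_to_buf(call, call->count * sizeof(__be32));
		call->unmarshall++;
		/* Fall through */

	case 2:
		ret = afs_extract_data(call, false);	/* Final piece of the reply */
		if (ret < 0)
			return ret;

		/* The words are now in call->buffer; parse them here. */
		call->unmarshall++;
		break;
	}

	return 0;
}
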
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 81dfedb7879f..5f58a9a17e69 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -126,7 +126,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
bool changed = false;
int i, j;
- _enter("{%x:%u},%x,%x",
+ _enter("{%llx:%llu},%x,%x",
vnode->fid.vid, vnode->fid.vnode, key_serial(key), caller_access);
rcu_read_lock();
@@ -147,7 +147,8 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
break;
}
- if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) {
+ if (afs_cb_is_broken(cb_break, vnode,
+ vnode->cb_interest)) {
changed = true;
break;
}
@@ -177,7 +178,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
}
- if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest))
+ if (afs_cb_is_broken(cb_break, vnode, vnode->cb_interest))
goto someone_else_changed_it;
/* We need a ref on any permits list we want to copy as we'll have to
@@ -256,7 +257,7 @@ found:
spin_lock(&vnode->lock);
zap = rcu_access_pointer(vnode->permit_cache);
- if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest) &&
+ if (!afs_cb_is_broken(cb_break, vnode, vnode->cb_interest) &&
zap == permits)
rcu_assign_pointer(vnode->permit_cache, replacement);
else
@@ -289,7 +290,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
bool valid = false;
int i, ret;
- _enter("{%x:%u},%x",
+ _enter("{%llx:%llu},%x",
vnode->fid.vid, vnode->fid.vnode, key_serial(key));
/* check the permits to see if we've got one yet */
@@ -349,7 +350,7 @@ int afs_permission(struct inode *inode, int mask)
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
- _enter("{{%x:%u},%lx},%x,",
+ _enter("{{%llx:%llu},%lx},%x,",
vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
key = afs_request_key(vnode->volume->cell);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 1d329e6981d5..642afa2e9783 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
+#include "protocol_yfs.h"
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
static unsigned afs_server_update_delay = 30; /* Time till VLDB recheck in secs */
@@ -230,6 +231,8 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
rwlock_init(&server->fs_lock);
INIT_HLIST_HEAD(&server->cb_volumes);
rwlock_init(&server->cb_break_lock);
+ init_waitqueue_head(&server->probe_wq);
+ spin_lock_init(&server->probe_lock);
afs_inc_servers_outstanding(net);
_leave(" = %p", server);
@@ -246,41 +249,23 @@ enomem:
static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
struct key *key, const uuid_t *uuid)
{
- struct afs_addr_cursor ac;
- struct afs_addr_list *alist;
+ struct afs_vl_cursor vc;
+ struct afs_addr_list *alist = NULL;
int ret;
- ret = afs_set_vl_cursor(&ac, cell);
- if (ret < 0)
- return ERR_PTR(ret);
-
- while (afs_iterate_addresses(&ac)) {
- if (test_bit(ac.index, &ac.alist->yfs))
- alist = afs_yfsvl_get_endpoints(cell->net, &ac, key, uuid);
- else
- alist = afs_vl_get_addrs_u(cell->net, &ac, key, uuid);
- switch (ac.error) {
- case 0:
- afs_end_cursor(&ac);
- return alist;
- case -ECONNABORTED:
- ac.error = afs_abort_to_error(ac.abort_code);
- goto error;
- case -ENOMEM:
- case -ENONET:
- goto error;
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -ECONNREFUSED:
- break;
- default:
- ac.error = -EIO;
- goto error;
+ ret = -ERESTARTSYS;
+ if (afs_begin_vlserver_operation(&vc, cell, key)) {
+ while (afs_select_vlserver(&vc)) {
+ if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
+ alist = afs_yfsvl_get_endpoints(&vc, uuid);
+ else
+ alist = afs_vl_get_addrs_u(&vc, uuid);
}
+
+ ret = afs_end_vlserver_operation(&vc);
}
-error:
- return ERR_PTR(afs_end_cursor(&ac));
+ return ret < 0 ? ERR_PTR(ret) : alist;
}
/*
@@ -382,9 +367,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
struct afs_addr_cursor ac = {
.alist = alist,
- .start = alist->index,
- .index = 0,
- .addr = &alist->addrs[alist->index],
+ .index = alist->preferred,
.error = 0,
};
_enter("%p", server);
@@ -392,6 +375,9 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+ wait_var_event(&server->probe_outstanding,
+ atomic_read(&server->probe_outstanding) == 0);
+
call_rcu(&server->rcu, afs_server_rcu);
afs_dec_servers_outstanding(net);
}
@@ -525,99 +511,6 @@ void afs_purge_servers(struct afs_net *net)
}
/*
- * Probe a fileserver to find its capabilities.
- *
- * TODO: Try service upgrade.
- */
-static bool afs_do_probe_fileserver(struct afs_fs_cursor *fc)
-{
- _enter("");
-
- fc->ac.addr = NULL;
- fc->ac.start = READ_ONCE(fc->ac.alist->index);
- fc->ac.index = fc->ac.start;
- fc->ac.error = 0;
- fc->ac.begun = false;
-
- while (afs_iterate_addresses(&fc->ac)) {
- afs_fs_get_capabilities(afs_v2net(fc->vnode), fc->cbi->server,
- &fc->ac, fc->key);
- switch (fc->ac.error) {
- case 0:
- afs_end_cursor(&fc->ac);
- set_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags);
- return true;
- case -ECONNABORTED:
- fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
- goto error;
- case -ENOMEM:
- case -ENONET:
- goto error;
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -ECONNREFUSED:
- case -ETIMEDOUT:
- case -ETIME:
- break;
- default:
- fc->ac.error = -EIO;
- goto error;
- }
- }
-
-error:
- afs_end_cursor(&fc->ac);
- return false;
-}
-
-/*
- * If we haven't already, try probing the fileserver to get its capabilities.
- * We try not to instigate parallel probes, but it's possible that the parallel
- * probes will fail due to authentication failure when ours would succeed.
- *
- * TODO: Try sending an anonymous probe if an authenticated probe fails.
- */
-bool afs_probe_fileserver(struct afs_fs_cursor *fc)
-{
- bool success;
- int ret, retries = 0;
-
- _enter("");
-
-retry:
- if (test_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags)) {
- _leave(" = t");
- return true;
- }
-
- if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags)) {
- success = afs_do_probe_fileserver(fc);
- clear_bit_unlock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags);
- wake_up_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING);
- _leave(" = t");
- return success;
- }
-
- _debug("wait");
- ret = wait_on_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING,
- TASK_INTERRUPTIBLE);
- if (ret == -ERESTARTSYS) {
- fc->ac.error = ret;
- _leave(" = f [%d]", ret);
- return false;
- }
-
- retries++;
- if (retries == 4) {
- fc->ac.error = -ESTALE;
- _leave(" = f [stale]");
- return false;
- }
- _debug("retry");
- goto retry;
-}
-
-/*
* Get an update for a server's address list.
*/
static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 8a5760aa5832..95d0761cdb34 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -118,11 +118,11 @@ bool afs_annotate_server_list(struct afs_server_list *new,
return false;
changed:
- /* Maintain the same current server as before if possible. */
- cur = old->servers[old->index].server;
+ /* Maintain the same preferred server as before if possible. */
+ cur = old->servers[old->preferred].server;
for (j = 0; j < new->nr_servers; j++) {
if (new->servers[j].server == cur) {
- new->index = j;
+ new->preferred = j;
break;
}
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 4d3e274207fb..dcd07fe99871 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -406,10 +406,11 @@ static int afs_fill_super(struct super_block *sb,
inode = afs_iget_pseudo_dir(sb, true);
sb->s_flags |= SB_RDONLY;
} else {
- sprintf(sb->s_id, "%u", as->volume->vid);
+ sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
fid.vid = as->volume->vid;
fid.vnode = 1;
+ fid.vnode_hi = 0;
fid.unique = 1;
inode = afs_iget(sb, params->key, &fid, NULL, NULL, NULL);
}
@@ -663,7 +664,7 @@ static void afs_destroy_inode(struct inode *inode)
{
struct afs_vnode *vnode = AFS_FS_I(inode);
- _enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);
+ _enter("%p{%llx:%llu}", inode, vnode->fid.vid, vnode->fid.vnode);
_debug("DESTROY INODE %p", inode);
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
new file mode 100644
index 000000000000..b4f1a84519b9
--- /dev/null
+++ b/fs/afs/vl_list.c
@@ -0,0 +1,340 @@
+/* AFS vlserver list management.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len,
+ unsigned short port)
+{
+ struct afs_vlserver *vlserver;
+
+ vlserver = kzalloc(struct_size(vlserver, name, name_len + 1),
+ GFP_KERNEL);
+ if (vlserver) {
+ atomic_set(&vlserver->usage, 1);
+ rwlock_init(&vlserver->lock);
+ init_waitqueue_head(&vlserver->probe_wq);
+ spin_lock_init(&vlserver->probe_lock);
+ vlserver->name_len = name_len;
+ vlserver->port = port;
+ memcpy(vlserver->name, name, name_len);
+ }
+ return vlserver;
+}
+
+static void afs_vlserver_rcu(struct rcu_head *rcu)
+{
+ struct afs_vlserver *vlserver = container_of(rcu, struct afs_vlserver, rcu);
+
+ afs_put_addrlist(rcu_access_pointer(vlserver->addresses));
+ kfree_rcu(vlserver, rcu);
+}
+
+void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver)
+{
+ if (vlserver) {
+ unsigned int u = atomic_dec_return(&vlserver->usage);
+ //_debug("VL PUT %p{%u}", vlserver, u);
+
+ if (u == 0)
+ call_rcu(&vlserver->rcu, afs_vlserver_rcu);
+ }
+}
+
+struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
+{
+ struct afs_vlserver_list *vllist;
+
+ vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL);
+ if (vllist) {
+ atomic_set(&vllist->usage, 1);
+ rwlock_init(&vllist->lock);
+ }
+
+ return vllist;
+}
+
+void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist)
+{
+ if (vllist) {
+ unsigned int u = atomic_dec_return(&vllist->usage);
+
+ //_debug("VLLS PUT %p{%u}", vllist, u);
+ if (u == 0) {
+ int i;
+
+ for (i = 0; i < vllist->nr_servers; i++) {
+ afs_put_vlserver(net, vllist->servers[i].server);
+ }
+ kfree_rcu(vllist, rcu);
+ }
+ }
+}
+
+static u16 afs_extract_le16(const u8 **_b)
+{
+ u16 val;
+
+ val = (u16)*(*_b)++ << 0;
+ val |= (u16)*(*_b)++ << 8;
+ return val;
+}
+
+/*
+ * Build a VL server address list from a DNS queried server list.
+ */
+static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
+ u8 nr_addrs, u16 port)
+{
+ struct afs_addr_list *alist;
+ const u8 *b = *_b;
+ int ret = -EINVAL;
+
+ alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE, port);
+ if (!alist)
+ return ERR_PTR(-ENOMEM);
+ if (nr_addrs == 0)
+ return alist;
+
+ for (; nr_addrs > 0 && end - b >= nr_addrs; nr_addrs--) {
+ struct dns_server_list_v1_address hdr;
+ __be32 x[4];
+
+ hdr.address_type = *b++;
+
+ switch (hdr.address_type) {
+ case DNS_ADDRESS_IS_IPV4:
+ if (end - b < 4) {
+ _leave(" = -EINVAL [short inet]");
+ goto error;
+ }
+ memcpy(x, b, 4);
+ afs_merge_fs_addr4(alist, x[0], port);
+ b += 4;
+ break;
+
+ case DNS_ADDRESS_IS_IPV6:
+ if (end - b < 16) {
+ _leave(" = -EINVAL [short inet6]");
+ goto error;
+ }
+ memcpy(x, b, 16);
+ afs_merge_fs_addr6(alist, x, port);
+ b += 16;
+ break;
+
+ default:
+ _leave(" = -EADDRNOTAVAIL [unknown af %u]",
+ hdr.address_type);
+ ret = -EADDRNOTAVAIL;
+ goto error;
+ }
+ }
+
+ /* Start with IPv6 if available. */
+ if (alist->nr_ipv4 < alist->nr_addrs)
+ alist->preferred = alist->nr_ipv4;
+
+ *_b = b;
+ return alist;
+
+error:
+ *_b = b;
+ afs_put_addrlist(alist);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Build a VL server list from a DNS queried server list.
+ */
+struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
+ const void *buffer,
+ size_t buffer_size)
+{
+ const struct dns_server_list_v1_header *hdr = buffer;
+ struct dns_server_list_v1_server bs;
+ struct afs_vlserver_list *vllist, *previous;
+ struct afs_addr_list *addrs;
+ struct afs_vlserver *server;
+ const u8 *b = buffer, *end = buffer + buffer_size;
+ int ret = -ENOMEM, nr_servers, i, j;
+
+ _enter("");
+
+ /* Check that it's a server list, v1 */
+ if (end - b < sizeof(*hdr) ||
+ hdr->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST ||
+ hdr->hdr.version != 1) {
+ pr_notice("kAFS: Got DNS record [%u,%u] len %zu\n",
+ hdr->hdr.content, hdr->hdr.version, end - b);
+ ret = -EDESTADDRREQ;
+ goto dump;
+ }
+
+ nr_servers = hdr->nr_servers;
+
+ vllist = afs_alloc_vlserver_list(nr_servers);
+ if (!vllist)
+ return ERR_PTR(-ENOMEM);
+
+ vllist->source = (hdr->source < NR__dns_record_source) ?
+ hdr->source : NR__dns_record_source;
+ vllist->status = (hdr->status < NR__dns_lookup_status) ?
+ hdr->status : NR__dns_lookup_status;
+
+ read_lock(&cell->vl_servers_lock);
+ previous = afs_get_vlserverlist(
+ rcu_dereference_protected(cell->vl_servers,
+ lockdep_is_held(&cell->vl_servers_lock)));
+ read_unlock(&cell->vl_servers_lock);
+
+ b += sizeof(*hdr);
+ while (end - b >= sizeof(bs)) {
+ bs.name_len = afs_extract_le16(&b);
+ bs.priority = afs_extract_le16(&b);
+ bs.weight = afs_extract_le16(&b);
+ bs.port = afs_extract_le16(&b);
+ bs.source = *b++;
+ bs.status = *b++;
+ bs.protocol = *b++;
+ bs.nr_addrs = *b++;
+
+ _debug("extract %u %u %u %u %u %u %*.*s",
+ bs.name_len, bs.priority, bs.weight,
+ bs.port, bs.protocol, bs.nr_addrs,
+ bs.name_len, bs.name_len, b);
+
+ if (end - b < bs.name_len)
+ break;
+
+ ret = -EPROTONOSUPPORT;
+ if (bs.protocol == DNS_SERVER_PROTOCOL_UNSPECIFIED) {
+ bs.protocol = DNS_SERVER_PROTOCOL_UDP;
+ } else if (bs.protocol != DNS_SERVER_PROTOCOL_UDP) {
+ _leave(" = [proto %u]", bs.protocol);
+ goto error;
+ }
+
+ if (bs.port == 0)
+ bs.port = AFS_VL_PORT;
+ if (bs.source > NR__dns_record_source)
+ bs.source = NR__dns_record_source;
+ if (bs.status > NR__dns_lookup_status)
+ bs.status = NR__dns_lookup_status;
+
+ server = NULL;
+ if (previous) {
+ /* See if we can update an old server record */
+ for (i = 0; i < previous->nr_servers; i++) {
+ struct afs_vlserver *p = previous->servers[i].server;
+
+ if (p->name_len == bs.name_len &&
+ p->port == bs.port &&
+ strncasecmp(b, p->name, bs.name_len) == 0) {
+ server = afs_get_vlserver(p);
+ break;
+ }
+ }
+ }
+
+ if (!server) {
+ ret = -ENOMEM;
+ server = afs_alloc_vlserver(b, bs.name_len, bs.port);
+ if (!server)
+ goto error;
+ }
+
+ b += bs.name_len;
+
+ /* Extract the addresses - note that we can't skip this as we
+ * have to advance the payload pointer.
+ */
+ addrs = afs_extract_vl_addrs(&b, end, bs.nr_addrs, bs.port);
+ if (IS_ERR(addrs)) {
+ ret = PTR_ERR(addrs);
+ goto error_2;
+ }
+
+ if (vllist->nr_servers >= nr_servers) {
+ _debug("skip %u >= %u", vllist->nr_servers, nr_servers);
+ afs_put_addrlist(addrs);
+ afs_put_vlserver(cell->net, server);
+ continue;
+ }
+
+ addrs->source = bs.source;
+ addrs->status = bs.status;
+
+ if (addrs->nr_addrs == 0) {
+ afs_put_addrlist(addrs);
+ if (!rcu_access_pointer(server->addresses)) {
+ afs_put_vlserver(cell->net, server);
+ continue;
+ }
+ } else {
+ struct afs_addr_list *old = addrs;
+
+ write_lock(&server->lock);
+ rcu_swap_protected(server->addresses, old,
+ lockdep_is_held(&server->lock));
+ write_unlock(&server->lock);
+ afs_put_addrlist(old);
+ }
+
+
+ /* TODO: Might want to check for duplicates */
+
+ /* Insertion-sort by priority and weight */
+ for (j = 0; j < vllist->nr_servers; j++) {
+ if (bs.priority < vllist->servers[j].priority)
+ break; /* Lower preferable */
+ if (bs.priority == vllist->servers[j].priority &&
+ bs.weight > vllist->servers[j].weight)
+ break; /* Higher preferable */
+ }
+
+ if (j < vllist->nr_servers) {
+ memmove(vllist->servers + j + 1,
+ vllist->servers + j,
+ (vllist->nr_servers - j) * sizeof(struct afs_vlserver_entry));
+ }
+
+ clear_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+
+ vllist->servers[j].priority = bs.priority;
+ vllist->servers[j].weight = bs.weight;
+ vllist->servers[j].server = server;
+ vllist->nr_servers++;
+ }
+
+ if (b != end) {
+ _debug("parse error %zd", b - end);
+ goto error;
+ }
+
+ afs_put_vlserverlist(cell->net, previous);
+ _leave(" = ok [%u]", vllist->nr_servers);
+ return vllist;
+
+error_2:
+ afs_put_vlserver(cell->net, server);
+error:
+ afs_put_vlserverlist(cell->net, vllist);
+ afs_put_vlserverlist(cell->net, previous);
+dump:
+ if (ret != -ENOMEM) {
+ printk(KERN_DEBUG "DNS: at %zu\n", (const void *)b - buffer);
+ print_hex_dump_bytes("DNS: ", DUMP_PREFIX_NONE, buffer, buffer_size);
+ }
+ return ERR_PTR(ret);
+}
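
The loop above consumes one packed little-endian record per server: four 16-bit fields (name length, priority, weight, port), four single bytes (source, status, protocol, address count), the name, then the addresses. A standalone user-space sketch that decodes one such record header; the byte values are invented for illustration and extract_le16() simply re-implements afs_extract_le16() above.

#include <stdint.h>
#include <stdio.h>

static uint16_t extract_le16(const uint8_t **b)
{
	uint16_t val = (*b)[0] | ((uint16_t)(*b)[1] << 8);

	*b += 2;
	return val;
}

int main(void)
{
	/* name_len=5, priority=1, weight=10, port=7003 (0x1b5b), then
	 * source, status, protocol (1 = UDP), nr_addrs, then "afsdb".
	 */
	static const uint8_t rec[] = {
		0x05, 0x00,  0x01, 0x00,  0x0a, 0x00,  0x5b, 0x1b,
		0x01, 0x01,  0x01, 0x01,  'a', 'f', 's', 'd', 'b',
	};
	const uint8_t *b = rec;
	uint16_t name_len = extract_le16(&b);
	uint16_t priority = extract_le16(&b);
	uint16_t weight   = extract_le16(&b);
	uint16_t port     = extract_le16(&b);

	printf("name_len=%u priority=%u weight=%u port=%u name=%.*s\n",
	       name_len, priority, weight, port,
	       name_len, (const char *)(b + 4));
	return 0;
}

The records are then insertion-sorted by ascending priority, with ties broken by descending weight, following DNS SRV semantics.
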
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
new file mode 100644
index 000000000000..c0f616bd70cb
--- /dev/null
+++ b/fs/afs/vl_probe.c
@@ -0,0 +1,273 @@
+/* AFS vlserver probing
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "afs_fs.h"
+#include "internal.h"
+#include "protocol_yfs.h"
+
+static bool afs_vl_probe_done(struct afs_vlserver *server)
+{
+ if (!atomic_dec_and_test(&server->probe_outstanding))
+ return false;
+
+ wake_up_var(&server->probe_outstanding);
+ clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
+ wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
+ return true;
+}
+
+/*
+ * Process the result of probing a vlserver. This is called after successful
+ * or failed delivery of a VL.GetCapabilities operation.
+ */
+void afs_vlserver_probe_result(struct afs_call *call)
+{
+ struct afs_addr_list *alist = call->alist;
+ struct afs_vlserver *server = call->reply[0];
+ unsigned int server_index = (long)call->reply[1];
+ unsigned int index = call->addr_ix;
+ unsigned int rtt = UINT_MAX;
+ bool have_result = false;
+ u64 _rtt;
+ int ret = call->error;
+
+ _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code);
+
+ spin_lock(&server->probe_lock);
+
+ switch (ret) {
+ case 0:
+ server->probe.error = 0;
+ goto responded;
+ case -ECONNABORTED:
+ if (!server->probe.responded) {
+ server->probe.abort_code = call->abort_code;
+ server->probe.error = ret;
+ }
+ goto responded;
+ case -ENOMEM:
+ case -ENONET:
+ server->probe.local_failure = true;
+ afs_io_error(call, afs_io_error_vl_probe_fail);
+ goto out;
+ case -ECONNRESET: /* Responded, but call expired. */
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ case -ETIMEDOUT:
+ case -ETIME:
+ default:
+ clear_bit(index, &alist->responded);
+ set_bit(index, &alist->failed);
+ if (!server->probe.responded &&
+ (server->probe.error == 0 ||
+ server->probe.error == -ETIMEDOUT ||
+ server->probe.error == -ETIME))
+ server->probe.error = ret;
+ afs_io_error(call, afs_io_error_vl_probe_fail);
+ goto out;
+ }
+
+responded:
+ set_bit(index, &alist->responded);
+ clear_bit(index, &alist->failed);
+
+ if (call->service_id == YFS_VL_SERVICE) {
+ server->probe.is_yfs = true;
+ set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
+ alist->addrs[index].srx_service = call->service_id;
+ } else {
+ server->probe.not_yfs = true;
+ if (!server->probe.is_yfs) {
+ clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
+ alist->addrs[index].srx_service = call->service_id;
+ }
+ }
+
+ /* Get the RTT and scale it to fit into a 32-bit value that represents
+ * over a minute of time so that we can access it with one instruction
+ * on a 32-bit system.
+ */
+ _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
+ _rtt /= 64;
+ rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
+ if (rtt < server->probe.rtt) {
+ server->probe.rtt = rtt;
+ alist->preferred = index;
+ have_result = true;
+ }
+
+ smp_wmb(); /* Set rtt before responded. */
+ server->probe.responded = true;
+ set_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+out:
+ spin_unlock(&server->probe_lock);
+
+ _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
+ server_index, index, &alist->addrs[index].transport,
+ (unsigned int)rtt, ret);
+
+ have_result |= afs_vl_probe_done(server);
+ if (have_result) {
+ server->probe.have_result = true;
+ wake_up_var(&server->probe.have_result);
+ wake_up_all(&server->probe_wq);
+ }
+}
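
To put the scaling above into numbers, assuming rxrpc_kernel_get_rtt() reports the smoothed RTT in nanoseconds (which is what the divide-by-64 and the "over a minute" remark suggest):

/*
 * The stored value is in units of 64ns, so a u32 spans
 *	UINT_MAX * 64ns ~= 275 seconds		(comfortably over a minute)
 * while typical RTTs sit far below the ceiling:
 *	100us	->  100000 / 64 =  1562
 *	1ms	-> 1000000 / 64 = 15625
 * letting 32-bit machines compare s->probe.rtt with a single load.
 */
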
+
+/*
+ * Probe all of a vlserver's addresses to find out the best route and to
+ * query its capabilities.
+ */
+static int afs_do_probe_vlserver(struct afs_net *net,
+ struct afs_vlserver *server,
+ struct key *key,
+ unsigned int server_index)
+{
+ struct afs_addr_cursor ac = {
+ .index = 0,
+ };
+ int ret;
+
+ _enter("%s", server->name);
+
+ read_lock(&server->lock);
+ ac.alist = rcu_dereference_protected(server->addresses,
+ lockdep_is_held(&server->lock));
+ read_unlock(&server->lock);
+
+ atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+ memset(&server->probe, 0, sizeof(server->probe));
+ server->probe.rtt = UINT_MAX;
+
+ for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
+ ret = afs_vl_get_capabilities(net, &ac, key, server,
+ server_index, true);
+ if (ret != -EINPROGRESS) {
+ afs_vl_probe_done(server);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Send off probes to all unprobed servers.
+ */
+int afs_send_vl_probes(struct afs_net *net, struct key *key,
+ struct afs_vlserver_list *vllist)
+{
+ struct afs_vlserver *server;
+ int i, ret;
+
+ for (i = 0; i < vllist->nr_servers; i++) {
+ server = vllist->servers[i].server;
+ if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
+ continue;
+
+ if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags)) {
+ ret = afs_do_probe_vlserver(net, server, key, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Wait for the first as-yet untried server to respond.
+ */
+int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
+ unsigned long untried)
+{
+ struct wait_queue_entry *waits;
+ struct afs_vlserver *server;
+ unsigned int rtt = UINT_MAX;
+ bool have_responders = false;
+ int pref = -1, i;
+
+ _enter("%u,%lx", vllist->nr_servers, untried);
+
+ /* Only wait for servers that have a probe outstanding. */
+ for (i = 0; i < vllist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = vllist->servers[i].server;
+ if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
+ __clear_bit(i, &untried);
+ if (server->probe.responded)
+ have_responders = true;
+ }
+ }
+ if (have_responders || !untried)
+ return 0;
+
+ waits = kmalloc(array_size(vllist->nr_servers, sizeof(*waits)), GFP_KERNEL);
+ if (!waits)
+ return -ENOMEM;
+
+ for (i = 0; i < vllist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = vllist->servers[i].server;
+ init_waitqueue_entry(&waits[i], current);
+ add_wait_queue(&server->probe_wq, &waits[i]);
+ }
+ }
+
+ for (;;) {
+ bool still_probing = false;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ for (i = 0; i < vllist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = vllist->servers[i].server;
+ if (server->probe.responded)
+ goto stop;
+ if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
+ still_probing = true;
+ }
+ }
+
+ if (!still_probing || unlikely(signal_pending(current)))
+ goto stop;
+ schedule();
+ }
+
+stop:
+ set_current_state(TASK_RUNNING);
+
+ for (i = 0; i < vllist->nr_servers; i++) {
+ if (test_bit(i, &untried)) {
+ server = vllist->servers[i].server;
+ if (server->probe.responded &&
+ server->probe.rtt < rtt) {
+ pref = i;
+ rtt = server->probe.rtt;
+ }
+
+ remove_wait_queue(&server->probe_wq, &waits[i]);
+ }
+ }
+
+ kfree(waits);
+
+ if (pref == -1 && signal_pending(current))
+ return -ERESTARTSYS;
+
+ if (pref >= 0)
+ vllist->preferred = pref;
+
+ _leave(" = 0 [%u]", pref);
+ return 0;
+}
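
Together the two routines above give callers a fire-and-wait idiom; a sketch of the intended usage (the same shape appears in afs_select_vlserver() in the next file), with the untried bitmask carrying one bit per server still worth trying.

static int example_probe_and_wait(struct afs_net *net, struct key *key,
				  struct afs_vlserver_list *vllist)
{
	/* One bit per server that has not yet been tried. */
	unsigned long untried = (1UL << vllist->nr_servers) - 1;
	int ret;

	ret = afs_send_vl_probes(net, key, vllist);
	if (ret < 0)
		return ret;

	/* Returns once an untried server has responded, all outstanding
	 * probes have finished, or a signal arrives; afs_select_vlserver()
	 * then starts from vllist->preferred and falls back to scanning the
	 * untried mask.
	 */
	return afs_wait_for_vl_probes(vllist, untried);
}
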
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
new file mode 100644
index 000000000000..b64a284b99d2
--- /dev/null
+++ b/fs/afs/vl_rotate.c
@@ -0,0 +1,355 @@
+/* Handle vlserver selection and rotation.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include "internal.h"
+#include "afs_vl.h"
+
+/*
+ * Begin an operation on a volume location server.
+ */
+bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cell,
+ struct key *key)
+{
+ memset(vc, 0, sizeof(*vc));
+ vc->cell = cell;
+ vc->key = key;
+ vc->error = -EDESTADDRREQ;
+ vc->ac.error = SHRT_MAX;
+
+ if (signal_pending(current)) {
+ vc->error = -EINTR;
+ vc->flags |= AFS_VL_CURSOR_STOP;
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Begin iteration through a server list, starting with the last used server if
+ * possible, or the last recorded good server if not.
+ */
+static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+{
+ struct afs_cell *cell = vc->cell;
+
+ if (wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET,
+ TASK_INTERRUPTIBLE)) {
+ vc->error = -ERESTARTSYS;
+ return false;
+ }
+
+ read_lock(&cell->vl_servers_lock);
+ vc->server_list = afs_get_vlserverlist(
+ rcu_dereference_protected(cell->vl_servers,
+ lockdep_is_held(&cell->vl_servers_lock)));
+ read_unlock(&cell->vl_servers_lock);
+ if (!vc->server_list || !vc->server_list->nr_servers)
+ return false;
+
+ vc->untried = (1UL << vc->server_list->nr_servers) - 1;
+ vc->index = -1;
+ return true;
+}
+
+/*
+ * Select the vlserver to use. May be called multiple times to rotate
+ * through the vlservers.
+ */
+bool afs_select_vlserver(struct afs_vl_cursor *vc)
+{
+ struct afs_addr_list *alist;
+ struct afs_vlserver *vlserver;
+ u32 rtt;
+ int error = vc->ac.error, abort_code, i;
+
+ _enter("%lx[%d],%lx[%d],%d,%d",
+ vc->untried, vc->index,
+ vc->ac.tried, vc->ac.index,
+ error, vc->ac.abort_code);
+
+ if (vc->flags & AFS_VL_CURSOR_STOP) {
+ _leave(" = f [stopped]");
+ return false;
+ }
+
+ vc->nr_iterations++;
+
+ /* Evaluate the result of the previous operation, if there was one. */
+ switch (error) {
+ case SHRT_MAX:
+ goto start;
+
+ default:
+ case 0:
+ /* Success or local failure. Stop. */
+ vc->error = error;
+ vc->flags |= AFS_VL_CURSOR_STOP;
+ _leave(" = f [okay/local %d]", vc->ac.error);
+ return false;
+
+ case -ECONNABORTED:
+ /* The far side rejected the operation on some grounds. This
+ * might involve the server being busy or the volume having been moved.
+ */
+ switch (vc->ac.abort_code) {
+ case AFSVL_IO:
+ case AFSVL_BADVOLOPER:
+ case AFSVL_NOMEM:
+ /* The server went weird. */
+ vc->error = -EREMOTEIO;
+ //write_lock(&vc->cell->vl_servers_lock);
+ //vc->server_list->weird_mask |= 1 << vc->index;
+ //write_unlock(&vc->cell->vl_servers_lock);
+ goto next_server;
+
+ default:
+ vc->error = afs_abort_to_error(vc->ac.abort_code);
+ goto failed;
+ }
+
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ case -ETIMEDOUT:
+ case -ETIME:
+ _debug("no conn %d", error);
+ vc->error = error;
+ goto iterate_address;
+
+ case -ECONNRESET:
+ _debug("call reset");
+ vc->error = error;
+ vc->flags |= AFS_VL_CURSOR_RETRY;
+ goto next_server;
+ }
+
+restart_from_beginning:
+ _debug("restart");
+ afs_end_cursor(&vc->ac);
+ afs_put_vlserverlist(vc->cell->net, vc->server_list);
+ vc->server_list = NULL;
+ if (vc->flags & AFS_VL_CURSOR_RETRIED)
+ goto failed;
+ vc->flags |= AFS_VL_CURSOR_RETRIED;
+start:
+ _debug("start");
+
+ if (!afs_start_vl_iteration(vc))
+ goto failed;
+
+ error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list);
+ if (error < 0)
+ goto failed_set_error;
+
+pick_server:
+ _debug("pick [%lx]", vc->untried);
+
+ error = afs_wait_for_vl_probes(vc->server_list, vc->untried);
+ if (error < 0)
+ goto failed_set_error;
+
+ /* Pick the untried server with the lowest RTT. */
+ vc->index = vc->server_list->preferred;
+ if (test_bit(vc->index, &vc->untried))
+ goto selected_server;
+
+ vc->index = -1;
+ rtt = U32_MAX;
+ for (i = 0; i < vc->server_list->nr_servers; i++) {
+ struct afs_vlserver *s = vc->server_list->servers[i].server;
+
+ if (!test_bit(i, &vc->untried) || !s->probe.responded)
+ continue;
+ if (s->probe.rtt < rtt) {
+ vc->index = i;
+ rtt = s->probe.rtt;
+ }
+ }
+
+ if (vc->index == -1)
+ goto no_more_servers;
+
+selected_server:
+ _debug("use %d", vc->index);
+ __clear_bit(vc->index, &vc->untried);
+
+ /* We're starting on a different vlserver from the list. We need to
+ * check it, find its address list and probe its capabilities before we
+ * use it.
+ */
+ ASSERTCMP(vc->ac.alist, ==, NULL);
+ vlserver = vc->server_list->servers[vc->index].server;
+ vc->server = vlserver;
+
+ _debug("USING VLSERVER: %s", vlserver->name);
+
+ read_lock(&vlserver->lock);
+ alist = rcu_dereference_protected(vlserver->addresses,
+ lockdep_is_held(&vlserver->lock));
+ afs_get_addrlist(alist);
+ read_unlock(&vlserver->lock);
+
+ memset(&vc->ac, 0, sizeof(vc->ac));
+
+ if (!vc->ac.alist)
+ vc->ac.alist = alist;
+ else
+ afs_put_addrlist(alist);
+
+ vc->ac.index = -1;
+
+iterate_address:
+ ASSERT(vc->ac.alist);
+ /* Iterate over the current server's address list to try and find an
+ * address on which it will respond to us.
+ */
+ if (!afs_iterate_addresses(&vc->ac))
+ goto next_server;
+
+ _debug("VL address %d/%d", vc->ac.index, vc->ac.alist->nr_addrs);
+
+ _leave(" = t %pISpc", &vc->ac.alist->addrs[vc->ac.index].transport);
+ return true;
+
+next_server:
+ _debug("next");
+ afs_end_cursor(&vc->ac);
+ goto pick_server;
+
+no_more_servers:
+ /* That's all the servers poked to no good effect. Try again if some
+ * of them were busy.
+ */
+ if (vc->flags & AFS_VL_CURSOR_RETRY)
+ goto restart_from_beginning;
+
+ abort_code = 0;
+ error = -EDESTADDRREQ;
+ for (i = 0; i < vc->server_list->nr_servers; i++) {
+ struct afs_vlserver *s = vc->server_list->servers[i].server;
+ int probe_error = READ_ONCE(s->probe.error);
+
+ switch (probe_error) {
+ case 0:
+ continue;
+ default:
+ if (error == -ETIMEDOUT ||
+ error == -ETIME)
+ continue;
+ case -ETIMEDOUT:
+ case -ETIME:
+ if (error == -ENOMEM ||
+ error == -ENONET)
+ continue;
+ case -ENOMEM:
+ case -ENONET:
+ if (error == -ENETUNREACH)
+ continue;
+ case -ENETUNREACH:
+ if (error == -EHOSTUNREACH)
+ continue;
+ case -EHOSTUNREACH:
+ if (error == -ECONNREFUSED)
+ continue;
+ case -ECONNREFUSED:
+ if (error == -ECONNRESET)
+ continue;
+ case -ECONNRESET: /* Responded, but call expired. */
+ if (error == -ECONNABORTED)
+ continue;
+ case -ECONNABORTED:
+ abort_code = s->probe.abort_code;
+ error = probe_error;
+ continue;
+ }
+ }
+
+ if (error == -ECONNABORTED)
+ error = afs_abort_to_error(abort_code);
+
+failed_set_error:
+ vc->error = error;
+failed:
+ vc->flags |= AFS_VL_CURSOR_STOP;
+ afs_end_cursor(&vc->ac);
+ _leave(" = f [failed %d]", vc->error);
+ return false;
+}
+
+/*
+ * Dump cursor state when the rotation fails with EDESTADDRREQ.
+ */
+static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+{
+ static int count;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3)
+ return;
+ count++;
+
+ rcu_read_lock();
+ pr_notice("EDESTADDR occurred\n");
+ pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+ vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+
+ if (vc->server_list) {
+ const struct afs_vlserver_list *sl = vc->server_list;
+ pr_notice("VC: SL nr=%u ix=%u\n",
+ sl->nr_servers, sl->index);
+ for (i = 0; i < sl->nr_servers; i++) {
+ const struct afs_vlserver *s = sl->servers[i].server;
+ pr_notice("VC: server %s+%hu fl=%lx E=%hd\n",
+ s->name, s->port, s->flags, s->probe.error);
+ if (s->addresses) {
+ const struct afs_addr_list *a =
+ rcu_dereference(s->addresses);
+ pr_notice("VC: - nr=%u/%u/%u pf=%u\n",
+ a->nr_ipv4, a->nr_addrs, a->max_addrs,
+ a->preferred);
+ pr_notice("VC: - pr=%lx R=%lx F=%lx\n",
+ a->probed, a->responded, a->failed);
+ if (a == vc->ac.alist)
+ pr_notice("VC: - current\n");
+ }
+ }
+ }
+
+ pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
+ vc->ac.tried, vc->ac.index, vc->ac.abort_code, vc->ac.error,
+ vc->ac.responded, vc->ac.nr_iterations);
+ rcu_read_unlock();
+}
+
+/*
+ * Tidy up a volume location server cursor.
+ */
+int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
+{
+ struct afs_net *net = vc->cell->net;
+
+ if (vc->error == -EDESTADDRREQ ||
+ vc->error == -ENETUNREACH ||
+ vc->error == -EHOSTUNREACH)
+ afs_vl_dump_edestaddrreq(vc);
+
+ afs_end_cursor(&vc->ac);
+ afs_put_vlserverlist(net, vc->server_list);
+
+ if (vc->error == -ECONNABORTED)
+ vc->error = afs_abort_to_error(vc->ac.abort_code);
+
+ return vc->error;
+}
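
A hypothetical wrapper showing how this cursor is meant to be consumed, mirroring afs_vl_lookup_addrs() earlier in this patch; the VLDB entry type and the RPC call come from vlclient.c below, everything else is illustrative.

static struct afs_vldb_entry *example_lookup_vldb(struct afs_cell *cell,
						  struct key *key,
						  const char *name, int namesz)
{
	struct afs_vldb_entry *entry = NULL;
	struct afs_vl_cursor vc;
	int ret = -ERESTARTSYS;

	if (afs_begin_vlserver_operation(&vc, cell, key)) {
		/* Each iteration retries the RPC against the vlserver and
		 * address chosen by afs_select_vlserver() above.
		 */
		while (afs_select_vlserver(&vc))
			entry = afs_vl_get_entry_by_name_u(&vc, name, namesz);

		ret = afs_end_vlserver_operation(&vc);
	}

	return ret < 0 ? ERR_PTR(ret) : entry;
}
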
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index c3b740813fc7..c3d9e5a5f67e 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -128,14 +128,13 @@ static const struct afs_call_type afs_RXVLGetEntryByNameU = {
* Dispatch a get volume entry by name or ID operation (uuid variant). If the
* volname is a decimal number then it's a volume ID not a volume name.
*/
-struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net,
- struct afs_addr_cursor *ac,
- struct key *key,
+struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc,
const char *volname,
int volnamesz)
{
struct afs_vldb_entry *entry;
struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
size_t reqsz, padsz;
__be32 *bp;
@@ -155,7 +154,7 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net,
return ERR_PTR(-ENOMEM);
}
- call->key = key;
+ call->key = vc->key;
call->reply[0] = entry;
call->ret_reply0 = true;
@@ -168,7 +167,7 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net,
memset((void *)bp + volnamesz, 0, padsz);
trace_afs_make_vl_call(call);
- return (struct afs_vldb_entry *)afs_make_call(ac, call, GFP_KERNEL, false);
+ return (struct afs_vldb_entry *)afs_make_call(&vc->ac, call, GFP_KERNEL, false);
}
/*
@@ -187,19 +186,18 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
u32 uniquifier, nentries, count;
int i, ret;
- _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count);
+ _enter("{%u,%zu/%u}",
+ call->unmarshall, iov_iter_count(call->_iter), call->count);
-again:
switch (call->unmarshall) {
case 0:
- call->offset = 0;
+ afs_extract_to_buf(call,
+ sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32));
call->unmarshall++;
/* Extract the returned uuid, uniquifier, nentries and blkaddrs size */
case 1:
- ret = afs_extract_data(call, call->buffer,
- sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32),
- true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -216,28 +214,28 @@ again:
call->reply[0] = alist;
call->count = count;
call->count2 = nentries;
- call->offset = 0;
call->unmarshall++;
+ more_entries:
+ count = min(call->count, 4U);
+ afs_extract_to_buf(call, count * sizeof(__be32));
+
/* Extract entries */
case 2:
- count = min(call->count, 4U);
- ret = afs_extract_data(call, call->buffer,
- count * sizeof(__be32),
- call->count > 4);
+ ret = afs_extract_data(call, call->count > 4);
if (ret < 0)
return ret;
alist = call->reply[0];
bp = call->buffer;
+ count = min(call->count, 4U);
for (i = 0; i < count; i++)
if (alist->nr_addrs < call->count2)
afs_merge_fs_addr4(alist, *bp++, AFS_FS_PORT);
call->count -= count;
if (call->count > 0)
- goto again;
- call->offset = 0;
+ goto more_entries;
call->unmarshall++;
break;
}
@@ -267,14 +265,13 @@ static const struct afs_call_type afs_RXVLGetAddrsU = {
* Dispatch an operation to get the addresses for a server, where the server is
* nominated by UUID.
*/
-struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
- struct afs_addr_cursor *ac,
- struct key *key,
+struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
const uuid_t *uuid)
{
struct afs_ListAddrByAttributes__xdr *r;
const struct afs_uuid *u = (const struct afs_uuid *)uuid;
struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
__be32 *bp;
int i;
@@ -286,7 +283,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
if (!call)
return ERR_PTR(-ENOMEM);
- call->key = key;
+ call->key = vc->key;
call->reply[0] = NULL;
call->ret_reply0 = true;
@@ -307,7 +304,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
r->uuid.node[i] = htonl(u->node[i]);
trace_afs_make_vl_call(call);
- return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false);
+ return (struct afs_addr_list *)afs_make_call(&vc->ac, call, GFP_KERNEL, false);
}
/*
@@ -318,54 +315,51 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call)
u32 count;
int ret;
- _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count);
+ _enter("{%u,%zu/%u}",
+ call->unmarshall, iov_iter_count(call->_iter), call->count);
-again:
switch (call->unmarshall) {
case 0:
- call->offset = 0;
+ afs_extract_to_tmp(call);
call->unmarshall++;
/* Extract the capabilities word count */
case 1:
- ret = afs_extract_data(call, &call->tmp,
- 1 * sizeof(__be32),
- true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
count = ntohl(call->tmp);
-
call->count = count;
call->count2 = count;
- call->offset = 0;
+
call->unmarshall++;
+ afs_extract_discard(call, count * sizeof(__be32));
/* Extract capabilities words */
case 2:
- count = min(call->count, 16U);
- ret = afs_extract_data(call, call->buffer,
- count * sizeof(__be32),
- call->count > 16);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
/* TODO: Examine capabilities */
- call->count -= count;
- if (call->count > 0)
- goto again;
- call->offset = 0;
call->unmarshall++;
break;
}
- call->reply[0] = (void *)(unsigned long)call->service_id;
-
_leave(" = 0 [done]");
return 0;
}
+static void afs_destroy_vl_get_capabilities(struct afs_call *call)
+{
+ struct afs_vlserver *server = call->reply[0];
+
+ afs_put_vlserver(call->net, server);
+ afs_flat_call_destructor(call);
+}
+
/*
* VL.GetCapabilities operation type
*/
@@ -373,11 +367,12 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
.name = "VL.GetCapabilities",
.op = afs_VL_GetCapabilities,
.deliver = afs_deliver_vl_get_capabilities,
- .destructor = afs_flat_call_destructor,
+ .done = afs_vlserver_probe_result,
+ .destructor = afs_destroy_vl_get_capabilities,
};
/*
- * Probe a fileserver for the capabilities that it supports. This can
+ * Probe a volume server for the capabilities that it supports. This can
* return up to 196 words.
*
* We use this to probe for service upgrade to determine what the server at the
@@ -385,7 +380,10 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
*/
int afs_vl_get_capabilities(struct afs_net *net,
struct afs_addr_cursor *ac,
- struct key *key)
+ struct key *key,
+ struct afs_vlserver *server,
+ unsigned int server_index,
+ bool async)
{
struct afs_call *call;
__be32 *bp;
@@ -397,9 +395,10 @@ int afs_vl_get_capabilities(struct afs_net *net,
return -ENOMEM;
call->key = key;
- call->upgrade = true; /* Let's see if this is a YFS server */
- call->reply[0] = (void *)VLGETCAPABILITIES;
- call->ret_reply0 = true;
+ call->reply[0] = afs_get_vlserver(server);
+ call->reply[1] = (void *)(long)server_index;
+ call->upgrade = true;
+ call->want_reply_time = true;
/* marshall the parameters */
bp = call->request;
@@ -407,7 +406,7 @@ int afs_vl_get_capabilities(struct afs_net *net,
/* Can't take a ref on server */
trace_afs_make_vl_call(call);
- return afs_make_call(ac, call, GFP_KERNEL, false);
+ return afs_make_call(ac, call, GFP_KERNEL, async);
}
/*
@@ -426,22 +425,19 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
u32 uniquifier, size;
int ret;
- _enter("{%u,%zu/%u,%u}", call->unmarshall, call->offset, call->count, call->count2);
+ _enter("{%u,%zu,%u}",
+ call->unmarshall, iov_iter_count(call->_iter), call->count2);
-again:
switch (call->unmarshall) {
case 0:
- call->offset = 0;
+ afs_extract_to_buf(call, sizeof(uuid_t) + 3 * sizeof(__be32));
call->unmarshall = 1;
/* Extract the returned uuid, uniquifier, fsEndpoints count and
* either the first fsEndpoint type or the volEndpoints
* count if there are no fsEndpoints. */
case 1:
- ret = afs_extract_data(call, call->buffer,
- sizeof(uuid_t) +
- 3 * sizeof(__be32),
- true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -451,22 +447,19 @@ again:
call->count2 = ntohl(*bp); /* Type or next count */
if (call->count > YFS_MAXENDPOINTS)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_fsendpt_num);
alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
if (!alist)
return -ENOMEM;
alist->version = uniquifier;
call->reply[0] = alist;
- call->offset = 0;
if (call->count == 0)
goto extract_volendpoints;
- call->unmarshall = 2;
-
- /* Extract fsEndpoints[] entries */
- case 2:
+ next_fsendpoint:
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
size = sizeof(__be32) * (1 + 1 + 1);
@@ -475,11 +468,17 @@ again:
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_fsendpt_type);
}
size += sizeof(__be32);
- ret = afs_extract_data(call, call->buffer, size, true);
+ afs_extract_to_buf(call, size);
+ call->unmarshall = 2;
+
+ /* Extract fsEndpoints[] entries */
+ case 2:
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -488,18 +487,21 @@ again:
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_fsendpt4_len);
afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_fsendpt6_len);
afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
bp += 6;
break;
default:
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_fsendpt_type);
}
/* Got either the type of the next entry or the count of
@@ -507,10 +509,9 @@ again:
*/
call->count2 = ntohl(*bp++);
- call->offset = 0;
call->count--;
if (call->count > 0)
- goto again;
+ goto next_fsendpoint;
extract_volendpoints:
/* Extract the list of volEndpoints. */
@@ -518,8 +519,10 @@ again:
if (!call->count)
goto end;
if (call->count > YFS_MAXENDPOINTS)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_vlendpt_type);
+ afs_extract_to_buf(call, 1 * sizeof(__be32));
call->unmarshall = 3;
/* Extract the type of volEndpoints[0]. Normally we would
@@ -527,17 +530,14 @@ again:
* data of the current one, but this is the first...
*/
case 3:
- ret = afs_extract_data(call, call->buffer, sizeof(__be32), true);
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
bp = call->buffer;
- call->count2 = ntohl(*bp++);
- call->offset = 0;
- call->unmarshall = 4;
- /* Extract volEndpoints[] entries */
- case 4:
+ next_volendpoint:
+ call->count2 = ntohl(*bp++);
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
size = sizeof(__be32) * (1 + 1 + 1);
@@ -546,12 +546,18 @@ again:
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_vlendpt_type);
}
if (call->count > 1)
- size += sizeof(__be32);
- ret = afs_extract_data(call, call->buffer, size, true);
+ size += sizeof(__be32); /* Get next type too */
+ afs_extract_to_buf(call, size);
+ call->unmarshall = 4;
+
+ /* Extract volEndpoints[] entries */
+ case 4:
+ ret = afs_extract_data(call, true);
if (ret < 0)
return ret;
@@ -559,34 +565,35 @@ again:
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_vlendpt4_len);
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_vlendpt6_len);
bp += 6;
break;
default:
- return afs_protocol_error(call, -EBADMSG);
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_yvl_vlendpt_type);
}
/* Got either the type of the next entry or the count of
* volEndpoints if no more fsEndpoints.
*/
- call->offset = 0;
call->count--;
- if (call->count > 0) {
- call->count2 = ntohl(*bp++);
- goto again;
- }
+ if (call->count > 0)
+ goto next_volendpoint;
end:
+ afs_extract_discard(call, 0);
call->unmarshall = 5;
/* Done */
case 5:
- ret = afs_extract_data(call, call->buffer, 0, false);
+ ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
call->unmarshall = 6;
@@ -596,11 +603,6 @@ again:
}
alist = call->reply[0];
-
- /* Start with IPv6 if available. */
- if (alist->nr_ipv4 < alist->nr_addrs)
- alist->index = alist->nr_ipv4;
-
_leave(" = 0 [done]");
return 0;
}
@@ -619,12 +621,11 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = {
* Dispatch an operation to get the addresses for a server, where the server is
* nominated by UUID.
*/
-struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net,
- struct afs_addr_cursor *ac,
- struct key *key,
+struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
const uuid_t *uuid)
{
struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
__be32 *bp;
_enter("");
@@ -635,7 +636,7 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net,
if (!call)
return ERR_PTR(-ENOMEM);
- call->key = key;
+ call->key = vc->key;
call->reply[0] = NULL;
call->ret_reply0 = true;
@@ -646,5 +647,5 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net,
memcpy(bp, uuid, sizeof(*uuid)); /* Type opr_uuid */
trace_afs_make_vl_call(call);
- return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false);
+ return (struct afs_addr_list *)afs_make_call(&vc->ac, call, GFP_KERNEL, false);
}
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 3037bd01f617..00975ed3640f 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -74,55 +74,19 @@ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell,
const char *volname,
size_t volnamesz)
{
- struct afs_addr_cursor ac;
- struct afs_vldb_entry *vldb;
+ struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ);
+ struct afs_vl_cursor vc;
int ret;
- ret = afs_set_vl_cursor(&ac, cell);
- if (ret < 0)
- return ERR_PTR(ret);
-
- while (afs_iterate_addresses(&ac)) {
- if (!test_bit(ac.index, &ac.alist->probed)) {
- ret = afs_vl_get_capabilities(cell->net, &ac, key);
- switch (ret) {
- case VL_SERVICE:
- clear_bit(ac.index, &ac.alist->yfs);
- set_bit(ac.index, &ac.alist->probed);
- ac.addr->srx_service = ret;
- break;
- case YFS_VL_SERVICE:
- set_bit(ac.index, &ac.alist->yfs);
- set_bit(ac.index, &ac.alist->probed);
- ac.addr->srx_service = ret;
- break;
- }
- }
-
- vldb = afs_vl_get_entry_by_name_u(cell->net, &ac, key,
- volname, volnamesz);
- switch (ac.error) {
- case 0:
- afs_end_cursor(&ac);
- return vldb;
- case -ECONNABORTED:
- ac.error = afs_abort_to_error(ac.abort_code);
- goto error;
- case -ENOMEM:
- case -ENONET:
- goto error;
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -ECONNREFUSED:
- break;
- default:
- ac.error = -EIO;
- goto error;
- }
+ if (!afs_begin_vlserver_operation(&vc, cell, key))
+ return ERR_PTR(-ERESTARTSYS);
+
+ while (afs_select_vlserver(&vc)) {
+ vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz);
}
-error:
- return ERR_PTR(afs_end_cursor(&ac));
+ ret = afs_end_vlserver_operation(&vc);
+ return ret < 0 ? ERR_PTR(ret) : vldb;
}
/*
@@ -270,7 +234,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/* We look up an ID by passing it as a decimal string in the
* operation's name parameter.
*/
- idsz = sprintf(idbuf, "%u", volume->vid);
+ idsz = sprintf(idbuf, "%llu", volume->vid);
vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
if (IS_ERR(vldb)) {
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 19c04caf3c01..72efcfcf9f95 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -33,10 +33,21 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
loff_t pos, unsigned int len, struct page *page)
{
struct afs_read *req;
+ size_t p;
+ void *data;
int ret;
_enter(",,%llu", (unsigned long long)pos);
+ if (pos >= vnode->vfs_inode.i_size) {
+ p = pos & ~PAGE_MASK;
+ ASSERTCMP(p + len, <=, PAGE_SIZE);
+ data = kmap(page);
+ memset(data + p, 0, len);
+ kunmap(page);
+ return 0;
+ }
+
req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
GFP_KERNEL);
if (!req)
@@ -81,7 +92,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
- _enter("{%x:%u},{%lx},%u,%u",
+ _enter("{%llx:%llu},{%lx},%u,%u",
vnode->fid.vid, vnode->fid.vnode, index, from, to);
/* We want to store information about how much of a page is altered in
@@ -181,7 +192,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
loff_t i_size, maybe_i_size;
int ret;
- _enter("{%x:%u},{%lx}",
+ _enter("{%llx:%llu},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
maybe_i_size = pos + copied;
@@ -230,7 +241,7 @@ static void afs_kill_pages(struct address_space *mapping,
struct pagevec pv;
unsigned count, loop;
- _enter("{%x:%u},%lx-%lx",
+ _enter("{%llx:%llu},%lx-%lx",
vnode->fid.vid, vnode->fid.vnode, first, last);
pagevec_init(&pv);
@@ -272,7 +283,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
struct pagevec pv;
unsigned count, loop;
- _enter("{%x:%u},%lx-%lx",
+ _enter("{%llx:%llu},%lx-%lx",
vnode->fid.vid, vnode->fid.vnode, first, last);
pagevec_init(&pv);
@@ -314,7 +325,7 @@ static int afs_store_data(struct address_space *mapping,
struct list_head *p;
int ret = -ENOKEY, ret2;
- _enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
+ _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
@@ -533,6 +544,7 @@ no_more:
case -ENOENT:
case -ENOMEDIUM:
case -ENXIO:
+ trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
afs_kill_pages(mapping, first, last);
mapping_set_error(mapping, ret);
break;
@@ -675,7 +687,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
unsigned count, loop;
pgoff_t first = call->first, last = call->last;
- _enter("{%x:%u},{%lx-%lx}",
+ _enter("{%llx:%llu},{%lx-%lx}",
vnode->fid.vid, vnode->fid.vnode, first, last);
pagevec_init(&pv);
@@ -714,7 +726,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
ssize_t result;
size_t count = iov_iter_count(from);
- _enter("{%x.%u},{%zu},",
+ _enter("{%llx:%llu},{%zu},",
vnode->fid.vid, vnode->fid.vnode, count);
if (IS_SWAPFILE(&vnode->vfs_inode)) {
@@ -742,7 +754,7 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
- _enter("{%x:%u},{n=%pD},%d",
+ _enter("{%llx:%llu},{n=%pD},%d",
vnode->fid.vid, vnode->fid.vnode, file,
datasync);
@@ -760,7 +772,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct afs_vnode *vnode = AFS_FS_I(inode);
unsigned long priv;
- _enter("{{%x:%u}},{%lx}",
+ _enter("{{%llx:%llu}},{%lx}",
vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
sb_start_pagefault(inode->i_sb);
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index cfcc674e64a5..a2cdf25573e2 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -72,7 +72,7 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler,
char text[8 + 1 + 8 + 1 + 8 + 1];
size_t len;
- len = sprintf(text, "%x:%x:%x",
+ len = sprintf(text, "%llx:%llx:%x",
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
if (size == 0)
return len;
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
new file mode 100644
index 000000000000..12658c1363ae
--- /dev/null
+++ b/fs/afs/yfsclient.c
@@ -0,0 +1,2184 @@
+/* YFS File Server client stubs
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/circ_buf.h>
+#include <linux/iversion.h>
+#include "internal.h"
+#include "afs_fs.h"
+#include "xdr_fs.h"
+#include "protocol_yfs.h"
+
+static const struct afs_fid afs_zero_fid;
+
+static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
+{
+ call->cbi = afs_get_cb_interest(cbi);
+}
+
+#define xdr_size(x) (sizeof(*x) / sizeof(__be32))
+
+static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid)
+{
+ const struct yfs_xdr_YFSFid *x = (const void *)*_bp;
+
+ fid->vid = xdr_to_u64(x->volume);
+ fid->vnode = xdr_to_u64(x->vnode.lo);
+ fid->vnode_hi = ntohl(x->vnode.hi);
+ fid->unique = ntohl(x->vnode.unique);
+ *_bp += xdr_size(x);
+}
+
+static __be32 *xdr_encode_u32(__be32 *bp, u32 n)
+{
+ *bp++ = htonl(n);
+ return bp;
+}
+
+static __be32 *xdr_encode_u64(__be32 *bp, u64 n)
+{
+ struct yfs_xdr_u64 *x = (void *)bp;
+
+ *x = u64_to_xdr(n);
+ return bp + xdr_size(x);
+}
+
+static __be32 *xdr_encode_YFSFid(__be32 *bp, struct afs_fid *fid)
+{
+ struct yfs_xdr_YFSFid *x = (void *)bp;
+
+ x->volume = u64_to_xdr(fid->vid);
+ x->vnode.lo = u64_to_xdr(fid->vnode);
+ x->vnode.hi = htonl(fid->vnode_hi);
+ x->vnode.unique = htonl(fid->unique);
+ return bp + xdr_size(x);
+}
+
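
A minimal sketch of what splitting a 64-bit ID into two big-endian 32-bit XDR words looks like; the real layout is whatever struct yfs_xdr_u64 in protocol_yfs.h defines (not part of this hunk), so struct be64_words and the helper names here are invented for illustration:

#include <arpa/inet.h>	/* htonl(), ntohl() */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-word, network-order representation of a 64-bit value. */
struct be64_words {
	uint32_t hi;	/* most significant 32 bits, network order */
	uint32_t lo;	/* least significant 32 bits, network order */
};

static struct be64_words split_u64(uint64_t n)
{
	struct be64_words w = {
		.hi = htonl((uint32_t)(n >> 32)),
		.lo = htonl((uint32_t)n),
	};
	return w;
}

static uint64_t join_u64(struct be64_words w)
{
	return ((uint64_t)ntohl(w.hi) << 32) | ntohl(w.lo);
}

int main(void)
{
	uint64_t vid = 0x0123456789abcdefULL;

	printf("round trip ok: %d\n", join_u64(split_u64(vid)) == vid);
	return 0;
}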
+static size_t xdr_strlen(unsigned int len)
+{
+ return sizeof(__be32) + round_up(len, sizeof(__be32));
+}
+
+static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len)
+{
+ bp = xdr_encode_u32(bp, len);
+ bp = memcpy(bp, p, len);
+ if (len & 3) {
+ unsigned int pad = 4 - (len & 3);
+
+ memset((u8 *)bp + len, 0, pad);
+ len += pad;
+ }
+
+ return bp + len / sizeof(__be32);
+}
+
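
For reference, the 4-byte alignment that xdr_strlen() and xdr_encode_string() above rely on can be reproduced in a standalone userspace sketch; xdr_padded_len() and the buffer handling below are invented for the example, and the real code writes the length in network byte order with htonl():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Length word plus payload rounded up to a multiple of 4, as in
 * xdr_strlen() above.
 */
static size_t xdr_padded_len(size_t len)
{
	return 4 + ((len + 3) & ~(size_t)3);
}

int main(void)
{
	const char *name = "example.txt";		/* 11 bytes */
	uint32_t len = (uint32_t)strlen(name);
	unsigned char buf[64] = { 0 };			/* padding pre-zeroed */

	memcpy(buf, &len, sizeof(len));			/* host order here */
	memcpy(buf + 4, name, len);

	printf("payload %u bytes -> %zu bytes on the wire\n",
	       len, xdr_padded_len(len));		/* 11 -> 16 */
	return 0;
}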
+static s64 linux_to_yfs_time(const struct timespec64 *t)
+{
+ /* Convert to 100ns intervals. */
+ return (u64)t->tv_sec * 10000000 + t->tv_nsec/100;
+}
+
+static __be32 *xdr_encode_YFSStoreStatus_mode(__be32 *bp, mode_t mode)
+{
+ struct yfs_xdr_YFSStoreStatus *x = (void *)bp;
+
+ x->mask = htonl(AFS_SET_MODE);
+ x->mode = htonl(mode & S_IALLUGO);
+ x->mtime_client = u64_to_xdr(0);
+ x->owner = u64_to_xdr(0);
+ x->group = u64_to_xdr(0);
+ return bp + xdr_size(x);
+}
+
+static __be32 *xdr_encode_YFSStoreStatus_mtime(__be32 *bp, const struct timespec64 *t)
+{
+ struct yfs_xdr_YFSStoreStatus *x = (void *)bp;
+ s64 mtime = linux_to_yfs_time(t);
+
+ x->mask = htonl(AFS_SET_MTIME);
+ x->mode = htonl(0);
+ x->mtime_client = u64_to_xdr(mtime);
+ x->owner = u64_to_xdr(0);
+ x->group = u64_to_xdr(0);
+ return bp + xdr_size(x);
+}
+
+/*
+ * Convert a signed 100ns-resolution 64-bit time into a timespec.
+ */
+static struct timespec64 yfs_time_to_linux(s64 t)
+{
+ struct timespec64 ts;
+ u64 abs_t;
+
+ /*
+	 * Unfortunately we cannot use normal 64-bit division on a 32-bit arch;
+	 * the alternative, do_div(), does not work with negative numbers, so we
+	 * have to special-case them.
+ */
+ if (t < 0) {
+ abs_t = -t;
+ ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100);
+ ts.tv_nsec = -ts.tv_nsec;
+ ts.tv_sec = -abs_t;
+ } else {
+ abs_t = t;
+ ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100;
+ ts.tv_sec = abs_t;
+ }
+
+ return ts;
+}
+
+static struct timespec64 xdr_to_time(const struct yfs_xdr_u64 xdr)
+{
+ s64 t = xdr_to_u64(xdr);
+
+ return yfs_time_to_linux(t);
+}
+
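
A rough userspace check of the 100ns-interval conversion performed by linux_to_yfs_time() and yfs_time_to_linux() above, using plain 64-bit division (which is exactly what the kernel code avoids on 32-bit, hence do_div() and the negative special case); struct ts is a stand-in for timespec64:

#include <stdint.h>
#include <stdio.h>

struct ts { int64_t sec; long nsec; };	/* stand-in for timespec64 */

/* Seconds + nanoseconds -> signed count of 100ns intervals. */
static int64_t to_yfs_time(struct ts t)
{
	return t.sec * 10000000LL + t.nsec / 100;
}

/* The reverse conversion, done with ordinary 64-bit division. */
static struct ts from_yfs_time(int64_t y)
{
	struct ts t;

	t.sec  = y / 10000000LL;
	t.nsec = (long)(y % 10000000LL) * 100;
	return t;
}

int main(void)
{
	struct ts t = { .sec = 1540000000, .nsec = 123456700 };
	int64_t y = to_yfs_time(t);
	struct ts back = from_yfs_time(y);

	/* Prints 15400000001234567 -> 1540000000.123456700 */
	printf("%lld -> %lld.%09ld\n", (long long)y,
	       (long long)back.sec, back.nsec);
	return 0;
}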
+static void yfs_check_req(struct afs_call *call, __be32 *bp)
+{
+ size_t len = (void *)bp - call->request;
+
+ if (len > call->request_size)
+ pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n",
+ call->type->name, len, call->request_size);
+ else if (len < call->request_size)
+ pr_warning("kAFS: %s: Request buffer underflow (%zu<%u)\n",
+ call->type->name, len, call->request_size);
+}
+
+/*
+ * Dump a bad file status record.
+ */
+static void xdr_dump_bad(const __be32 *bp)
+{
+ __be32 x[4];
+ int i;
+
+ pr_notice("YFS XDR: Bad status record\n");
+ for (i = 0; i < 5 * 4 * 4; i += 16) {
+ memcpy(x, bp, 16);
+ bp += 4;
+ pr_notice("%03x: %08x %08x %08x %08x\n",
+ i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3]));
+ }
+
+ memcpy(x, bp, 4);
+ pr_notice("0x50: %08x\n", ntohl(x[0]));
+}
+
+/*
+ * Decode a YFSFetchStatus block
+ */
+static int xdr_decode_YFSFetchStatus(struct afs_call *call,
+ const __be32 **_bp,
+ struct afs_file_status *status,
+ struct afs_vnode *vnode,
+ const afs_dataversion_t *expected_version,
+ struct afs_read *read_req)
+{
+ const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
+ u32 type;
+ u8 flags = 0;
+
+ status->abort_code = ntohl(xdr->abort_code);
+ if (status->abort_code != 0) {
+ if (vnode && status->abort_code == VNOVNODE) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ status->nlink = 0;
+ __afs_break_callback(vnode);
+ }
+ return 0;
+ }
+
+ type = ntohl(xdr->type);
+ switch (type) {
+ case AFS_FTYPE_FILE:
+ case AFS_FTYPE_DIR:
+ case AFS_FTYPE_SYMLINK:
+ if (type != status->type &&
+ vnode &&
+ !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
+ pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ status->type, type);
+ goto bad;
+ }
+ status->type = type;
+ break;
+ default:
+ goto bad;
+ }
+
+#define EXTRACT_M4(FIELD) \
+ do { \
+ u32 x = ntohl(xdr->FIELD); \
+ if (status->FIELD != x) { \
+ flags |= AFS_VNODE_META_CHANGED; \
+ status->FIELD = x; \
+ } \
+ } while (0)
+
+#define EXTRACT_M8(FIELD) \
+ do { \
+ u64 x = xdr_to_u64(xdr->FIELD); \
+ if (status->FIELD != x) { \
+ flags |= AFS_VNODE_META_CHANGED; \
+ status->FIELD = x; \
+ } \
+ } while (0)
+
+#define EXTRACT_D8(FIELD) \
+ do { \
+ u64 x = xdr_to_u64(xdr->FIELD); \
+ if (status->FIELD != x) { \
+ flags |= AFS_VNODE_DATA_CHANGED; \
+ status->FIELD = x; \
+ } \
+ } while (0)
+
+ EXTRACT_M4(nlink);
+ EXTRACT_D8(size);
+ EXTRACT_D8(data_version);
+ EXTRACT_M8(author);
+ EXTRACT_M8(owner);
+ EXTRACT_M8(group);
+ EXTRACT_M4(mode);
+ EXTRACT_M4(caller_access); /* call ticket dependent */
+ EXTRACT_M4(anon_access);
+
+ status->mtime_client = xdr_to_time(xdr->mtime_client);
+ status->mtime_server = xdr_to_time(xdr->mtime_server);
+ status->lock_count = ntohl(xdr->lock_count);
+
+ if (read_req) {
+ read_req->data_version = status->data_version;
+ read_req->file_size = status->size;
+ }
+
+ *_bp += xdr_size(xdr);
+
+ if (vnode) {
+ if (test_bit(AFS_VNODE_UNSET, &vnode->flags))
+ flags |= AFS_VNODE_NOT_YET_SET;
+ afs_update_inode_from_status(vnode, status, expected_version,
+ flags);
+ }
+
+ return 0;
+
+bad:
+ xdr_dump_bad(*_bp);
+ return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+}
+
+/*
+ * Decode the file status. We need to lock the target vnode if we're going to
+ * update its status so that stat() sees the attributes update atomically.
+ */
+static int yfs_decode_status(struct afs_call *call,
+ const __be32 **_bp,
+ struct afs_file_status *status,
+ struct afs_vnode *vnode,
+ const afs_dataversion_t *expected_version,
+ struct afs_read *read_req)
+{
+ int ret;
+
+ if (!vnode)
+ return xdr_decode_YFSFetchStatus(call, _bp, status, vnode,
+ expected_version, read_req);
+
+ write_seqlock(&vnode->cb_lock);
+ ret = xdr_decode_YFSFetchStatus(call, _bp, status, vnode,
+ expected_version, read_req);
+ write_sequnlock(&vnode->cb_lock);
+ return ret;
+}
+
+/*
+ * Decode a YFSCallBack block
+ */
+static void xdr_decode_YFSCallBack(struct afs_call *call,
+ struct afs_vnode *vnode,
+ const __be32 **_bp)
+{
+ struct yfs_xdr_YFSCallBack *xdr = (void *)*_bp;
+ struct afs_cb_interest *old, *cbi = call->cbi;
+ u64 cb_expiry;
+
+ write_seqlock(&vnode->cb_lock);
+
+ if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) {
+ cb_expiry = xdr_to_u64(xdr->expiration_time);
+ do_div(cb_expiry, 10 * 1000 * 1000);
+ vnode->cb_version = ntohl(xdr->version);
+ vnode->cb_type = ntohl(xdr->type);
+ vnode->cb_expires_at = cb_expiry + ktime_get_real_seconds();
+ old = vnode->cb_interest;
+ if (old != call->cbi) {
+ vnode->cb_interest = cbi;
+ cbi = old;
+ }
+ set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
+
+ write_sequnlock(&vnode->cb_lock);
+ call->cbi = cbi;
+ *_bp += xdr_size(xdr);
+}
+
+static void xdr_decode_YFSCallBack_raw(const __be32 **_bp,
+ struct afs_callback *cb)
+{
+ struct yfs_xdr_YFSCallBack *x = (void *)*_bp;
+ u64 cb_expiry;
+
+ cb_expiry = xdr_to_u64(x->expiration_time);
+ do_div(cb_expiry, 10 * 1000 * 1000);
+ cb->version = ntohl(x->version);
+ cb->type = ntohl(x->type);
+ cb->expires_at = cb_expiry + ktime_get_real_seconds();
+
+ *_bp += xdr_size(x);
+}
+
+/*
+ * Decode a YFSVolSync block
+ */
+static void xdr_decode_YFSVolSync(const __be32 **_bp,
+ struct afs_volsync *volsync)
+{
+ struct yfs_xdr_YFSVolSync *x = (void *)*_bp;
+ u64 creation;
+
+ if (volsync) {
+ creation = xdr_to_u64(x->vol_creation_date);
+ do_div(creation, 10 * 1000 * 1000);
+ volsync->creation = creation;
+ }
+
+ *_bp += xdr_size(x);
+}
+
+/*
+ * Encode the requested attributes into a YFSStoreStatus block
+ */
+static __be32 *xdr_encode_YFS_StoreStatus(__be32 *bp, struct iattr *attr)
+{
+ struct yfs_xdr_YFSStoreStatus *x = (void *)bp;
+ s64 mtime = 0, owner = 0, group = 0;
+ u32 mask = 0, mode = 0;
+
+ mask = 0;
+ if (attr->ia_valid & ATTR_MTIME) {
+ mask |= AFS_SET_MTIME;
+ mtime = linux_to_yfs_time(&attr->ia_mtime);
+ }
+
+ if (attr->ia_valid & ATTR_UID) {
+ mask |= AFS_SET_OWNER;
+ owner = from_kuid(&init_user_ns, attr->ia_uid);
+ }
+
+ if (attr->ia_valid & ATTR_GID) {
+ mask |= AFS_SET_GROUP;
+ group = from_kgid(&init_user_ns, attr->ia_gid);
+ }
+
+ if (attr->ia_valid & ATTR_MODE) {
+ mask |= AFS_SET_MODE;
+ mode = attr->ia_mode & S_IALLUGO;
+ }
+
+ x->mask = htonl(mask);
+ x->mode = htonl(mode);
+ x->mtime_client = u64_to_xdr(mtime);
+ x->owner = u64_to_xdr(owner);
+ x->group = u64_to_xdr(group);
+ return bp + xdr_size(x);
+}
+
+/*
+ * Decode a YFSFetchVolumeStatus block.
+ */
+static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp,
+ struct afs_volume_status *vs)
+{
+ const struct yfs_xdr_YFSFetchVolumeStatus *x = (const void *)*_bp;
+ u32 flags;
+
+ vs->vid = xdr_to_u64(x->vid);
+ vs->parent_id = xdr_to_u64(x->parent_id);
+ flags = ntohl(x->flags);
+ vs->online = flags & yfs_FVSOnline;
+ vs->in_service = flags & yfs_FVSInservice;
+ vs->blessed = flags & yfs_FVSBlessed;
+ vs->needs_salvage = flags & yfs_FVSNeedsSalvage;
+ vs->type = ntohl(x->type);
+ vs->min_quota = 0;
+ vs->max_quota = xdr_to_u64(x->max_quota);
+ vs->blocks_in_use = xdr_to_u64(x->blocks_in_use);
+ vs->part_blocks_avail = xdr_to_u64(x->part_blocks_avail);
+ vs->part_max_blocks = xdr_to_u64(x->part_max_blocks);
+ vs->vol_copy_date = xdr_to_u64(x->vol_copy_date);
+ vs->vol_backup_date = xdr_to_u64(x->vol_backup_date);
+ *_bp += sizeof(*x) / sizeof(__be32);
+}
+
+/*
+ * Deliver reply data to a YFS.FetchStatus operation.
+ */
+static int yfs_deliver_fs_fetch_status_vnode(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSCallBack(call, vnode, &bp);
+ xdr_decode_YFSVolSync(&bp, call->reply[1]);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.FetchStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = {
+ .name = "YFS.FetchStatus(vnode)",
+ .op = yfs_FS_FetchStatus,
+ .deliver = yfs_deliver_fs_fetch_status_vnode,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for a file.
+ */
+int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync,
+ bool new_inode)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ _enter(",%x,{%llx:%llu},,",
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSCallBack) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call) {
+ fc->ac.error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->reply[1] = volsync;
+ call->expected_version = new_inode ? 1 : vnode->status.data_version;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ yfs_check_req(call, bp);
+
+ call->cb_break = fc->cb_break;
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.FetchData64 operation.
+ */
+static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ struct afs_read *req = call->reply[2];
+ const __be32 *bp;
+ unsigned int size;
+ int ret;
+
+ _enter("{%u,%zu/%llu}",
+ call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
+
+ switch (call->unmarshall) {
+ case 0:
+ req->actual_len = 0;
+ req->index = 0;
+ req->offset = req->pos & (PAGE_SIZE - 1);
+ afs_extract_to_tmp64(call);
+ call->unmarshall++;
+
+ /* extract the returned data length */
+ case 1:
+ _debug("extract data length");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ req->actual_len = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", req->actual_len);
+ req->remain = min(req->len, req->actual_len);
+ if (req->remain == 0)
+ goto no_more_data;
+
+ call->unmarshall++;
+
+ begin_page:
+ ASSERTCMP(req->index, <, req->nr_pages);
+ if (req->remain > PAGE_SIZE - req->offset)
+ size = PAGE_SIZE - req->offset;
+ else
+ size = req->remain;
+ call->bvec[0].bv_len = size;
+ call->bvec[0].bv_offset = req->offset;
+ call->bvec[0].bv_page = req->pages[req->index];
+ iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+ ASSERTCMP(size, <=, PAGE_SIZE);
+
+ /* extract the returned data */
+ case 2:
+ _debug("extract data %zu/%llu",
+ iov_iter_count(&call->iter), req->remain);
+
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+ req->remain -= call->bvec[0].bv_len;
+ req->offset += call->bvec[0].bv_len;
+ ASSERTCMP(req->offset, <=, PAGE_SIZE);
+ if (req->offset == PAGE_SIZE) {
+ req->offset = 0;
+ if (req->page_done)
+ req->page_done(call, req);
+ req->index++;
+ if (req->remain > 0)
+ goto begin_page;
+ }
+
+ ASSERTCMP(req->remain, ==, 0);
+ if (req->actual_len <= req->len)
+ goto no_more_data;
+
+ /* Discard any excess data the server gave us */
+ iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
+ call->unmarshall = 3;
+ case 3:
+ _debug("extract discard %zu/%llu",
+ iov_iter_count(&call->iter), req->actual_len - req->len);
+
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ no_more_data:
+ call->unmarshall = 4;
+ afs_extract_to_buf(call,
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSCallBack) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+
+ /* extract the metadata */
+ case 4:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &vnode->status.data_version, req);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSCallBack(call, vnode, &bp);
+ xdr_decode_YFSVolSync(&bp, call->reply[1]);
+
+ call->unmarshall++;
+
+ case 5:
+ break;
+ }
+
+ for (; req->index < req->nr_pages; req->index++) {
+ if (req->offset < PAGE_SIZE)
+ zero_user_segment(req->pages[req->index],
+ req->offset, PAGE_SIZE);
+ if (req->page_done)
+ req->page_done(call, req);
+ req->offset = 0;
+ }
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
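
The delivery routine above is written as a resumable state machine: it returns early whenever afs_extract_data() reports that more data is needed, and on the next call the switch resumes at the recorded call->unmarshall value, falling through to later cases as data arrives. A minimal sketch of that style, with an invented one-byte-length wire format rather than the YFS one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Resumable parser: feed() may be called repeatedly with partial data
 * and picks up where it left off, like the unmarshalling switch above.
 */
struct parser {
	int state;		/* 0 = want length, 1 = want payload */
	size_t need, have;
	uint8_t len;
	uint8_t payload[255];
};

/* Returns 1 when a complete record has been assembled, 0 if more data
 * is needed.  Consumes bytes from *pp/*pn as it goes.
 */
static int feed(struct parser *p, const uint8_t **pp, size_t *pn)
{
	switch (p->state) {
	case 0:
		if (*pn == 0)
			return 0;
		p->len = *(*pp)++;
		(*pn)--;
		p->need = p->len;
		p->have = 0;
		p->state = 1;
		/* fall through */
	case 1:
		while (p->have < p->need && *pn > 0) {
			p->payload[p->have++] = *(*pp)++;
			(*pn)--;
		}
		if (p->have < p->need)
			return 0;	/* come back with more data */
		p->state = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	static const uint8_t chunk1[] = { 5, 'h', 'e' };
	static const uint8_t chunk2[] = { 'l', 'l', 'o' };
	struct parser p = { 0 };
	const uint8_t *d;
	size_t n;

	d = chunk1; n = sizeof(chunk1);
	printf("chunk1 complete? %d\n", feed(&p, &d, &n));	/* 0 */
	d = chunk2; n = sizeof(chunk2);
	printf("chunk2 complete? %d\n", feed(&p, &d, &n));	/* 1 */
	printf("payload: %.*s\n", (int)p.len, (const char *)p.payload);
	return 0;
}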
+static void yfs_fetch_data_destructor(struct afs_call *call)
+{
+ struct afs_read *req = call->reply[2];
+
+ afs_put_read(req);
+ afs_flat_call_destructor(call);
+}
+
+/*
+ * YFS.FetchData64 operation type
+ */
+static const struct afs_call_type yfs_RXYFSFetchData64 = {
+ .name = "YFS.FetchData64",
+ .op = yfs_FS_FetchData64,
+ .deliver = yfs_deliver_fs_fetch_data64,
+ .destructor = yfs_fetch_data_destructor,
+};
+
+/*
+ * Fetch data from a file.
+ */
+int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ _enter(",%x,{%llx:%llu},%llx,%llx",
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode,
+ req->pos, req->len);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_u64) * 2,
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSCallBack) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->reply[1] = NULL; /* volsync */
+ call->reply[2] = req;
+ call->expected_version = vnode->status.data_version;
+ call->want_reply_time = true;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSFETCHDATA64);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_u64(bp, req->pos);
+ bp = xdr_encode_u64(bp, req->len);
+ yfs_check_req(call, bp);
+
+ refcount_inc(&req->usage);
+ call->cb_break = fc->cb_break;
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data for YFS.CreateFile or YFS.MakeDir.
+ */
+static int yfs_deliver_fs_create_vnode(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ xdr_decode_YFSFid(&bp, call->reply[1]);
+ ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSCallBack_raw(&bp, call->reply[3]);
+ xdr_decode_YFSVolSync(&bp, NULL);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.CreateFile and YFS.MakeDir operation type
+ */
+static const struct afs_call_type afs_RXFSCreateFile = {
+ .name = "YFS.CreateFile",
+ .op = yfs_FS_CreateFile,
+ .deliver = yfs_deliver_fs_create_vnode,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Create a file.
+ */
+int yfs_fs_create_file(struct afs_fs_cursor *fc,
+ const char *name,
+ umode_t mode,
+ u64 current_data_version,
+ struct afs_fid *newfid,
+ struct afs_file_status *newstatus,
+ struct afs_callback *newcb)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ size_t namesz, reqsz, rplsz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = strlen(name);
+ reqsz = (sizeof(__be32) +
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(namesz) +
+ sizeof(struct yfs_xdr_YFSStoreStatus) +
+ sizeof(__be32));
+ rplsz = (sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSCallBack) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+
+ call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz);
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->reply[1] = newfid;
+ call->reply[2] = newstatus;
+ call->reply[3] = newcb;
+ call->expected_version = current_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSCREATEFILE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+ bp = xdr_encode_u32(bp, 0); /* ViceLockType */
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+static const struct afs_call_type yfs_RXFSMakeDir = {
+ .name = "YFS.MakeDir",
+ .op = yfs_FS_MakeDir,
+ .deliver = yfs_deliver_fs_create_vnode,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Make a directory.
+ */
+int yfs_fs_make_dir(struct afs_fs_cursor *fc,
+ const char *name,
+ umode_t mode,
+ u64 current_data_version,
+ struct afs_fid *newfid,
+ struct afs_file_status *newstatus,
+ struct afs_callback *newcb)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ size_t namesz, reqsz, rplsz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = strlen(name);
+ reqsz = (sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(namesz) +
+ sizeof(struct yfs_xdr_YFSStoreStatus));
+ rplsz = (sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSCallBack) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+
+ call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz);
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->reply[1] = newfid;
+ call->reply[2] = newstatus;
+ call->reply[3] = newcb;
+ call->expected_version = current_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSMAKEDIR);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.RemoveFile2 operation.
+ */
+static int yfs_deliver_fs_remove_file2(struct afs_call *call)
+{
+ struct afs_vnode *dvnode = call->reply[0];
+ struct afs_vnode *vnode = call->reply[1];
+ struct afs_fid fid;
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+
+ xdr_decode_YFSFid(&bp, &fid);
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ /* Was deleted if vnode->status.abort_code == VNOVNODE. */
+
+ xdr_decode_YFSVolSync(&bp, NULL);
+ return 0;
+}
+
+/*
+ * YFS.RemoveFile2 operation type.
+ */
+static const struct afs_call_type yfs_RXYFSRemoveFile2 = {
+ .name = "YFS.RemoveFile2",
+ .op = yfs_FS_RemoveFile2,
+ .deliver = yfs_deliver_fs_remove_file2,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Remove a file and retrieve new file status.
+ */
+int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+ const char *name, u64 current_data_version)
+{
+ struct afs_vnode *dvnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(dvnode);
+ size_t namesz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = strlen(name);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(namesz),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = dvnode;
+ call->reply[1] = vnode;
+ call->expected_version = current_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSREMOVEFILE2);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+ bp = xdr_encode_string(bp, name, namesz);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &dvnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.RemoveFile or YFS.RemoveDir operation.
+ */
+static int yfs_deliver_fs_remove(struct afs_call *call)
+{
+ struct afs_vnode *dvnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+
+ xdr_decode_YFSVolSync(&bp, NULL);
+ return 0;
+}
+
+/*
+ * YFS.RemoveDir and YFS.RemoveFile operation types.
+ */
+static const struct afs_call_type yfs_RXYFSRemoveFile = {
+ .name = "YFS.RemoveFile",
+ .op = yfs_FS_RemoveFile,
+ .deliver = yfs_deliver_fs_remove,
+ .destructor = afs_flat_call_destructor,
+};
+
+static const struct afs_call_type yfs_RXYFSRemoveDir = {
+ .name = "YFS.RemoveDir",
+ .op = yfs_FS_RemoveDir,
+ .deliver = yfs_deliver_fs_remove,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * remove a file or directory
+ */
+int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+ const char *name, bool isdir, u64 current_data_version)
+{
+ struct afs_vnode *dvnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(dvnode);
+ size_t namesz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = strlen(name);
+ call = afs_alloc_flat_call(
+ net, isdir ? &yfs_RXYFSRemoveDir : &yfs_RXYFSRemoveFile,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(namesz),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = dvnode;
+ call->reply[1] = vnode;
+ call->expected_version = current_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, isdir ? YFSREMOVEDIR : YFSREMOVEFILE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+ bp = xdr_encode_string(bp, name, namesz);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &dvnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.Link operation.
+ */
+static int yfs_deliver_fs_link(struct afs_call *call)
+{
+ struct afs_vnode *dvnode = call->reply[0], *vnode = call->reply[1];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSVolSync(&bp, NULL);
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.Link operation type.
+ */
+static const struct afs_call_type yfs_RXYFSLink = {
+ .name = "YFS.Link",
+ .op = yfs_FS_Link,
+ .deliver = yfs_deliver_fs_link,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Make a hard link.
+ */
+int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+ const char *name, u64 current_data_version)
+{
+ struct afs_vnode *dvnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ size_t namesz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = strlen(name);
+ call = afs_alloc_flat_call(net, &yfs_RXYFSLink,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(namesz) +
+ sizeof(struct yfs_xdr_YFSFid),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = dvnode;
+ call->reply[1] = vnode;
+ call->expected_version = current_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSLINK);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+ bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.Symlink operation.
+ */
+static int yfs_deliver_fs_symlink(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ xdr_decode_YFSFid(&bp, call->reply[1]);
+ ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSVolSync(&bp, NULL);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.Symlink operation type
+ */
+static const struct afs_call_type yfs_RXYFSSymlink = {
+ .name = "YFS.Symlink",
+ .op = yfs_FS_Symlink,
+ .deliver = yfs_deliver_fs_symlink,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Create a symbolic link.
+ */
+int yfs_fs_symlink(struct afs_fs_cursor *fc,
+ const char *name,
+ const char *contents,
+ u64 current_data_version,
+ struct afs_fid *newfid,
+ struct afs_file_status *newstatus)
+{
+ struct afs_vnode *dvnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(dvnode);
+ size_t namesz, contents_sz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = strlen(name);
+ contents_sz = strlen(contents);
+ call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(namesz) +
+ xdr_strlen(contents_sz) +
+ sizeof(struct yfs_xdr_YFSStoreStatus),
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = dvnode;
+ call->reply[1] = newfid;
+ call->reply[2] = newstatus;
+ call->expected_version = current_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSSYMLINK);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+ bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_string(bp, contents, contents_sz);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, S_IRWXUGO);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &dvnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.Rename operation.
+ */
+static int yfs_deliver_fs_rename(struct afs_call *call)
+{
+ struct afs_vnode *orig_dvnode = call->reply[0];
+ struct afs_vnode *new_dvnode = call->reply[1];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ if (new_dvnode != orig_dvnode) {
+ ret = yfs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
+ &call->expected_version_2, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ xdr_decode_YFSVolSync(&bp, NULL);
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.Rename operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename = {
+	.name		= "YFS.Rename",
+ .op = yfs_FS_Rename,
+ .deliver = yfs_deliver_fs_rename,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Rename a file or directory.
+ */
+int yfs_fs_rename(struct afs_fs_cursor *fc,
+ const char *orig_name,
+ struct afs_vnode *new_dvnode,
+ const char *new_name,
+ u64 current_orig_data_version,
+ u64 current_new_data_version)
+{
+ struct afs_vnode *orig_dvnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(orig_dvnode);
+ size_t o_namesz, n_namesz;
+ __be32 *bp;
+
+ _enter("");
+
+ o_namesz = strlen(orig_name);
+ n_namesz = strlen(new_name);
+ call = afs_alloc_flat_call(net, &yfs_RXYFSRename,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(o_namesz) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(n_namesz),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = orig_dvnode;
+ call->reply[1] = new_dvnode;
+ call->expected_version = current_orig_data_version + 1;
+ call->expected_version_2 = current_new_data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRENAME);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &orig_dvnode->fid);
+ bp = xdr_encode_string(bp, orig_name, o_namesz);
+ bp = xdr_encode_YFSFid(bp, &new_dvnode->fid);
+ bp = xdr_encode_string(bp, new_name, n_namesz);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &orig_dvnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.StoreData64 operation.
+ */
+static int yfs_deliver_fs_store_data(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("");
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSVolSync(&bp, NULL);
+
+ afs_pages_written_back(vnode, call);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.StoreData64 operation type.
+ */
+static const struct afs_call_type yfs_RXYFSStoreData64 = {
+ .name = "YFS.StoreData64",
+ .op = yfs_FS_StoreData64,
+ .deliver = yfs_deliver_fs_store_data,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Store a set of pages to a large file.
+ */
+int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
+ pgoff_t first, pgoff_t last,
+ unsigned offset, unsigned to)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ loff_t size, pos, i_size;
+ __be32 *bp;
+
+ _enter(",%x,{%llx:%llu},,",
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+ size = (loff_t)to - (loff_t)offset;
+ if (first != last)
+ size += (loff_t)(last - first) << PAGE_SHIFT;
+ pos = (loff_t)first << PAGE_SHIFT;
+ pos += offset;
+
+ i_size = i_size_read(&vnode->vfs_inode);
+ if (pos + size > i_size)
+ i_size = size + pos;
+
+ _debug("size %llx, at %llx, i_size %llx",
+ (unsigned long long)size, (unsigned long long)pos,
+ (unsigned long long)i_size);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64,
+ sizeof(__be32) +
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSStoreStatus) +
+ sizeof(struct yfs_xdr_u64) * 3,
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->mapping = mapping;
+ call->reply[0] = vnode;
+ call->first = first;
+ call->last = last;
+ call->first_offset = offset;
+ call->last_to = to;
+ call->send_pages = true;
+ call->expected_version = vnode->status.data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSSTOREDATA64);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSStoreStatus_mtime(bp, &vnode->vfs_inode.i_mtime);
+ bp = xdr_encode_u64(bp, pos);
+ bp = xdr_encode_u64(bp, size);
+ bp = xdr_encode_u64(bp, i_size);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
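
A worked example of the pos/size arithmetic above, with invented values: storing pages 2 through 4 of a file, starting at byte 100 of the first page and ending before byte 200 of the last, with 4KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long first = 2, last = 4, offset = 100, to = 200;
	long long size, pos;

	size = (long long)to - (long long)offset;
	if (first != last)
		size += (long long)(last - first) << PAGE_SHIFT;
	pos = ((long long)first << PAGE_SHIFT) + offset;

	/* pos = 8292, size = 8292: bytes [8292, 16584), i.e. from byte 100
	 * of page 2 up to (but not including) byte 200 of page 4.
	 */
	printf("pos=%lld size=%lld end=%lld\n", pos, size, pos + size);
	return 0;
}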
+/*
+ * Deliver reply data to a YFS.StoreStatus operation.
+ */
+static int yfs_deliver_fs_store_status(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("");
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSVolSync(&bp, NULL);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.StoreStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSStoreStatus = {
+ .name = "YFS.StoreStatus",
+ .op = yfs_FS_StoreStatus,
+ .deliver = yfs_deliver_fs_store_status,
+ .destructor = afs_flat_call_destructor,
+};
+
+static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = {
+ .name = "YFS.StoreData64",
+ .op = yfs_FS_StoreData64,
+ .deliver = yfs_deliver_fs_store_status,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Set the attributes on a file, using YFS.StoreData64 rather than
+ * YFS.StoreStatus so as to alter the file size also.
+ */
+static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ _enter(",%x,{%llx:%llu},,",
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSStoreStatus) +
+ sizeof(struct yfs_xdr_u64) * 3,
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->expected_version = vnode->status.data_version + 1;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSSTOREDATA64);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFS_StoreStatus(bp, attr);
+ bp = xdr_encode_u64(bp, 0); /* position of start of write */
+ bp = xdr_encode_u64(bp, 0); /* size of write */
+ bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Set the attributes on a file, using YFS.StoreData64 if there's a change in
+ * file size, and YFS.StoreStatus otherwise.
+ */
+int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ if (attr->ia_valid & ATTR_SIZE)
+ return yfs_fs_setattr_size(fc, attr);
+
+ _enter(",%x,{%llx:%llu},,",
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(struct yfs_xdr_YFSStoreStatus),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->expected_version = vnode->status.data_version;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSSTORESTATUS);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFS_StoreStatus(bp, attr);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.GetVolumeStatus operation.
+ */
+static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
+{
+ const __be32 *bp;
+ char *p;
+ u32 size;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ switch (call->unmarshall) {
+ case 0:
+ call->unmarshall++;
+ afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
+
+ /* extract the returned status record */
+ case 1:
+ _debug("extract status");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ xdr_decode_YFSFetchVolumeStatus(&bp, call->reply[1]);
+ call->unmarshall++;
+ afs_extract_to_tmp(call);
+
+ /* extract the volume name length */
+ case 2:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ call->count = ntohl(call->tmp);
+ _debug("volname length: %u", call->count);
+ if (call->count >= AFSNAMEMAX)
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_volname_len);
+ size = (call->count + 3) & ~3; /* It's padded */
+ afs_extract_begin(call, call->reply[2], size);
+ call->unmarshall++;
+
+ /* extract the volume name */
+ case 3:
+ _debug("extract volname");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ p = call->reply[2];
+ p[call->count] = 0;
+ _debug("volname '%s'", p);
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ /* extract the offline message length */
+ case 4:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ call->count = ntohl(call->tmp);
+ _debug("offline msg length: %u", call->count);
+ if (call->count >= AFSNAMEMAX)
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_offline_msg_len);
+ size = (call->count + 3) & ~3; /* It's padded */
+ afs_extract_begin(call, call->reply[2], size);
+ call->unmarshall++;
+
+ /* extract the offline message */
+ case 5:
+ _debug("extract offline");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ p = call->reply[2];
+ p[call->count] = 0;
+ _debug("offline '%s'", p);
+
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ /* extract the message of the day length */
+ case 6:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ call->count = ntohl(call->tmp);
+ _debug("motd length: %u", call->count);
+ if (call->count >= AFSNAMEMAX)
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_motd_len);
+ size = (call->count + 3) & ~3; /* It's padded */
+ afs_extract_begin(call, call->reply[2], size);
+ call->unmarshall++;
+
+ /* extract the message of the day */
+ case 7:
+ _debug("extract motd");
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+ p = call->reply[2];
+ p[call->count] = 0;
+ _debug("motd '%s'", p);
+
+ call->unmarshall++;
+
+ case 8:
+ break;
+ }
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * Destroy a YFS.GetVolumeStatus call.
+ */
+static void yfs_get_volume_status_call_destructor(struct afs_call *call)
+{
+ kfree(call->reply[2]);
+ call->reply[2] = NULL;
+ afs_flat_call_destructor(call);
+}
+
+/*
+ * YFS.GetVolumeStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSGetVolumeStatus = {
+ .name = "YFS.GetVolumeStatus",
+ .op = yfs_FS_GetVolumeStatus,
+ .deliver = yfs_deliver_fs_get_volume_status,
+ .destructor = yfs_get_volume_status_call_destructor,
+};
+
+/*
+ * fetch the status of a volume
+ */
+int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
+ struct afs_volume_status *vs)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+ void *tmpbuf;
+
+ _enter("");
+
+ tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_u64),
+ sizeof(struct yfs_xdr_YFSFetchVolumeStatus) +
+ sizeof(__be32));
+ if (!call) {
+ kfree(tmpbuf);
+ return -ENOMEM;
+ }
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+ call->reply[1] = vs;
+ call->reply[2] = tmpbuf;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_u64(bp, vnode->fid.vid);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.SetLock, YFS.ExtendLock or YFS.ReleaseLock operation.
+ */
+static int yfs_deliver_fs_xxxx_lock(struct afs_call *call)
+{
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSVolSync(&bp, NULL);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.SetLock operation type
+ */
+static const struct afs_call_type yfs_RXYFSSetLock = {
+ .name = "YFS.SetLock",
+ .op = yfs_FS_SetLock,
+ .deliver = yfs_deliver_fs_xxxx_lock,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.ExtendLock operation type
+ */
+static const struct afs_call_type yfs_RXYFSExtendLock = {
+ .name = "YFS.ExtendLock",
+ .op = yfs_FS_ExtendLock,
+ .deliver = yfs_deliver_fs_xxxx_lock,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.ReleaseLock operation type
+ */
+static const struct afs_call_type yfs_RXYFSReleaseLock = {
+ .name = "YFS.ReleaseLock",
+ .op = yfs_FS_ReleaseLock,
+ .deliver = yfs_deliver_fs_xxxx_lock,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Set a lock on a file
+ */
+int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(__be32),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSSETLOCK);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_u32(bp, type);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * extend a lock on a file
+ */
+int yfs_fs_extend_lock(struct afs_fs_cursor *fc)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSEXTENDLOCK);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * release a lock on a file
+ */
+int yfs_fs_release_lock(struct afs_fs_cursor *fc)
+{
+ struct afs_vnode *vnode = fc->vnode;
+ struct afs_call *call;
+ struct afs_net *net = afs_v2net(vnode);
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return -ENOMEM;
+
+ call->key = fc->key;
+ call->reply[0] = vnode;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSRELEASELOCK);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ yfs_check_req(call, bp);
+
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &vnode->fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.FetchStatus call with no vnode.
+ */
+static int yfs_deliver_fs_fetch_status(struct afs_call *call)
+{
+ struct afs_file_status *status = call->reply[1];
+ struct afs_callback *callback = call->reply[2];
+ struct afs_volsync *volsync = call->reply[3];
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ ret = yfs_decode_status(call, &bp, status, vnode,
+ &call->expected_version, NULL);
+ if (ret < 0)
+ return ret;
+ xdr_decode_YFSCallBack_raw(&bp, callback);
+ xdr_decode_YFSVolSync(&bp, volsync);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.FetchStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSFetchStatus = {
+ .name = "YFS.FetchStatus",
+ .op = yfs_FS_FetchStatus,
+ .deliver = yfs_deliver_fs_fetch_status,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for a fid without needing a vnode handle.
+ */
+int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
+ struct afs_net *net,
+ struct afs_fid *fid,
+ struct afs_file_status *status,
+ struct afs_callback *callback,
+ struct afs_volsync *volsync)
+{
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter(",%x,{%llx:%llu},,",
+ key_serial(fc->key), fid->vid, fid->vnode);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSCallBack) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call) {
+ fc->ac.error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ call->key = fc->key;
+ call->reply[0] = NULL; /* vnode for fid[0] */
+ call->reply[1] = status;
+ call->reply[2] = callback;
+ call->reply[3] = volsync;
+ call->expected_version = 1; /* vnode->status.data_version */
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, fid);
+ yfs_check_req(call, bp);
+
+ call->cb_break = fc->cb_break;
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.InlineBulkStatus call
+ */
+static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
+{
+ struct afs_file_status *statuses;
+ struct afs_callback *callbacks;
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ u32 tmp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ switch (call->unmarshall) {
+ case 0:
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ /* Extract the file status count and array in two steps */
+ case 1:
+ _debug("extract status count");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ tmp = ntohl(call->tmp);
+ _debug("status count: %u/%u", tmp, call->count2);
+ if (tmp != call->count2)
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_ibulkst_count);
+
+ call->count = 0;
+ call->unmarshall++;
+ more_counts:
+ afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
+
+ case 2:
+ _debug("extract status array %u", call->count);
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ statuses = call->reply[1];
+ ret = yfs_decode_status(call, &bp, &statuses[call->count],
+ call->count == 0 ? vnode : NULL,
+ NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ call->count++;
+ if (call->count < call->count2)
+ goto more_counts;
+
+ call->count = 0;
+ call->unmarshall++;
+ afs_extract_to_tmp(call);
+
+ /* Extract the callback count and array in two steps */
+ case 3:
+ _debug("extract CB count");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ tmp = ntohl(call->tmp);
+ _debug("CB count: %u", tmp);
+ if (tmp != call->count2)
+ return afs_protocol_error(call, -EBADMSG,
+ afs_eproto_ibulkst_cb_count);
+ call->count = 0;
+ call->unmarshall++;
+ more_cbs:
+ afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
+
+ case 4:
+ _debug("extract CB array");
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ _debug("unmarshall CB array");
+ bp = call->buffer;
+ callbacks = call->reply[2];
+ xdr_decode_YFSCallBack_raw(&bp, &callbacks[call->count]);
+ statuses = call->reply[1];
+ if (call->count == 0 && vnode && statuses[0].abort_code == 0) {
+ bp = call->buffer;
+ xdr_decode_YFSCallBack(call, vnode, &bp);
+ }
+ call->count++;
+ if (call->count < call->count2)
+ goto more_cbs;
+
+ afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
+ call->unmarshall++;
+
+ case 5:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ xdr_decode_YFSVolSync(&bp, call->reply[3]);
+
+ call->unmarshall++;
+
+ case 6:
+ break;
+ }
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * YFS.InlineBulkStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSInlineBulkStatus = {
+ .name = "YFS.InlineBulkStatus",
+ .op = yfs_FS_InlineBulkStatus,
+ .deliver = yfs_deliver_fs_inline_bulk_status,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for up to 1024 files
+ */
+int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
+ struct afs_net *net,
+ struct afs_fid *fids,
+ struct afs_file_status *statuses,
+ struct afs_callback *callbacks,
+ unsigned int nr_fids,
+ struct afs_volsync *volsync)
+{
+ struct afs_call *call;
+ __be32 *bp;
+ int i;
+
+ _enter(",%x,{%llx:%llu},%u",
+ key_serial(fc->key), fids[0].vid, fids[0].vnode, nr_fids);
+
+ call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus,
+ sizeof(__be32) +
+ sizeof(__be32) +
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_YFSFid) * nr_fids,
+ sizeof(struct yfs_xdr_YFSFetchStatus));
+ if (!call) {
+ fc->ac.error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ call->key = fc->key;
+ call->reply[0] = NULL; /* vnode for fid[0] */
+ call->reply[1] = statuses;
+ call->reply[2] = callbacks;
+ call->reply[3] = volsync;
+ call->count2 = nr_fids;
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS);
+ bp = xdr_encode_u32(bp, 0); /* RPCFlags */
+ bp = xdr_encode_u32(bp, nr_fids);
+ for (i = 0; i < nr_fids; i++)
+ bp = xdr_encode_YFSFid(bp, &fids[i]);
+ yfs_check_req(call, bp);
+
+ call->cb_break = fc->cb_break;
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &fids[0]);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 9a69392f1fb3..d81c148682e7 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
s->s_magic = BFS_MAGIC;
- if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
+ if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
+ le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
printf("Superblock is corrupted\n");
goto out1;
}
@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
sizeof(struct bfs_inode)
+ BFS_ROOT_INO - 1;
imap_len = (info->si_lasti / 8) + 1;
- info->si_imap = kzalloc(imap_len, GFP_KERNEL);
- if (!info->si_imap)
+ info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
+ if (!info->si_imap) {
+ printf("Cannot allocate %u bytes\n", imap_len);
goto out1;
+ }
for (i = 0; i < BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 38b8ce05cbc7..a80b4f0ee7c4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -349,7 +349,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
dio->size = 0;
dio->multi_bio = false;
- dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
+ dio->should_dirty = is_read && iter_is_iovec(iter);
blk_start_plug(&plug);
for (;;) {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 2ee43b6a4f09..539901fb5165 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1014,9 +1014,26 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
+ /*
+ * If we are COWing a node/leaf from the extent, chunk or device trees,
+ * make sure that we do not finish block group creation of pending block
+ * groups. We do this to avoid a deadlock.
+ * COWing can result in allocation of a new chunk, and flushing pending
+ * block groups (btrfs_create_pending_block_groups()) can be triggered
+ * when finishing allocation of a new chunk. Creation of a pending block
+ * group modifies the extent, chunk and device trees, therefore we could
+ * deadlock with ourselves since we are holding a lock on an extent
+ * buffer that btrfs_create_pending_block_groups() may try to COW later.
+ */
+ if (root == fs_info->extent_root ||
+ root == fs_info->chunk_root ||
+ root == fs_info->dev_root)
+ trans->can_flush_pending_bgs = false;
+
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
search_start, empty_size);
+ trans->can_flush_pending_bgs = true;
if (IS_ERR(cow))
return PTR_ERR(cow);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 68ca41dbbef3..80953528572d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3201,9 +3201,6 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space);
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_balance_args *bargs);
-int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
- struct file *dst_file, loff_t dst_loff,
- u64 olen);
/* file.c */
int __init btrfs_auto_defrag_init(void);
@@ -3233,8 +3230,9 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
-int btrfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
+loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 5149165b49a4..9301b3ad9217 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -164,14 +164,27 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
return NULL;
}
+static struct btrfs_delayed_ref_head *find_first_ref_head(
+ struct btrfs_delayed_ref_root *dr)
+{
+ struct rb_node *n;
+ struct btrfs_delayed_ref_head *entry;
+
+ n = rb_first_cached(&dr->href_root);
+ if (!n)
+ return NULL;
+
+ entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
+
+ return entry;
+}
+
/*
- * find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot.
- * If return_bigger is given, the next bigger entry is returned if no exact
- * match is found. But if no bigger one is found then the first node of the
- * ref head tree will be returned.
+ * Find a head entry based on bytenr. This returns the delayed ref head if it
+ * was able to find one, or NULL if nothing was in that spot. If return_bigger
+ * is given, the next bigger entry is returned if no exact match is found.
*/
-static struct btrfs_delayed_ref_head* find_ref_head(
+static struct btrfs_delayed_ref_head *find_ref_head(
struct btrfs_delayed_ref_root *dr, u64 bytenr,
bool return_bigger)
{
@@ -195,10 +208,9 @@ static struct btrfs_delayed_ref_head* find_ref_head(
if (bytenr > entry->bytenr) {
n = rb_next(&entry->href_node);
if (!n)
- n = rb_first_cached(&dr->href_root);
+ return NULL;
entry = rb_entry(n, struct btrfs_delayed_ref_head,
href_node);
- return entry;
}
return entry;
}
@@ -355,33 +367,25 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
struct btrfs_delayed_ref_root *delayed_refs)
{
struct btrfs_delayed_ref_head *head;
- u64 start;
- bool loop = false;
again:
- start = delayed_refs->run_delayed_start;
- head = find_ref_head(delayed_refs, start, true);
- if (!head && !loop) {
+ head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
+ true);
+ if (!head && delayed_refs->run_delayed_start != 0) {
delayed_refs->run_delayed_start = 0;
- start = 0;
- loop = true;
- head = find_ref_head(delayed_refs, start, true);
- if (!head)
- return NULL;
- } else if (!head && loop) {
- return NULL;
+ head = find_first_ref_head(delayed_refs);
}
+ if (!head)
+ return NULL;
while (head->processing) {
struct rb_node *node;
node = rb_next(&head->href_node);
if (!node) {
- if (loop)
+ if (delayed_refs->run_delayed_start == 0)
return NULL;
delayed_refs->run_delayed_start = 0;
- start = 0;
- loop = true;
goto again;
}
head = rb_entry(node, struct btrfs_delayed_ref_head,
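
The reworked btrfs_select_ref_head() above searches from run_delayed_start, wraps around to the first head at most once, and skips heads that are already being processed. As a rough stand-in (a plain sorted array instead of the rb-tree, with invented names), the selection logic amounts to:

#include <stdbool.h>
#include <stddef.h>

/* A delayed-ref head reduced to the two fields the selection cares about. */
struct head {
	unsigned long long bytenr;
	bool processing;
};

/*
 * Pick the first non-busy head at or after *start, wrapping to the front of
 * the (sorted) array at most once; NULL means nothing is selectable.
 */
static struct head *select_head(struct head *heads, size_t n,
				unsigned long long *start)
{
	size_t i;

again:
	for (i = 0; i < n; i++)
		if (heads[i].bytenr >= *start && !heads[i].processing)
			return &heads[i];

	if (*start != 0) {		/* wrap around once, like the patch */
		*start = 0;
		goto again;
	}
	return NULL;
}
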
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a4cd0221bc8d..a1febf155747 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2366,6 +2366,9 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
insert_reserved);
else
BUG();
+ if (ret && insert_reserved)
+ btrfs_pin_extent(trans->fs_info, node->bytenr,
+ node->num_bytes, 1);
return ret;
}
@@ -2954,7 +2957,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head;
int ret;
int run_all = count == (unsigned long)-1;
- bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
/* We'll clean this up in btrfs_cleanup_transaction */
if (trans->aborted)
@@ -2971,7 +2973,6 @@ again:
#ifdef SCRAMBLE_DELAYED_REFS
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
- trans->can_flush_pending_bgs = false;
ret = __btrfs_run_delayed_refs(trans, count);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
@@ -3002,7 +3003,6 @@ again:
goto again;
}
out:
- trans->can_flush_pending_bgs = can_flush_pending_bgs;
return 0;
}
@@ -4568,6 +4568,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
goto out;
} else {
ret = 1;
+ space_info->max_extent_size = 0;
}
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
@@ -4589,11 +4590,9 @@ out:
* the block groups that were made dirty during the lifetime of the
* transaction.
*/
- if (trans->can_flush_pending_bgs &&
- trans->chunk_bytes_reserved >= (u64)SZ_2M) {
+ if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
btrfs_create_pending_block_groups(trans);
- btrfs_trans_release_chunk_metadata(trans);
- }
+
return ret;
}
@@ -6464,6 +6463,7 @@ static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
space_info->bytes_readonly += num_bytes;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
+ space_info->max_extent_size = 0;
if (delalloc)
cache->delalloc_bytes -= num_bytes;
@@ -7260,6 +7260,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group = NULL;
u64 search_start = 0;
u64 max_extent_size = 0;
+ u64 max_free_space = 0;
u64 empty_cluster = 0;
struct btrfs_space_info *space_info;
int loop = 0;
@@ -7555,8 +7556,8 @@ unclustered_alloc:
spin_lock(&ctl->tree_lock);
if (ctl->free_space <
num_bytes + empty_cluster + empty_size) {
- if (ctl->free_space > max_extent_size)
- max_extent_size = ctl->free_space;
+ max_free_space = max(max_free_space,
+ ctl->free_space);
spin_unlock(&ctl->tree_lock);
goto loop;
}
@@ -7723,6 +7724,8 @@ loop:
}
out:
if (ret == -ENOSPC) {
+ if (!max_extent_size)
+ max_extent_size = max_free_space;
spin_lock(&space_info->lock);
space_info->max_extent_size = max_extent_size;
spin_unlock(&space_info->lock);
@@ -8004,21 +8007,14 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
}
path = btrfs_alloc_path();
- if (!path) {
- btrfs_free_and_pin_reserved_extent(fs_info,
- extent_key.objectid,
- fs_info->nodesize);
+ if (!path)
return -ENOMEM;
- }
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
&extent_key, size);
if (ret) {
btrfs_free_path(path);
- btrfs_free_and_pin_reserved_extent(fs_info,
- extent_key.objectid,
- fs_info->nodesize);
return ret;
}
@@ -10132,9 +10128,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_block_group_item item;
struct btrfs_key key;
int ret = 0;
- bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
- trans->can_flush_pending_bgs = false;
+ if (!trans->can_flush_pending_bgs)
+ return;
+
while (!list_empty(&trans->new_bgs)) {
block_group = list_first_entry(&trans->new_bgs,
struct btrfs_block_group_cache,
@@ -10159,7 +10156,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
next:
list_del_init(&block_group->bg_list);
}
- trans->can_flush_pending_bgs = can_flush_pending_bgs;
+ btrfs_trans_release_chunk_metadata(trans);
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
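
Several hunks above (do_chunk_alloc, btrfs_free_reserved_bytes, find_free_extent, plus the free-space-cache changes further down) tighten how max_extent_size is maintained: when an allocation fails, the largest free chunk actually seen is what gets cached, so later callers do not retry sizes that are already known not to fit. A simplified, self-contained sketch of that bookkeeping (invented names, no btrfs structures):

#include <stddef.h>

/* One free-space entry; 'bytes' is the largest extent it can provide. */
struct free_chunk {
	size_t bytes;
};

/*
 * Try to satisfy a request of 'want' bytes. On failure, *max_free holds the
 * biggest chunk that was seen, so the caller can cache it and refuse later
 * requests that cannot possibly be satisfied.
 */
static long alloc_chunk(const struct free_chunk *chunks, size_t n,
			size_t want, size_t *max_free)
{
	size_t i;

	*max_free = 0;
	for (i = 0; i < n; i++) {
		if (chunks[i].bytes >= want)
			return (long)i;			/* index of a fit */
		if (chunks[i].bytes > *max_free)
			*max_free = chunks[i].bytes;
	}
	return -1;	/* nothing fits; *max_free bounds future requests */
}
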
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15b925142793..a3c22e16509b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2078,6 +2078,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
inode_lock(inode);
+
+ /*
+ * We take the dio_sem here because the tree log stuff can race with
+ * lockless dio writes and get an extent map logged for an extent we
+ * never waited on. We need it this high up for lockdep reasons.
+ */
+ down_write(&BTRFS_I(inode)->dio_sem);
+
atomic_inc(&root->log_batch);
/*
@@ -2086,6 +2094,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2109,6 +2118,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* checked called fsync.
*/
ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2127,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
goto out;
}
@@ -2148,6 +2159,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
+ up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
/*
@@ -3286,8 +3298,7 @@ const struct file_operations btrfs_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = btrfs_compat_ioctl,
#endif
- .clone_file_range = btrfs_clone_file_range,
- .dedupe_file_range = btrfs_dedupe_file_range,
+ .remap_file_range = btrfs_remap_file_range,
};
void __cold btrfs_auto_defrag_exit(void)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 67441219d6c9..4ba0aedc878b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1772,6 +1772,13 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
return -1;
}
+static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
+{
+ if (entry->bitmap)
+ return entry->max_extent_size;
+ return entry->bytes;
+}
+
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
@@ -1793,8 +1800,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
for (node = &entry->offset_index; node; node = rb_next(node)) {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
if (entry->bytes < *bytes) {
- if (entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
continue;
}
@@ -1812,8 +1819,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
}
if (entry->bytes < *bytes + align_off) {
- if (entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
continue;
}
@@ -1825,8 +1832,10 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
*offset = tmp;
*bytes = size;
return entry;
- } else if (size > *max_extent_size) {
- *max_extent_size = size;
+ } else {
+ *max_extent_size =
+ max(get_max_extent_size(entry),
+ *max_extent_size);
}
continue;
}
@@ -2449,6 +2458,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
struct rb_node *n;
int count = 0;
+ spin_lock(&ctl->tree_lock);
for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
@@ -2457,6 +2467,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
+ spin_unlock(&ctl->tree_lock);
btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
btrfs_info(fs_info,
@@ -2685,8 +2696,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
if (err) {
- if (search_bytes > *max_extent_size)
- *max_extent_size = search_bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
return 0;
}
@@ -2723,8 +2734,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
entry = rb_entry(node, struct btrfs_free_space, offset_index);
while (1) {
- if (entry->bytes < bytes && entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ if (entry->bytes < bytes)
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 181c58b23110..d3df5b52278c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -502,6 +502,7 @@ again:
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
+ nr_pages = 0;
goto cont;
}
@@ -2940,6 +2941,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
bool truncated = false;
bool range_locked = false;
bool clear_new_delalloc_bytes = false;
+ bool clear_reserved_extent = true;
if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
@@ -3043,10 +3045,12 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
logical_len, logical_len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
- if (!ret)
+ if (!ret) {
+ clear_reserved_extent = false;
btrfs_release_delalloc_bytes(fs_info,
ordered_extent->start,
ordered_extent->disk_len);
+ }
}
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset, ordered_extent->len,
@@ -3107,8 +3111,13 @@ out:
* wrong we need to return the space for this ordered extent
* back to the allocator. We only free the extent in the
* truncated case if we didn't write out the extent at all.
+ *
+ * If we made it past insert_reserved_file_extent before we
+ * errored out then we don't need to do this as the accounting
+ * has already been done.
*/
if ((ret || !logical_len) &&
+ clear_reserved_extent &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
btrfs_free_reserved_extent(fs_info,
@@ -5259,11 +5268,13 @@ static void evict_inode_truncate_pages(struct inode *inode)
struct extent_state *cached_state = NULL;
u64 start;
u64 end;
+ unsigned state_flags;
node = rb_first(&io_tree->state);
state = rb_entry(node, struct extent_state, rb_node);
start = state->start;
end = state->end;
+ state_flags = state->state;
spin_unlock(&io_tree->lock);
lock_extent_bits(io_tree, start, end, &cached_state);
@@ -5276,7 +5287,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
*
* Note, end is the bytenr of last byte, so we need + 1 here.
*/
- if (state->state & EXTENT_DELALLOC)
+ if (state_flags & EXTENT_DELALLOC)
btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
clear_extent_bit(io_tree, start, end,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a990a9045139..3ca6943827ef 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3629,26 +3629,6 @@ out_unlock:
return ret;
}
-int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
- struct file *dst_file, loff_t dst_loff,
- u64 olen)
-{
- struct inode *src = file_inode(src_file);
- struct inode *dst = file_inode(dst_file);
- u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
-
- if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
- /*
- * Btrfs does not support blocksize < page_size. As a
- * result, btrfs_cmp_data() won't correctly handle
- * this situation without an update.
- */
- return -EINVAL;
- }
-
- return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
-}
-
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
struct inode *inode,
u64 endoff,
@@ -4350,10 +4330,34 @@ out_unlock:
return ret;
}
-int btrfs_clone_file_range(struct file *src_file, loff_t off,
- struct file *dst_file, loff_t destoff, u64 len)
+loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff, loff_t len,
+ unsigned int remap_flags)
{
- return btrfs_clone_files(dst_file, src_file, off, len, destoff);
+ int ret;
+
+ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ return -EINVAL;
+
+ if (remap_flags & REMAP_FILE_DEDUP) {
+ struct inode *src = file_inode(src_file);
+ struct inode *dst = file_inode(dst_file);
+ u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
+
+ if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
+ /*
+ * Btrfs does not support blocksize < page_size. As a
+ * result, btrfs_cmp_data() won't correctly handle
+ * this situation without an update.
+ */
+ return -EINVAL;
+ }
+
+ ret = btrfs_extent_same(src, off, len, dst, destoff);
+ } else {
+ ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
+ }
+ return ret < 0 ? ret : len;
}
static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5686290a50e1..d1eeef9ec5da 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2283,15 +2283,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- /*
- * If fs has been frozen, we can not handle delayed iputs, otherwise
- * it'll result in deadlock about SB_FREEZE_FS.
- */
- if (current != fs_info->transaction_kthread &&
- current != fs_info->cleaner_kthread &&
- !test_bit(BTRFS_FS_FROZEN, &fs_info->flags))
- btrfs_run_delayed_iputs(fs_info);
-
return ret;
scrub_continue:
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 0dba09334a16..e07f3376b7df 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4390,7 +4390,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&extents);
- down_write(&inode->dio_sem);
write_lock(&tree->lock);
test_gen = root->fs_info->last_trans_committed;
logged_start = start;
@@ -4456,7 +4455,6 @@ process:
}
WARN_ON(!list_empty(&extents));
write_unlock(&tree->lock);
- up_write(&inode->dio_sem);
btrfs_release_path(path);
if (!ret)
@@ -4652,7 +4650,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
ASSERT(len == i_size ||
(len == fs_info->sectorsize &&
btrfs_file_extent_compression(leaf, extent) !=
- BTRFS_COMPRESS_NONE));
+ BTRFS_COMPRESS_NONE) ||
+ (len < i_size && i_size < fs_info->sectorsize));
return 0;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index d60d61e8ed7d..1286c2b95498 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3060,6 +3060,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
*/
bio = bio_alloc(GFP_NOIO, 1);
+ if (wbc) {
+ wbc_init_bio(wbc, bio);
+ wbc_account_io(wbc, bh->b_page, bh->b_size);
+ }
+
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
@@ -3079,11 +3084,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
- if (wbc) {
- wbc_init_bio(wbc, bio);
- wbc_account_io(wbc, bh->b_page, bh->b_size);
- }
-
submit_bio(bio);
return 0;
}
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 027408d55aee..5f0103f40079 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -104,6 +104,11 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
struct timespec64 old_ctime = inode->i_ctime;
umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
+ if (ceph_snap(inode) != CEPH_NOSNAP) {
+ ret = -EROFS;
+ goto out;
+ }
+
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
@@ -138,11 +143,6 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
goto out_free;
}
- if (ceph_snap(inode) != CEPH_NOSNAP) {
- ret = -EROFS;
- goto out_free;
- }
-
if (new_mode != old_mode) {
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
@@ -206,10 +206,9 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
tmp_buf = kmalloc(max(val_size1, val_size2), GFP_KERNEL);
if (!tmp_buf)
goto out_err;
- pagelist = kmalloc(sizeof(struct ceph_pagelist), GFP_KERNEL);
+ pagelist = ceph_pagelist_alloc(GFP_KERNEL);
if (!pagelist)
goto out_err;
- ceph_pagelist_init(pagelist);
err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
if (err)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9c332a6f6667..8eade7a993c1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -322,7 +322,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
/* caller of readpages does not hold buffer and read caps
* (fadvise, madvise and readahead cases) */
int want = CEPH_CAP_FILE_CACHE;
- ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
+ ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, true, &got);
if (ret < 0) {
dout("start_read %p, error getting cap\n", inode);
} else if (!(got & want)) {
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dd7dfdd2ba13..f3496db4bb3e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -519,9 +519,9 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
* -> we take mdsc->cap_delay_lock
*/
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci)
+ struct ceph_inode_info *ci,
+ bool set_timeout)
{
- __cap_set_timeouts(mdsc, ci);
dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
ci->i_ceph_flags, ci->i_hold_caps_max);
if (!mdsc->stopping) {
@@ -531,6 +531,8 @@ static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
goto no_change;
list_del_init(&ci->i_cap_delay_list);
}
+ if (set_timeout)
+ __cap_set_timeouts(mdsc, ci);
list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
spin_unlock(&mdsc->cap_delay_lock);
@@ -720,7 +722,7 @@ void ceph_add_cap(struct inode *inode,
dout(" issued %s, mds wanted %s, actual %s, queueing\n",
ceph_cap_string(issued), ceph_cap_string(wanted),
ceph_cap_string(actual_wanted));
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, true);
}
if (flags & CEPH_CAP_FLAG_AUTH) {
@@ -1647,7 +1649,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
(mask & CEPH_CAP_FILE_BUFFER))
dirty |= I_DIRTY_DATASYNC;
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, true);
return dirty;
}
@@ -2065,7 +2067,7 @@ ack:
/* Reschedule delayed caps release if we delayed anything */
if (delayed)
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, false);
spin_unlock(&ci->i_ceph_lock);
@@ -2125,7 +2127,7 @@ retry:
if (delayed) {
spin_lock(&ci->i_ceph_lock);
- __cap_delay_requeue(mdsc, ci);
+ __cap_delay_requeue(mdsc, ci, true);
spin_unlock(&ci->i_ceph_lock);
}
} else {
@@ -2671,17 +2673,18 @@ static void check_max_size(struct inode *inode, loff_t endoff)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
-int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
+int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
+ bool nonblock, int *got)
{
int ret, err = 0;
BUG_ON(need & ~CEPH_CAP_FILE_RD);
- BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
+ BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED));
ret = ceph_pool_perm_check(ci, need);
if (ret < 0)
return ret;
- ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
+ ret = try_get_cap_refs(ci, need, want, 0, nonblock, got, &err);
if (ret) {
if (err == -EAGAIN) {
ret = 0;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 92ab20433682..27cad84dab23 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
+#include <linux/ceph/striper.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -557,90 +558,26 @@ enum {
};
/*
- * Read a range of bytes striped over one or more objects. Iterate over
- * objects we stripe over. (That's not atomic, but good enough for now.)
- *
- * If we get a short result from the OSD, check against i_size; we need to
- * only return a short read to the caller if we hit EOF.
- */
-static int striped_read(struct inode *inode,
- u64 pos, u64 len,
- struct page **pages, int num_pages,
- int page_align, int *checkeof)
-{
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_inode_info *ci = ceph_inode(inode);
- u64 this_len;
- loff_t i_size;
- int page_idx;
- int ret, read = 0;
- bool hit_stripe, was_short;
-
- /*
- * we may need to do multiple reads. not atomic, unfortunately.
- */
-more:
- this_len = len;
- page_idx = (page_align + read) >> PAGE_SHIFT;
- ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
- &ci->i_layout, pos, &this_len,
- ci->i_truncate_seq, ci->i_truncate_size,
- pages + page_idx, num_pages - page_idx,
- ((page_align + read) & ~PAGE_MASK));
- if (ret == -ENOENT)
- ret = 0;
- hit_stripe = this_len < len;
- was_short = ret >= 0 && ret < this_len;
- dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
- ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
-
- i_size = i_size_read(inode);
- if (ret >= 0) {
- if (was_short && (pos + ret < i_size)) {
- int zlen = min(this_len - ret, i_size - pos - ret);
- int zoff = page_align + read + ret;
- dout(" zero gap %llu to %llu\n",
- pos + ret, pos + ret + zlen);
- ceph_zero_page_vector_range(zoff, zlen, pages);
- ret += zlen;
- }
-
- read += ret;
- pos += ret;
- len -= ret;
-
- /* hit stripe and need continue*/
- if (len && hit_stripe && pos < i_size)
- goto more;
- }
-
- if (read > 0) {
- ret = read;
- /* did we bounce off eof? */
- if (pos + len > i_size)
- *checkeof = CHECK_EOF;
- }
-
- dout("striped_read returns %d\n", ret);
- return ret;
-}
-
-/*
* Completely synchronous read and write methods. Direct from __user
* buffer to osd, or directly to user pages (if O_DIRECT).
*
- * If the read spans object boundary, just do multiple reads.
+ * If the read spans object boundary, just do multiple reads. (That's not
+ * atomic, but good enough for now.)
+ *
+ * If we get a short result from the OSD, check against i_size; we need to
+ * only return a short read to the caller if we hit EOF.
*/
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
- int *checkeof)
+ int *retry_op)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct page **pages;
- u64 off = iocb->ki_pos;
- int num_pages;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
ssize_t ret;
- size_t len = iov_iter_count(to);
+ u64 off = iocb->ki_pos;
+ u64 len = iov_iter_count(to);
dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
@@ -653,61 +590,118 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
* but it will at least behave sensibly when they are
* in sequence.
*/
- ret = filemap_write_and_wait_range(inode->i_mapping, off,
- off + len);
+ ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
if (ret < 0)
return ret;
- if (unlikely(to->type & ITER_PIPE)) {
+ ret = 0;
+ while ((len = iov_iter_count(to)) > 0) {
+ struct ceph_osd_request *req;
+ struct page **pages;
+ int num_pages;
size_t page_off;
- ret = iov_iter_get_pages_alloc(to, &pages, len,
- &page_off);
- if (ret <= 0)
- return -ENOMEM;
- num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
+ u64 i_size;
+ bool more;
+
+ req = ceph_osdc_new_request(osdc, &ci->i_layout,
+ ci->i_vino, off, &len, 0, 1,
+ CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
+ NULL, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ break;
+ }
+
+ more = len < iov_iter_count(to);
- ret = striped_read(inode, off, ret, pages, num_pages,
- page_off, checkeof);
- if (ret > 0) {
- iov_iter_advance(to, ret);
- off += ret;
+ if (unlikely(iov_iter_is_pipe(to))) {
+ ret = iov_iter_get_pages_alloc(to, &pages, len,
+ &page_off);
+ if (ret <= 0) {
+ ceph_osdc_put_request(req);
+ ret = -ENOMEM;
+ break;
+ }
+ num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
+ if (ret < len) {
+ len = ret;
+ osd_req_op_extent_update(req, 0, len);
+ more = false;
+ }
} else {
- iov_iter_advance(to, 0);
+ num_pages = calc_pages_for(off, len);
+ page_off = off & ~PAGE_MASK;
+ pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ ceph_osdc_put_request(req);
+ ret = PTR_ERR(pages);
+ break;
+ }
}
- ceph_put_page_vector(pages, num_pages, false);
- } else {
- num_pages = calc_pages_for(off, len);
- pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- ret = striped_read(inode, off, len, pages, num_pages,
- (off & ~PAGE_MASK), checkeof);
- if (ret > 0) {
- int l, k = 0;
- size_t left = ret;
-
- while (left) {
- size_t page_off = off & ~PAGE_MASK;
- size_t copy = min_t(size_t, left,
- PAGE_SIZE - page_off);
- l = copy_page_to_iter(pages[k++], page_off,
- copy, to);
- off += l;
- left -= l;
- if (l < copy)
+
+ osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
+ false, false);
+ ret = ceph_osdc_start_request(osdc, req, false);
+ if (!ret)
+ ret = ceph_osdc_wait_request(osdc, req);
+ ceph_osdc_put_request(req);
+
+ i_size = i_size_read(inode);
+ dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
+ off, len, ret, i_size, (more ? " MORE" : ""));
+
+ if (ret == -ENOENT)
+ ret = 0;
+ if (ret >= 0 && ret < len && (off + ret < i_size)) {
+ int zlen = min(len - ret, i_size - off - ret);
+ int zoff = page_off + ret;
+ dout("sync_read zero gap %llu~%llu\n",
+ off + ret, off + ret + zlen);
+ ceph_zero_page_vector_range(zoff, zlen, pages);
+ ret += zlen;
+ }
+
+ if (unlikely(iov_iter_is_pipe(to))) {
+ if (ret > 0) {
+ iov_iter_advance(to, ret);
+ off += ret;
+ } else {
+ iov_iter_advance(to, 0);
+ }
+ ceph_put_page_vector(pages, num_pages, false);
+ } else {
+ int idx = 0;
+ size_t left = ret > 0 ? ret : 0;
+ while (left > 0) {
+ size_t len, copied;
+ page_off = off & ~PAGE_MASK;
+ len = min_t(size_t, left, PAGE_SIZE - page_off);
+ copied = copy_page_to_iter(pages[idx++],
+ page_off, len, to);
+ off += copied;
+ left -= copied;
+ if (copied < len) {
+ ret = -EFAULT;
break;
+ }
}
+ ceph_release_page_vector(pages, num_pages);
}
- ceph_release_page_vector(pages, num_pages);
+
+ if (ret <= 0 || off >= i_size || !more)
+ break;
}
if (off > iocb->ki_pos) {
+ if (ret >= 0 &&
+ iov_iter_count(to) > 0 && off >= i_size_read(inode))
+ *retry_op = CHECK_EOF;
ret = off - iocb->ki_pos;
iocb->ki_pos = off;
}
- dout("sync_read result %zd\n", ret);
+ dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
return ret;
}
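
The rewritten ceph_sync_read() above keeps the old striped_read() behaviour for short reads: if the OSD returns fewer bytes than asked for but the local i_size says the file extends further, the gap is zero-filled instead of being treated as EOF. A standalone sketch of just that arithmetic (invented name, plain integers):

#include <stdint.h>

/*
 * How much of a short read should be zero-filled: nothing if the read was
 * complete or genuinely hit EOF, otherwise the shortfall capped at i_size.
 */
static uint64_t bytes_to_zero(uint64_t off, uint64_t len,
			      uint64_t ret, uint64_t i_size)
{
	uint64_t zlen;

	if (ret >= len || off + ret >= i_size)
		return 0;

	zlen = len - ret;			/* what the caller still expects */
	if (zlen > i_size - off - ret)		/* ...but not past local EOF */
		zlen = i_size - off - ret;
	return zlen;
}

For example, off=0, len=8192, ret=4096 and i_size=6144 gives 2048 bytes of zero fill; with i_size=4096 it gives none, and the short read is a real EOF.
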
@@ -821,7 +815,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
aio_req->total_len = rc + zlen;
}
- iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
+ iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
osd_data->num_bvecs,
osd_data->bvec_pos.iter.bi_size);
iov_iter_advance(&i, rc);
@@ -865,7 +859,7 @@ static void ceph_aio_retry_work(struct work_struct *work)
}
spin_unlock(&ci->i_ceph_lock);
- req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
+ req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
false, GFP_NOFS);
if (!req) {
ret = -ENOMEM;
@@ -877,6 +871,11 @@ static void ceph_aio_retry_work(struct work_struct *work)
ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
+ req->r_ops[0] = orig_req->r_ops[0];
+
+ req->r_mtime = aio_req->mtime;
+ req->r_data_offset = req->r_ops[0].extent.offset;
+
ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
if (ret) {
ceph_osdc_put_request(req);
@@ -884,11 +883,6 @@ static void ceph_aio_retry_work(struct work_struct *work)
goto out;
}
- req->r_ops[0] = orig_req->r_ops[0];
-
- req->r_mtime = aio_req->mtime;
- req->r_data_offset = req->r_ops[0].extent.offset;
-
ceph_osdc_put_request(orig_req);
req->r_callback = ceph_aio_complete_req;
@@ -1044,8 +1038,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
int zlen = min_t(size_t, len - ret,
size - pos - ret);
- iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
- len);
+ iov_iter_bvec(&i, READ, bvecs, num_pages, len);
iov_iter_advance(&i, ret);
iov_iter_zero(zlen, &i);
ret += zlen;
@@ -1735,7 +1728,6 @@ static long ceph_fallocate(struct file *file, int mode,
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_cap_flush *prealloc_cf;
int want, got = 0;
int dirty;
@@ -1743,10 +1735,7 @@ static long ceph_fallocate(struct file *file, int mode,
loff_t endoff = 0;
loff_t size;
- if ((offset + length) > max(i_size_read(inode), fsc->max_file_size))
- return -EFBIG;
-
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (!S_ISREG(inode->i_mode))
@@ -1763,18 +1752,6 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
}
- if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
- ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
- ret = -EDQUOT;
- goto unlock;
- }
-
- if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL) &&
- !(mode & FALLOC_FL_PUNCH_HOLE)) {
- ret = -ENOSPC;
- goto unlock;
- }
-
if (ci->i_inline_version != CEPH_INLINE_NONE) {
ret = ceph_uninline_data(file, NULL);
if (ret < 0)
@@ -1782,12 +1759,12 @@ static long ceph_fallocate(struct file *file, int mode,
}
size = i_size_read(inode);
- if (!(mode & FALLOC_FL_KEEP_SIZE)) {
- endoff = offset + length;
- ret = inode_newsize_ok(inode, endoff);
- if (ret)
- goto unlock;
- }
+
+ /* Are we punching a hole beyond EOF? */
+ if (offset >= size)
+ goto unlock;
+ if ((offset + length) > size)
+ length = size - offset;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
@@ -1798,16 +1775,8 @@ static long ceph_fallocate(struct file *file, int mode,
if (ret < 0)
goto unlock;
- if (mode & FALLOC_FL_PUNCH_HOLE) {
- if (offset < size)
- ceph_zero_pagecache_range(inode, offset, length);
- ret = ceph_zero_objects(inode, offset, length);
- } else if (endoff > size) {
- truncate_pagecache_range(inode, size, -1);
- if (ceph_inode_set_size(inode, endoff))
- ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_AUTHONLY, NULL);
- }
+ ceph_zero_pagecache_range(inode, offset, length);
+ ret = ceph_zero_objects(inode, offset, length);
if (!ret) {
spin_lock(&ci->i_ceph_lock);
@@ -1817,9 +1786,6 @@ static long ceph_fallocate(struct file *file, int mode,
spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
- if ((endoff > size) &&
- ceph_quota_is_max_bytes_approaching(inode, endoff))
- ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
}
ceph_put_cap_refs(ci, got);
@@ -1829,6 +1795,300 @@ unlock:
return ret;
}
+/*
+ * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
+ * src_ci. Two attempts are made to obtain both caps, and an error is return if
+ * this fails; zero is returned on success.
+ */
+static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
+ loff_t src_endoff, int *src_got,
+ struct ceph_inode_info *dst_ci,
+ loff_t dst_endoff, int *dst_got)
+{
+ int ret = 0;
+ bool retrying = false;
+
+retry_caps:
+ ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
+ dst_endoff, dst_got, NULL);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Since we're already holding the FILE_WR capability for the dst file,
+ * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
+ * retry dance instead to try to get both capabilities.
+ */
+ ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
+ false, src_got);
+ if (ret <= 0) {
+ /* Start by dropping dst_ci caps and getting src_ci caps */
+ ceph_put_cap_refs(dst_ci, *dst_got);
+ if (retrying) {
+ if (!ret)
+ /* ceph_try_get_caps masks EAGAIN */
+ ret = -EAGAIN;
+ return ret;
+ }
+ ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
+ CEPH_CAP_FILE_SHARED, src_endoff,
+ src_got, NULL);
+ if (ret < 0)
+ return ret;
+ /* ... drop src_ci caps too, and retry */
+ ceph_put_cap_refs(src_ci, *src_got);
+ retrying = true;
+ goto retry_caps;
+ }
+ return ret;
+}
+
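
get_rd_wr_caps() above avoids deadlock by never blocking on the source caps while holding the destination caps: it tries non-blockingly first, and on failure drops what it holds, waits for the other side separately, then retries once. The same dance expressed with plain mutexes (invented names, not ceph code) looks like this:

#include <pthread.h>

/*
 * Take both locks without ever sleeping on one while holding the other:
 * block on 'a', try 'b' non-blockingly, and on failure drop 'a', wait for
 * 'b' on its own, then retry once before giving up.
 */
static int lock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
	int retried = 0;

	for (;;) {
		pthread_mutex_lock(a);			/* like ceph_get_caps() */
		if (pthread_mutex_trylock(b) == 0)	/* like ceph_try_get_caps() */
			return 0;			/* holding both */

		pthread_mutex_unlock(a);
		if (retried++)
			return -1;			/* like the -EAGAIN path */

		pthread_mutex_lock(b);			/* wait for 'b' alone... */
		pthread_mutex_unlock(b);		/* ...then retry the pair */
	}
}
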
+static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
+ struct ceph_inode_info *dst_ci, int dst_got)
+{
+ ceph_put_cap_refs(src_ci, src_got);
+ ceph_put_cap_refs(dst_ci, dst_got);
+}
+
+/*
+ * This function does several size-related checks, returning an error if:
+ * - source file is smaller than off+len
+ * - destination file size is not OK (inode_newsize_ok())
+ * - max bytes quotas is exceeded
+ */
+static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
+ loff_t src_off, loff_t dst_off, size_t len)
+{
+ loff_t size, endoff;
+
+ size = i_size_read(src_inode);
+ /*
+ * Don't copy beyond source file EOF. Instead of simply setting length
+ * to (size - src_off), just drop to VFS default implementation, as the
+ * local i_size may be stale due to other clients writing to the source
+ * inode.
+ */
+ if (src_off + len > size) {
+ dout("Copy beyond EOF (%llu + %zu > %llu)\n",
+ src_off, len, size);
+ return -EOPNOTSUPP;
+ }
+ size = i_size_read(dst_inode);
+
+ endoff = dst_off + len;
+ if (inode_newsize_ok(dst_inode, endoff))
+ return -EOPNOTSUPP;
+
+ if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
+ return -EDQUOT;
+
+ return 0;
+}
+
+static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off,
+ size_t len, unsigned int flags)
+{
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *dst_inode = file_inode(dst_file);
+ struct ceph_inode_info *src_ci = ceph_inode(src_inode);
+ struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
+ struct ceph_cap_flush *prealloc_cf;
+ struct ceph_object_locator src_oloc, dst_oloc;
+ struct ceph_object_id src_oid, dst_oid;
+ loff_t endoff = 0, size;
+ ssize_t ret = -EIO;
+ u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
+ u32 src_objlen, dst_objlen, object_size;
+ int src_got = 0, dst_got = 0, err, dirty;
+ bool do_final_copy = false;
+
+ if (src_inode == dst_inode)
+ return -EINVAL;
+ if (ceph_snap(dst_inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+ /*
+ * Some of the checks below will return -EOPNOTSUPP, which will force a
+ * fallback to the default VFS copy_file_range implementation. This is
+ * desirable in several cases (for example, when 'len' is smaller than the
+ * size of the objects, or in cases where that would be more
+ * efficient).
+ */
+
+ if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
+ return -EOPNOTSUPP;
+
+ if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
+ (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
+ (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
+ return -EOPNOTSUPP;
+
+ if (len < src_ci->i_layout.object_size)
+ return -EOPNOTSUPP; /* no remote copy will be done */
+
+ prealloc_cf = ceph_alloc_cap_flush();
+ if (!prealloc_cf)
+ return -ENOMEM;
+
+ /* Start by sync'ing the source file */
+ ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
+ * clients may have dirty data in their caches. And OSDs know nothing
+ * about caps, so they can't safely do the remote object copies.
+ */
+ err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
+ dst_ci, (dst_off + len), &dst_got);
+ if (err < 0) {
+ dout("get_rd_wr_caps returned %d\n", err);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
+ if (ret < 0)
+ goto out_caps;
+
+ size = i_size_read(dst_inode);
+ endoff = dst_off + len;
+
+ /* Drop dst file cached pages */
+ ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
+ dst_off >> PAGE_SHIFT,
+ endoff >> PAGE_SHIFT);
+ if (ret < 0) {
+ dout("Failed to invalidate inode pages (%zd)\n", ret);
+ ret = 0; /* XXX */
+ }
+ src_oloc.pool = src_ci->i_layout.pool_id;
+ src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
+ dst_oloc.pool = dst_ci->i_layout.pool_id;
+ dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
+
+ ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
+ src_ci->i_layout.object_size,
+ &src_objnum, &src_objoff, &src_objlen);
+ ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
+ dst_ci->i_layout.object_size,
+ &dst_objnum, &dst_objoff, &dst_objlen);
+ /* object-level offsets need to be the same */
+ if (src_objoff != dst_objoff) {
+ ret = -EOPNOTSUPP;
+ goto out_caps;
+ }
+
+ /*
+ * Do a manual copy if the object offset isn't object aligned.
+ * 'src_objlen' contains the bytes left until the end of the object,
+ * starting at src_off.
+ */
+ if (src_objoff) {
+ /*
+ * we need to temporarily drop all caps as we'll be calling
+ * {read,write}_iter, which will get caps again.
+ */
+ put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
+ ret = do_splice_direct(src_file, &src_off, dst_file,
+ &dst_off, src_objlen, flags);
+ if (ret < 0) {
+ dout("do_splice_direct returned %d\n", err);
+ goto out;
+ }
+ len -= ret;
+ err = get_rd_wr_caps(src_ci, (src_off + len),
+ &src_got, dst_ci,
+ (dst_off + len), &dst_got);
+ if (err < 0)
+ goto out;
+ err = is_file_size_ok(src_inode, dst_inode,
+ src_off, dst_off, len);
+ if (err < 0)
+ goto out_caps;
+ }
+ object_size = src_ci->i_layout.object_size;
+ while (len >= object_size) {
+ ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
+ object_size, &src_objnum,
+ &src_objoff, &src_objlen);
+ ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
+ object_size, &dst_objnum,
+ &dst_objoff, &dst_objlen);
+ ceph_oid_init(&src_oid);
+ ceph_oid_printf(&src_oid, "%llx.%08llx",
+ src_ci->i_vino.ino, src_objnum);
+ ceph_oid_init(&dst_oid);
+ ceph_oid_printf(&dst_oid, "%llx.%08llx",
+ dst_ci->i_vino.ino, dst_objnum);
+ /* Do an object remote copy */
+ err = ceph_osdc_copy_from(
+ &ceph_inode_to_client(src_inode)->client->osdc,
+ src_ci->i_vino.snap, 0,
+ &src_oid, &src_oloc,
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
+ &dst_oid, &dst_oloc,
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
+ if (err) {
+ dout("ceph_osdc_copy_from returned %d\n", err);
+ if (!ret)
+ ret = err;
+ goto out_caps;
+ }
+ len -= object_size;
+ src_off += object_size;
+ dst_off += object_size;
+ ret += object_size;
+ }
+
+ if (len)
+ /* We still need one final local copy */
+ do_final_copy = true;
+
+ file_update_time(dst_file);
+ if (endoff > size) {
+ int caps_flags = 0;
+
+ /* Let the MDS know about dst file size change */
+ if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
+ caps_flags |= CHECK_CAPS_NODELAY;
+ if (ceph_inode_set_size(dst_inode, endoff))
+ caps_flags |= CHECK_CAPS_AUTHONLY;
+ if (caps_flags)
+ ceph_check_caps(dst_ci, caps_flags, NULL);
+ }
+ /* Mark Fw dirty */
+ spin_lock(&dst_ci->i_ceph_lock);
+ dst_ci->i_inline_version = CEPH_INLINE_NONE;
+ dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
+ spin_unlock(&dst_ci->i_ceph_lock);
+ if (dirty)
+ __mark_inode_dirty(dst_inode, dirty);
+
+out_caps:
+ put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
+
+ if (do_final_copy) {
+ err = do_splice_direct(src_file, &src_off, dst_file,
+ &dst_off, len, flags);
+ if (err < 0) {
+ dout("do_splice_direct returned %d\n", err);
+ goto out;
+ }
+ len -= err;
+ ret += err;
+ }
+
+out:
+ ceph_free_cap_flush(prealloc_cf);
+
+ return ret;
+}
+
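
ceph_copy_file_range() above only issues remote 'copy-from' operations for whole objects, and only when the source and destination offsets sit at the same offset inside their respective objects; everything else falls back to do_splice_direct(). A simplified version of that mapping check, assuming the trivial layout where stripe_count == 1 (the real computation is done by ceph_calc_file_object_mapping()), with invented helper names:

#include <stdint.h>

struct obj_pos {
	uint64_t objnum;	/* which object the offset falls in */
	uint64_t objoff;	/* offset within that object */
};

/* Trivial layout only: object n holds bytes [n*object_size, (n+1)*object_size). */
static struct obj_pos map_offset(uint64_t off, uint64_t object_size)
{
	struct obj_pos p = {
		.objnum = off / object_size,
		.objoff = off % object_size,
	};
	return p;
}

/*
 * Whole-object remote copies are only attempted when source and destination
 * land at the same offset inside their objects; otherwise the caller falls
 * back to a local splice.
 */
static int can_copy_remotely(uint64_t src_off, uint64_t dst_off,
			     uint64_t object_size)
{
	return map_offset(src_off, object_size).objoff ==
	       map_offset(dst_off, object_size).objoff;
}
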
const struct file_operations ceph_file_fops = {
.open = ceph_open,
.release = ceph_release,
@@ -1844,5 +2104,5 @@ const struct file_operations ceph_file_fops = {
.unlocked_ioctl = ceph_ioctl,
.compat_ioctl = ceph_ioctl,
.fallocate = ceph_fallocate,
+ .copy_file_range = ceph_copy_file_range,
};
-
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ebc7bdaed2d0..79dd5e6ed755 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1132,8 +1132,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
if (IS_ERR(realdn)) {
pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
PTR_ERR(realdn), dn, in, ceph_vinop(in));
- dput(dn);
- dn = realdn; /* note realdn contains the error */
+ dn = realdn;
+ /*
+ * Caller should release 'dn' in the case of error.
+ * If 'req->r_dentry' is passed to this function,
+ * caller should leave 'req->r_dentry' untouched.
+ */
goto out;
} else if (realdn) {
dout("dn %p (%d) spliced with %p (%d) "
@@ -1196,7 +1200,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
WARN_ON_ONCE(1);
}
- if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
+ if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
+ test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
+ !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
struct qstr dname;
struct dentry *dn, *parent;
@@ -1677,7 +1683,6 @@ retry_lookup:
if (IS_ERR(realdn)) {
err = PTR_ERR(realdn);
d_drop(dn);
- dn = NULL;
goto next_item;
}
dn = realdn;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index bc43c822426a..67a9aeb2f4ec 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2071,7 +2071,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
if (req->r_old_dentry_drop)
len += req->r_old_dentry->d_name.len;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
+ msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
if (!msg) {
msg = ERR_PTR(-ENOMEM);
goto out_free2;
@@ -2136,7 +2136,6 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
if (req->r_pagelist) {
struct ceph_pagelist *pagelist = req->r_pagelist;
- refcount_inc(&pagelist->refcnt);
ceph_msg_data_add_pagelist(msg, pagelist);
msg->hdr.data_len = cpu_to_le32(pagelist->length);
} else {
@@ -3126,12 +3125,11 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
pr_info("mds%d reconnect start\n", mds);
- pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
goto fail_nopagelist;
- ceph_pagelist_init(pagelist);
- reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
+ reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
if (!reply)
goto fail_nomsg;
@@ -3241,6 +3239,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
mutex_unlock(&mdsc->mutex);
up_read(&mdsc->snap_rwsem);
+ ceph_pagelist_release(pagelist);
return;
fail:
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index eab1359d0553..b5ecd6f50360 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -165,6 +165,8 @@ enum {
Opt_noacl,
Opt_quotadf,
Opt_noquotadf,
+ Opt_copyfrom,
+ Opt_nocopyfrom,
};
static match_table_t fsopt_tokens = {
@@ -203,6 +205,8 @@ static match_table_t fsopt_tokens = {
{Opt_noacl, "noacl"},
{Opt_quotadf, "quotadf"},
{Opt_noquotadf, "noquotadf"},
+ {Opt_copyfrom, "copyfrom"},
+ {Opt_nocopyfrom, "nocopyfrom"},
{-1, NULL}
};
@@ -355,6 +359,12 @@ static int parse_fsopt_token(char *c, void *private)
case Opt_noquotadf:
fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
break;
+ case Opt_copyfrom:
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
+ break;
+ case Opt_nocopyfrom:
+ fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
+ break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
case Opt_acl:
fsopt->sb_flags |= SB_POSIXACL;
@@ -553,6 +563,9 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noacl");
#endif
+ if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM)
+ seq_puts(m, ",nocopyfrom");
+
if (fsopt->mds_namespace)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
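A small sketch of the copyfrom/nocopyfrom toggle added above, assuming only that the option controls a single NOCOPYFROM bit in a flags word (bit position mirrored from CEPH_MOUNT_OPT_NOCOPYFROM). The real parser goes through match_table_t; this only shows the flag semantics:

#include <stdio.h>
#include <string.h>

#define OPT_NOCOPYFROM (1u << 14)   /* mirrors CEPH_MOUNT_OPT_NOCOPYFROM */

static void parse_copyfrom_opt(const char *token, unsigned int *flags)
{
        if (!strcmp(token, "copyfrom"))
                *flags &= ~OPT_NOCOPYFROM;      /* allow the RADOS copy-from op */
        else if (!strcmp(token, "nocopyfrom"))
                *flags |= OPT_NOCOPYFROM;       /* force local copies only */
}

int main(void)
{
        unsigned int flags = 0;

        parse_copyfrom_opt("nocopyfrom", &flags);
        printf("nocopyfrom set: %s\n", (flags & OPT_NOCOPYFROM) ? "yes" : "no");
        parse_copyfrom_opt("copyfrom", &flags);
        printf("nocopyfrom set: %s\n", (flags & OPT_NOCOPYFROM) ? "yes" : "no");
        return 0;
}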
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 582e28fd1b7b..c005a5400f2e 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -40,6 +40,7 @@
#define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */
#define CEPH_MOUNT_OPT_MOUNTWAIT (1<<12) /* mount waits if no mds is up */
#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
+#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE
@@ -1008,7 +1009,7 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
loff_t endoff, int *got, struct page **pinned_page);
extern int ceph_try_get_caps(struct ceph_inode_info *ci,
- int need, int want, int *got);
+ int need, int want, bool nonblock, int *got);
/* for counting open files by mode */
extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 5cc8b94f8206..316f6ad10644 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -951,11 +951,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
if (size > 0) {
/* copy value into pagelist */
- pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
return -ENOMEM;
- ceph_pagelist_init(pagelist);
err = ceph_pagelist_append(pagelist, value, size);
if (err)
goto out;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 3e812428ac8d..ba178b09de0b 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -145,6 +145,58 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
}
+static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+{
+ struct list_head *stmp, *tmp, *tmp1, *tmp2;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
+
+ seq_puts(m, "# Version:1\n");
+ seq_puts(m, "# Format:\n");
+ seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
+#ifdef CONFIG_CIFS_DEBUG2
+ seq_printf(m, " <filename> <mid>\n");
+#else
+ seq_printf(m, " <filename>\n");
+#endif /* CIFS_DEBUG2 */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(stmp, &cifs_tcp_ses_list) {
+ server = list_entry(stmp, struct TCP_Server_Info,
+ tcp_ses_list);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each(tmp1, &ses->tcon_list) {
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp2, &tcon->openFileList) {
+ cfile = list_entry(tmp2, struct cifsFileInfo,
+ tlist);
+ seq_printf(m,
+ "0x%x 0x%llx 0x%x %d %d %d %s",
+ tcon->tid,
+ cfile->fid.persistent_fid,
+ cfile->f_flags,
+ cfile->count,
+ cfile->pid,
+ from_kuid(&init_user_ns, cfile->uid),
+ cfile->dentry->d_name.name);
+#ifdef CONFIG_CIFS_DEBUG2
+ seq_printf(m, " 0x%llx\n", cfile->fid.mid);
+#else
+ seq_printf(m, "\n");
+#endif /* CIFS_DEBUG2 */
+ }
+ spin_unlock(&tcon->open_file_lock);
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ seq_putc(m, '\n');
+ return 0;
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct list_head *tmp1, *tmp2, *tmp3;
@@ -565,6 +617,9 @@ cifs_proc_init(void)
proc_create_single("DebugData", 0, proc_fs_cifs,
cifs_debug_data_proc_show);
+ proc_create_single("open_files", 0400, proc_fs_cifs,
+ cifs_debug_files_proc_show);
+
proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
@@ -601,6 +656,7 @@ cifs_proc_clean(void)
return;
remove_proc_entry("DebugData", proc_fs_cifs);
+ remove_proc_entry("open_files", proc_fs_cifs);
remove_proc_entry("cifsFYI", proc_fs_cifs);
remove_proc_entry("traceSMB", proc_fs_cifs);
remove_proc_entry("Stats", proc_fs_cifs);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index b611fc2e8984..7f01c6e60791 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -147,8 +147,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
sprintf(dp, ";sec=krb5");
else if (server->sec_mskerberos)
sprintf(dp, ";sec=mskrb5");
- else
- goto out;
+ else {
+ cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
+ sprintf(dp, ";sec=krb5");
+ }
dp = description + strlen(description);
sprintf(dp, ";uid=0x%x",
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 7de9603c54f1..865706edb307 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -992,17 +992,21 @@ const struct inode_operations cifs_symlink_inode_ops = {
.listxattr = cifs_listxattr,
};
-static int cifs_clone_file_range(struct file *src_file, loff_t off,
- struct file *dst_file, loff_t destoff, u64 len)
+static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff, loff_t len,
+ unsigned int remap_flags)
{
struct inode *src_inode = file_inode(src_file);
struct inode *target_inode = file_inode(dst_file);
struct cifsFileInfo *smb_file_src = src_file->private_data;
- struct cifsFileInfo *smb_file_target = dst_file->private_data;
- struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
+ struct cifsFileInfo *smb_file_target;
+ struct cifs_tcon *target_tcon;
unsigned int xid;
int rc;
+ if (remap_flags & ~REMAP_FILE_ADVISORY)
+ return -EINVAL;
+
cifs_dbg(FYI, "clone range\n");
xid = get_xid();
@@ -1013,6 +1017,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
goto out;
}
+ smb_file_target = dst_file->private_data;
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
/*
* Note: cifs case is easier than btrfs since server responsible for
* checks for proper open modes and file type and if it wants
@@ -1042,7 +1049,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
unlock_two_nondirectories(src_inode, target_inode);
out:
free_xid(xid);
- return rc;
+ return rc < 0 ? rc : len;
}
ssize_t cifs_file_copychunk_range(unsigned int xid,
@@ -1151,7 +1158,7 @@ const struct file_operations cifs_file_ops = {
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
};
@@ -1170,15 +1177,14 @@ const struct file_operations cifs_file_strict_ops = {
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
};
const struct file_operations cifs_file_direct_ops = {
- /* BB reevaluate whether they can be done with directio, no cache */
- .read_iter = cifs_user_readv,
- .write_iter = cifs_user_writev,
+ .read_iter = cifs_direct_readv,
+ .write_iter = cifs_direct_writev,
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
@@ -1189,7 +1195,7 @@ const struct file_operations cifs_file_direct_ops = {
.splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.llseek = cifs_llseek,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
@@ -1208,7 +1214,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
};
@@ -1226,15 +1232,14 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
};
const struct file_operations cifs_file_direct_nobrl_ops = {
- /* BB reevaluate whether they can be done with directio, no cache */
- .read_iter = cifs_user_readv,
- .write_iter = cifs_user_writev,
+ .read_iter = cifs_direct_readv,
+ .write_iter = cifs_direct_writev,
.open = cifs_open,
.release = cifs_close,
.fsync = cifs_fsync,
@@ -1244,7 +1249,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.llseek = cifs_llseek,
.setlease = cifs_setlease,
.fallocate = cifs_fallocate,
@@ -1256,7 +1261,7 @@ const struct file_operations cifs_dir_ops = {
.read = generic_read_dir,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
- .clone_file_range = cifs_clone_file_range,
+ .remap_file_range = cifs_remap_file_range,
.llseek = generic_file_llseek,
.fsync = cifs_dir_fsync,
};
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 24e265a51874..4c3b5cfccc49 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -101,8 +101,10 @@ extern int cifs_open(struct inode *inode, struct file *file);
extern int cifs_close(struct inode *inode, struct file *file);
extern int cifs_closedir(struct inode *inode, struct file *file);
extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
+extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
+extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ed1e0fcb69e3..38ab0fca49e1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1125,6 +1125,9 @@ struct cifs_fid {
__u8 create_guid[16];
struct cifs_pending_open *pending_open;
unsigned int epoch;
+#ifdef CONFIG_CIFS_DEBUG2
+ __u64 mid;
+#endif /* CIFS_DEBUG2 */
bool purge_cache;
};
@@ -1183,6 +1186,11 @@ struct cifs_aio_ctx {
unsigned int len;
unsigned int total_len;
bool should_dirty;
+ /*
+ * Indicates if this aio_ctx is for direct_io.
+ * If yes, iter is a copy of the user-passed iov_iter.
+ */
+ bool direct_io;
};
struct cifs_readdata;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 1ce733f3582f..79d842e7240c 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1539,6 +1539,9 @@ struct reparse_symlink_data {
char PathBuffer[0];
} __attribute__((packed));
+/* Flag above */
+#define SYMLINK_FLAG_RELATIVE 0x00000001
+
/* For IO_REPARSE_TAG_NFS */
#define NFS_SPECFILE_LNK 0x00000000014B4E4C
#define NFS_SPECFILE_CHR 0x0000000000524843
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d82f0cc71755..6f24f129a751 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -589,7 +589,7 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
{
struct msghdr smb_msg;
struct kvec iov = {.iov_base = buf, .iov_len = to_read};
- iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read);
+ iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
return cifs_readv_from_socket(server, &smb_msg);
}
@@ -601,7 +601,7 @@ cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
struct msghdr smb_msg;
struct bio_vec bv = {
.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
- iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read);
+ iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
return cifs_readv_from_socket(server, &smb_msg);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c620d4b5d5d4..74c33d5fafc8 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1005,7 +1005,7 @@ cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
* Set the byte-range lock (mandatory style). Returns:
* 1) 0, if we set the lock and don't need to request to the server;
* 2) 1, if no locks prevent us but we need to request to the server;
- * 3) -EACCESS, if there is a lock that prevents us and wait is false.
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
*/
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
@@ -2538,6 +2538,61 @@ wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
}
static int
+cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
+ struct cifs_aio_ctx *ctx)
+{
+ int wait_retry = 0;
+ unsigned int wsize, credits;
+ int rc;
+ struct TCP_Server_Info *server =
+ tlink_tcon(wdata->cfile->tlink)->ses->server;
+
+ /*
+ * Try to resend this wdata, waiting for credits up to 3 seconds.
+ * Note: we are attempting to resend the whole wdata rather than in segments.
+ */
+ do {
+ rc = server->ops->wait_mtu_credits(
+ server, wdata->bytes, &wsize, &credits);
+
+ if (rc)
+ break;
+
+ if (wsize < wdata->bytes) {
+ add_credits_and_wake_if(server, credits, 0);
+ msleep(1000);
+ wait_retry++;
+ }
+ } while (wsize < wdata->bytes && wait_retry < 3);
+
+ if (wsize < wdata->bytes) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = -EAGAIN;
+ while (rc == -EAGAIN) {
+ rc = 0;
+ if (wdata->cfile->invalidHandle)
+ rc = cifs_reopen_file(wdata->cfile, false);
+ if (!rc)
+ rc = server->ops->async_writev(wdata,
+ cifs_uncached_writedata_release);
+ }
+
+ if (!rc) {
+ list_add_tail(&wdata->list, wdata_list);
+ return 0;
+ }
+
+ add_credits_and_wake_if(server, wdata->credits, 0);
+out:
+ kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+
+ return rc;
+}
+
+static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
struct cifsFileInfo *open_file,
struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
@@ -2551,6 +2606,8 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
loff_t saved_offset = offset;
pid_t pid;
struct TCP_Server_Info *server;
+ struct page **pagevec;
+ size_t start;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -2567,38 +2624,79 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
if (rc)
break;
- nr_pages = get_numpages(wsize, len, &cur_len);
- wdata = cifs_writedata_alloc(nr_pages,
+ if (ctx->direct_io) {
+ ssize_t result;
+
+ result = iov_iter_get_pages_alloc(
+ from, &pagevec, wsize, &start);
+ if (result < 0) {
+ cifs_dbg(VFS,
+ "direct_writev couldn't get user pages "
+ "(rc=%zd) iter type %d iov_offset %zd "
+ "count %zd\n",
+ result, from->type,
+ from->iov_offset, from->count);
+ dump_stack();
+ break;
+ }
+ cur_len = (size_t)result;
+ iov_iter_advance(from, cur_len);
+
+ nr_pages =
+ (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ wdata = cifs_writedata_direct_alloc(pagevec,
cifs_uncached_writev_complete);
- if (!wdata) {
- rc = -ENOMEM;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
+ if (!wdata) {
+ rc = -ENOMEM;
+ add_credits_and_wake_if(server, credits, 0);
+ break;
+ }
- rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
- if (rc) {
- kfree(wdata);
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
- num_pages = nr_pages;
- rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
- if (rc) {
- for (i = 0; i < nr_pages; i++)
- put_page(wdata->pages[i]);
- kfree(wdata);
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
+ wdata->page_offset = start;
+ wdata->tailsz =
+ nr_pages > 1 ?
+ cur_len - (PAGE_SIZE - start) -
+ (nr_pages - 2) * PAGE_SIZE :
+ cur_len;
+ } else {
+ nr_pages = get_numpages(wsize, len, &cur_len);
+ wdata = cifs_writedata_alloc(nr_pages,
+ cifs_uncached_writev_complete);
+ if (!wdata) {
+ rc = -ENOMEM;
+ add_credits_and_wake_if(server, credits, 0);
+ break;
+ }
- /*
- * Bring nr_pages down to the number of pages we actually used,
- * and free any pages that we didn't use.
- */
- for ( ; nr_pages > num_pages; nr_pages--)
- put_page(wdata->pages[nr_pages - 1]);
+ rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+ if (rc) {
+ kfree(wdata);
+ add_credits_and_wake_if(server, credits, 0);
+ break;
+ }
+
+ num_pages = nr_pages;
+ rc = wdata_fill_from_iovec(
+ wdata, from, &cur_len, &num_pages);
+ if (rc) {
+ for (i = 0; i < nr_pages; i++)
+ put_page(wdata->pages[i]);
+ kfree(wdata);
+ add_credits_and_wake_if(server, credits, 0);
+ break;
+ }
+
+ /*
+ * Bring nr_pages down to the number of pages we
+ * actually used, and free any pages that we didn't use.
+ */
+ for ( ; nr_pages > num_pages; nr_pages--)
+ put_page(wdata->pages[nr_pages - 1]);
+
+ wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
+ }
wdata->sync_mode = WB_SYNC_ALL;
wdata->nr_pages = nr_pages;
@@ -2607,7 +2705,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
- wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
wdata->credits = credits;
wdata->ctx = ctx;
kref_get(&ctx->refcount);
@@ -2682,13 +2779,18 @@ restart_loop:
INIT_LIST_HEAD(&tmp_list);
list_del_init(&wdata->list);
- iov_iter_advance(&tmp_from,
+ if (ctx->direct_io)
+ rc = cifs_resend_wdata(
+ wdata, &tmp_list, ctx);
+ else {
+ iov_iter_advance(&tmp_from,
wdata->offset - ctx->pos);
- rc = cifs_write_from_iter(wdata->offset,
+ rc = cifs_write_from_iter(wdata->offset,
wdata->bytes, &tmp_from,
ctx->cfile, cifs_sb, &tmp_list,
ctx);
+ }
list_splice(&tmp_list, &ctx->list);
@@ -2701,8 +2803,9 @@ restart_loop:
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
- for (i = 0; i < ctx->npages; i++)
- put_page(ctx->bv[i].bv_page);
+ if (!ctx->direct_io)
+ for (i = 0; i < ctx->npages; i++)
+ put_page(ctx->bv[i].bv_page);
cifs_stats_bytes_written(tcon, ctx->total_len);
set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
@@ -2717,7 +2820,8 @@ restart_loop:
complete(&ctx->done);
}
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t __cifs_writev(
+ struct kiocb *iocb, struct iov_iter *from, bool direct)
{
struct file *file = iocb->ki_filp;
ssize_t total_written = 0;
@@ -2726,13 +2830,18 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
struct cifs_sb_info *cifs_sb;
struct cifs_aio_ctx *ctx;
struct iov_iter saved_from = *from;
+ size_t len = iov_iter_count(from);
int rc;
/*
- * BB - optimize the way when signing is disabled. We can drop this
- * extra memory-to-memory copying and use iovec buffers for constructing
- * write request.
+ * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
+ * In this case, fall back to the non-direct write function.
+ * This could be improved by getting pages directly in ITER_KVEC.
*/
+ if (direct && from->type & ITER_KVEC) {
+ cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
+ direct = false;
+ }
rc = generic_write_checks(iocb, from);
if (rc <= 0)
@@ -2756,10 +2865,16 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
ctx->pos = iocb->ki_pos;
- rc = setup_aio_ctx_iter(ctx, from, WRITE);
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
+ if (direct) {
+ ctx->direct_io = true;
+ ctx->iter = *from;
+ ctx->len = len;
+ } else {
+ rc = setup_aio_ctx_iter(ctx, from, WRITE);
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return rc;
+ }
}
/* grab a lock here due to read response handlers can access ctx */
@@ -2809,6 +2924,16 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
return total_written;
}
+ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
+{
+ return __cifs_writev(iocb, from, true);
+}
+
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+{
+ return __cifs_writev(iocb, from, false);
+}
+
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
@@ -2979,7 +3104,6 @@ cifs_uncached_readdata_release(struct kref *refcount)
kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
for (i = 0; i < rdata->nr_pages; i++) {
put_page(rdata->pages[i]);
- rdata->pages[i] = NULL;
}
cifs_readdata_release(refcount);
}
@@ -3004,7 +3128,7 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
size_t copy = min_t(size_t, remaining, PAGE_SIZE);
size_t written;
- if (unlikely(iter->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(iter))) {
void *addr = kmap_atomic(page);
written = copy_to_iter(addr, copy, iter);
@@ -3106,6 +3230,67 @@ cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
return uncached_fill_pages(server, rdata, iter, iter->count);
}
+static int cifs_resend_rdata(struct cifs_readdata *rdata,
+ struct list_head *rdata_list,
+ struct cifs_aio_ctx *ctx)
+{
+ int wait_retry = 0;
+ unsigned int rsize, credits;
+ int rc;
+ struct TCP_Server_Info *server =
+ tlink_tcon(rdata->cfile->tlink)->ses->server;
+
+ /*
+ * Try to resend this rdata, waiting for credits up to 3 seconds.
+ * Note: we are attempting to resend the whole rdata rather than in segments.
+ */
+ do {
+ rc = server->ops->wait_mtu_credits(server, rdata->bytes,
+ &rsize, &credits);
+
+ if (rc)
+ break;
+
+ if (rsize < rdata->bytes) {
+ add_credits_and_wake_if(server, credits, 0);
+ msleep(1000);
+ wait_retry++;
+ }
+ } while (rsize < rdata->bytes && wait_retry < 3);
+
+ /*
+ * If we can't find enough credits to send this rdata,
+ * release the rdata and return failure; this will pass
+ * whatever I/O amount we have finished to the VFS.
+ */
+ if (rsize < rdata->bytes) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = -EAGAIN;
+ while (rc == -EAGAIN) {
+ rc = 0;
+ if (rdata->cfile->invalidHandle)
+ rc = cifs_reopen_file(rdata->cfile, true);
+ if (!rc)
+ rc = server->ops->async_readv(rdata);
+ }
+
+ if (!rc) {
+ /* Add to aio pending list */
+ list_add_tail(&rdata->list, rdata_list);
+ return 0;
+ }
+
+ add_credits_and_wake_if(server, rdata->credits, 0);
+out:
+ kref_put(&rdata->refcount,
+ cifs_uncached_readdata_release);
+
+ return rc;
+}
+
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
@@ -3117,6 +3302,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
int rc;
pid_t pid;
struct TCP_Server_Info *server;
+ struct page **pagevec;
+ size_t start;
+ struct iov_iter direct_iov = ctx->iter;
server = tlink_tcon(open_file->tlink)->ses->server;
@@ -3125,6 +3313,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
else
pid = current->tgid;
+ if (ctx->direct_io)
+ iov_iter_advance(&direct_iov, offset - ctx->pos);
+
do {
rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
&rsize, &credits);
@@ -3132,20 +3323,59 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
break;
cur_len = min_t(const size_t, len, rsize);
- npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
- /* allocate a readdata struct */
- rdata = cifs_readdata_alloc(npages,
+ if (ctx->direct_io) {
+ ssize_t result;
+
+ result = iov_iter_get_pages_alloc(
+ &direct_iov, &pagevec,
+ cur_len, &start);
+ if (result < 0) {
+ cifs_dbg(VFS,
+ "couldn't get user pages (cur_len=%zd)"
+ " iter type %d"
+ " iov_offset %zd count %zd\n",
+ result, direct_iov.type,
+ direct_iov.iov_offset,
+ direct_iov.count);
+ dump_stack();
+ break;
+ }
+ cur_len = (size_t)result;
+ iov_iter_advance(&direct_iov, cur_len);
+
+ rdata = cifs_readdata_direct_alloc(
+ pagevec, cifs_uncached_readv_complete);
+ if (!rdata) {
+ add_credits_and_wake_if(server, credits, 0);
+ rc = -ENOMEM;
+ break;
+ }
+
+ npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
+ rdata->page_offset = start;
+ rdata->tailsz = npages > 1 ?
+ cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
+ cur_len;
+
+ } else {
+
+ npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
+ /* allocate a readdata struct */
+ rdata = cifs_readdata_alloc(npages,
cifs_uncached_readv_complete);
- if (!rdata) {
- add_credits_and_wake_if(server, credits, 0);
- rc = -ENOMEM;
- break;
- }
+ if (!rdata) {
+ add_credits_and_wake_if(server, credits, 0);
+ rc = -ENOMEM;
+ break;
+ }
- rc = cifs_read_allocate_pages(rdata, npages);
- if (rc)
- goto error;
+ rc = cifs_read_allocate_pages(rdata, npages);
+ if (rc)
+ goto error;
+
+ rdata->tailsz = PAGE_SIZE;
+ }
rdata->cfile = cifsFileInfo_get(open_file);
rdata->nr_pages = npages;
@@ -3153,7 +3383,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
rdata->bytes = cur_len;
rdata->pid = pid;
rdata->pagesz = PAGE_SIZE;
- rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_uncached_read_into_pages;
rdata->copy_into_pages = cifs_uncached_copy_into_pages;
rdata->credits = credits;
@@ -3167,9 +3396,11 @@ error:
if (rc) {
add_credits_and_wake_if(server, rdata->credits, 0);
kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
- if (rc == -EAGAIN)
+ cifs_uncached_readdata_release);
+ if (rc == -EAGAIN) {
+ iov_iter_revert(&direct_iov, cur_len);
continue;
+ }
break;
}
@@ -3225,45 +3456,62 @@ again:
* reading.
*/
if (got_bytes && got_bytes < rdata->bytes) {
- rc = cifs_readdata_to_iov(rdata, to);
+ rc = 0;
+ if (!ctx->direct_io)
+ rc = cifs_readdata_to_iov(rdata, to);
if (rc) {
kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
+ cifs_uncached_readdata_release);
continue;
}
}
- rc = cifs_send_async_read(
+ if (ctx->direct_io) {
+ /*
+ * Re-use rdata as this is a
+ * direct I/O
+ */
+ rc = cifs_resend_rdata(
+ rdata,
+ &tmp_list, ctx);
+ } else {
+ rc = cifs_send_async_read(
rdata->offset + got_bytes,
rdata->bytes - got_bytes,
rdata->cfile, cifs_sb,
&tmp_list, ctx);
+ kref_put(&rdata->refcount,
+ cifs_uncached_readdata_release);
+ }
+
list_splice(&tmp_list, &ctx->list);
- kref_put(&rdata->refcount,
- cifs_uncached_readdata_release);
goto again;
} else if (rdata->result)
rc = rdata->result;
- else
+ else if (!ctx->direct_io)
rc = cifs_readdata_to_iov(rdata, to);
/* if there was a short read -- discard anything left */
if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
rc = -ENODATA;
+
+ ctx->total_len += rdata->got_bytes;
}
list_del_init(&rdata->list);
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
- for (i = 0; i < ctx->npages; i++) {
- if (ctx->should_dirty)
- set_page_dirty(ctx->bv[i].bv_page);
- put_page(ctx->bv[i].bv_page);
- }
+ if (!ctx->direct_io) {
+ for (i = 0; i < ctx->npages; i++) {
+ if (ctx->should_dirty)
+ set_page_dirty(ctx->bv[i].bv_page);
+ put_page(ctx->bv[i].bv_page);
+ }
- ctx->total_len = ctx->len - iov_iter_count(to);
+ ctx->total_len = ctx->len - iov_iter_count(to);
+ }
cifs_stats_bytes_read(tcon, ctx->total_len);
@@ -3281,18 +3529,28 @@ again:
complete(&ctx->done);
}
-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t __cifs_readv(
+ struct kiocb *iocb, struct iov_iter *to, bool direct)
{
- struct file *file = iocb->ki_filp;
- ssize_t rc;
size_t len;
- ssize_t total_read = 0;
- loff_t offset = iocb->ki_pos;
+ struct file *file = iocb->ki_filp;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
struct cifsFileInfo *cfile;
+ struct cifs_tcon *tcon;
+ ssize_t rc, total_read = 0;
+ loff_t offset = iocb->ki_pos;
struct cifs_aio_ctx *ctx;
+ /*
+ * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
+ * so fall back to the data copy read path.
+ * This could be improved by getting pages directly in ITER_KVEC.
+ */
+ if (direct && to->type & ITER_KVEC) {
+ cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
+ direct = false;
+ }
+
len = iov_iter_count(to);
if (!len)
return 0;
@@ -3316,17 +3574,23 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
if (!is_sync_kiocb(iocb))
ctx->iocb = iocb;
- if (to->type == ITER_IOVEC)
+ if (iter_is_iovec(to))
ctx->should_dirty = true;
- rc = setup_aio_ctx_iter(ctx, to, READ);
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
+ if (direct) {
+ ctx->pos = offset;
+ ctx->direct_io = true;
+ ctx->iter = *to;
+ ctx->len = len;
+ } else {
+ rc = setup_aio_ctx_iter(ctx, to, READ);
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return rc;
+ }
+ len = ctx->len;
}
- len = ctx->len;
-
/* grab a lock here due to read response handlers can access ctx */
mutex_lock(&ctx->aio_mutex);
@@ -3368,6 +3632,16 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
return rc;
}
+ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
+{
+ return __cifs_readv(iocb, to, true);
+}
+
+ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+{
+ return __cifs_readv(iocb, to, false);
+}
+
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
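cifs_resend_wdata() and cifs_resend_rdata() above share the same wait-for-credits shape: poll for a large enough transfer size, sleep one second between attempts, and give up with -EBUSY after three retries. A standalone sketch of that pattern — get_wsize() is a fake stand-in for server->ops->wait_mtu_credits():

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static unsigned int get_wsize(void)
{
        static unsigned int w = 16384;

        w *= 2;                         /* pretend credits slowly free up */
        return w;
}

static int wait_for_wsize(unsigned int bytes)
{
        unsigned int wsize = 0;
        int wait_retry = 0;

        do {
                wsize = get_wsize();
                if (wsize < bytes) {
                        sleep(1);       /* wait for more credits, up to 3 times */
                        wait_retry++;
                }
        } while (wsize < bytes && wait_retry < 3);

        return wsize < bytes ? -EBUSY : 0;
}

int main(void)
{
        printf("64K request: %d\n", wait_for_wsize(64 * 1024));
        printf("1G request:  %d\n", wait_for_wsize(1024u * 1024 * 1024));
        return 0;
}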
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 1023d78673fb..a81a9df997c1 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1320,8 +1320,8 @@ cifs_drop_nlink(struct inode *inode)
/*
* If d_inode(dentry) is null (usually meaning the cached dentry
* is a negative dentry) then we would attempt a standard SMB delete, but
- * if that fails we can not attempt the fall back mechanisms on EACCESS
- * but will return the EACCESS to the caller. Note that the VFS does not call
+ * if that fails we can not attempt the fall back mechanisms on EACCES
+ * but will return the EACCES to the caller. Note that the VFS does not call
* unlink on negative dentries currently.
*/
int cifs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index fc43d5d25d1d..8a41f4eba726 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -788,7 +788,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
struct page **pages = NULL;
struct bio_vec *bv = NULL;
- if (iter->type & ITER_KVEC) {
+ if (iov_iter_is_kvec(iter)) {
memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
ctx->len = count;
iov_iter_advance(iter, count);
@@ -859,7 +859,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
ctx->bv = bv;
ctx->len = saved_len - count;
ctx->npages = npages;
- iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
+ iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
return 0;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f85fc5aa2710..225fec1cfa67 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -747,6 +747,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
int rc = 0;
unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
char *name, *value;
+ size_t buf_size = dst_size;
size_t name_len, value_len, user_name_len;
while (src_size > 0) {
@@ -782,9 +783,10 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
/* 'user.' plus a terminating null */
user_name_len = 5 + 1 + name_len;
- rc += user_name_len;
-
- if (dst_size >= user_name_len) {
+ if (buf_size == 0) {
+ /* skip copy - calc size only */
+ rc += user_name_len;
+ } else if (dst_size >= user_name_len) {
dst_size -= user_name_len;
memcpy(dst, "user.", 5);
dst += 5;
@@ -792,8 +794,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
dst += name_len;
*dst = 0;
++dst;
- } else if (dst_size == 0) {
- /* skip copy - calc size only */
+ rc += user_name_len;
} else {
/* stop before overrun buffer */
rc = -ERANGE;
@@ -1078,6 +1079,9 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
cfile->fid.persistent_fid = fid->persistent_fid;
cfile->fid.volatile_fid = fid->volatile_fid;
+#ifdef CONFIG_CIFS_DEBUG2
+ cfile->fid.mid = fid->mid;
+#endif /* CIFS_DEBUG2 */
server->ops->set_oplock_level(cinode, oplock, fid->epoch,
&fid->purge_cache);
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
@@ -3152,13 +3156,13 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
return 0;
}
- iov_iter_bvec(&iter, WRITE | ITER_BVEC, bvec, npages, data_len);
+ iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
} else if (buf_len >= data_offset + data_len) {
/* read response payload is in buf */
WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
iov.iov_base = buf + data_offset;
iov.iov_len = data_len;
- iov_iter_kvec(&iter, WRITE | ITER_KVEC, &iov, 1, data_len);
+ iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
} else {
/* read response payload cannot be in both buf and pages */
WARN_ONCE(1, "buf can not contain only a part of read data");
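The buf_size == 0 handling fixed in move_smb2_ea_to_cifs() follows the usual xattr convention of a size-query pass followed by a fill pass. The same two-pass usage from userspace, with the standard listxattr(2) call (Linux-specific, minimal error handling):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : ".";
        ssize_t need = listxattr(path, NULL, 0);   /* size-query pass */
        char *buf;
        ssize_t got;

        if (need < 0) {
                perror("listxattr(size)");
                return 1;
        }
        buf = malloc(need ? need : 1);
        if (!buf)
                return 1;
        got = listxattr(path, buf, need);          /* fill pass */
        if (got < 0) {
                perror("listxattr(fill)");
                free(buf);
                return 1;
        }
        /* names come back as consecutive NUL-terminated strings */
        for (ssize_t off = 0; off < got; off += (ssize_t)strlen(buf + off) + 1)
                printf("%s\n", buf + off);
        free(buf);
        return 0;
}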
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7d7b016fe8bb..27f86537a5d1 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1512,7 +1512,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
-
+ trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
if (rc != 0) {
if (tcon) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
@@ -1559,6 +1559,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
if (tcon->ses->server->ops->validate_negotiate)
rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
+
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
return rc;
@@ -2308,6 +2309,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
atomic_inc(&tcon->num_remote_opens);
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
+#ifdef CONFIG_CIFS_DEBUG2
+ oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
+#endif /* CIFS_DEBUG2 */
if (buf) {
memcpy(buf, &rsp->CreationTime, 32);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f753f424d7f1..5671d5ee7f58 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -842,6 +842,41 @@ struct fsctl_get_integrity_information_rsp {
/* Integrity flags for above */
#define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001
+/* Reparse structures - see MS-FSCC 2.1.2 */
+
+/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
+
+struct reparse_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __u8 DataBuffer[0]; /* Variable Length */
+} __packed;
+
+struct reparse_guid_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __u8 ReparseGuid[16];
+ __u8 DataBuffer[0]; /* Variable Length */
+} __packed;
+
+struct reparse_mount_point_data_buffer {
+ __le32 ReparseTag;
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __le16 SubstituteNameOffset;
+ __le16 SubstituteNameLength;
+ __le16 PrintNameOffset;
+ __le16 PrintNameLength;
+ __u8 PathBuffer[0]; /* Variable Length */
+} __packed;
+
+/* See MS-FSCC 2.1.2.4 and cifspdu.h for struct reparse_symlink_data */
+
+/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+
+
/* See MS-DFSC 2.2.2 */
struct fsctl_get_dfs_referral_req {
__le16 MaxReferralLevel;
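A userspace sketch of walking a reparse_mount_point_data_buffer like the one declared above. It assumes a little-endian host for the __le fields and that the name offsets are relative to PathBuffer, as MS-FSCC describes; the tag value and sample name are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct mount_point_reparse {          /* sketch of the struct above, not the kernel one */
        uint32_t reparse_tag;
        uint16_t reparse_data_length;
        uint16_t reserved;
        uint16_t substitute_name_offset;
        uint16_t substitute_name_length;
        uint16_t print_name_offset;
        uint16_t print_name_length;
        uint8_t  path_buffer[];
} __attribute__((packed));

int main(void)
{
        /* "C:\mnt" as UTF-16LE, used as both substitute and print name */
        static const uint8_t name[] = { 'C',0,':',0,'\\',0,'m',0,'n',0,'t',0 };
        uint8_t raw[sizeof(struct mount_point_reparse) + 2 * sizeof(name)];
        struct mount_point_reparse *rp = (struct mount_point_reparse *)raw;
        const uint8_t *sub;

        memset(raw, 0, sizeof(raw));
        rp->reparse_tag = 0xA0000003;              /* IO_REPARSE_TAG_MOUNT_POINT */
        rp->substitute_name_offset = 0;
        rp->substitute_name_length = sizeof(name);
        rp->print_name_offset = sizeof(name);
        rp->print_name_length = sizeof(name);
        rp->reparse_data_length = 8 + 2 * sizeof(name);
        memcpy(rp->path_buffer, name, sizeof(name));
        memcpy(rp->path_buffer + sizeof(name), name, sizeof(name));

        sub = rp->path_buffer + rp->substitute_name_offset;
        for (int i = 0; i < rp->substitute_name_length; i += 2)
                putchar(sub[i]);                   /* drop the UTF-16 high bytes */
        putchar('\n');
        return 0;
}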
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5e282368cc4a..e94a8d1d08a3 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2054,14 +2054,22 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
info->smbd_recv_pending++;
- switch (msg->msg_iter.type) {
- case READ | ITER_KVEC:
+ if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+ /* It's a bug in the upper layer to get here */
+ cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
+ iov_iter_rw(&msg->msg_iter));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ switch (iov_iter_type(&msg->msg_iter)) {
+ case ITER_KVEC:
buf = msg->msg_iter.kvec->iov_base;
to_read = msg->msg_iter.kvec->iov_len;
rc = smbd_recv_buf(info, buf, to_read);
break;
- case READ | ITER_BVEC:
+ case ITER_BVEC:
page = msg->msg_iter.bvec->bv_page;
page_offset = msg->msg_iter.bvec->bv_offset;
to_read = msg->msg_iter.bvec->bv_len;
@@ -2071,10 +2079,11 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
default:
/* It's a bug in upper layer to get there */
cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
- msg->msg_iter.type);
+ iov_iter_type(&msg->msg_iter));
rc = -EINVAL;
}
+out:
info->smbd_recv_pending--;
wake_up(&info->wait_smbd_recv_pending);
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index cce8414fe7ec..fb049809555f 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -374,6 +374,48 @@ DEFINE_SMB3_ENTER_EXIT_EVENT(enter);
DEFINE_SMB3_ENTER_EXIT_EVENT(exit_done);
/*
+ * For SMB2/SMB3 tree connect
+ */
+
+DECLARE_EVENT_CLASS(smb3_tcon_class,
+ TP_PROTO(unsigned int xid,
+ __u32 tid,
+ __u64 sesid,
+ const char *unc_name,
+ int rc),
+ TP_ARGS(xid, tid, sesid, unc_name, rc),
+ TP_STRUCT__entry(
+ __field(unsigned int, xid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(const char *, unc_name)
+ __field(int, rc)
+ ),
+ TP_fast_assign(
+ __entry->xid = xid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->unc_name = unc_name;
+ __entry->rc = rc;
+ ),
+ TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
+ __entry->xid, __entry->sesid, __entry->tid,
+ __entry->unc_name, __entry->rc)
+)
+
+#define DEFINE_SMB3_TCON_EVENT(name) \
+DEFINE_EVENT(smb3_tcon_class, smb3_##name, \
+ TP_PROTO(unsigned int xid, \
+ __u32 tid, \
+ __u64 sesid, \
+ const char *unc_name, \
+ int rc), \
+ TP_ARGS(xid, tid, sesid, unc_name, rc))
+
+DEFINE_SMB3_TCON_EVENT(tcon);
+
+
+/*
* For smb2/smb3 open call
*/
DECLARE_EVENT_CLASS(smb3_open_err_class,
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f8112433f0c8..83ff0c25710d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -316,8 +316,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
.iov_base = &rfc1002_marker,
.iov_len = 4
};
- iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
- 1, 4);
+ iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
goto uncork;
@@ -338,8 +337,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
size += iov[i].iov_len;
}
- iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
- iov, n_vec, size);
+ iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
@@ -355,7 +353,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
&bvec.bv_offset);
- iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+ iov_iter_bvec(&smb_msg.msg_iter, WRITE,
&bvec, 1, bvec.bv_len);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 0c35e62f108d..9352487bd0fc 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -202,7 +202,8 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
continue;
blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
blk_offset += offset;
- if (blk_offset + len > BUFFER_SIZE)
+ if (blk_offset > BUFFER_SIZE ||
+ blk_offset + len > BUFFER_SIZE)
continue;
return read_buffers[i] + blk_offset;
}
@@ -872,8 +873,8 @@ static int cramfs_readpage(struct file *file, struct page *page)
if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
/* See comments on earlier code. */
u32 prev_start = block_start;
- block_start = prev_start & ~CRAMFS_BLK_FLAGS;
- block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
+ block_start = prev_start & ~CRAMFS_BLK_FLAGS;
+ block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
block_start += PAGE_SIZE;
} else {
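One likely reason for the extra blk_offset test added above: with unsigned arithmetic, a huge offset plus a small length can wrap around and slip past a lone "offset + len > BUFFER_SIZE" check. A tiny demonstration of the wraparound (32-bit values, constants invented):

#include <stdio.h>
#include <stdint.h>

#define BUFFER_SIZE (2u * 4096u)

static int in_bounds_weak(uint32_t off, uint32_t len)
{
        return !(off + len > BUFFER_SIZE);            /* wraps for huge off */
}

static int in_bounds_fixed(uint32_t off, uint32_t len)
{
        return !(off > BUFFER_SIZE || off + len > BUFFER_SIZE);
}

int main(void)
{
        uint32_t off = 0xfffffff0u, len = 0x20u;      /* off + len wraps to 0x10 */

        printf("weak check says in bounds:  %d\n", in_bounds_weak(off, len));
        printf("fixed check says in bounds: %d\n", in_bounds_fixed(off, len));
        return 0;
}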
diff --git a/fs/dcache.c b/fs/dcache.c
index c2e443fb76ae..2593153471cf 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -26,7 +26,7 @@
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 093fb54cd316..722d17c88edb 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1313,7 +1313,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
- dio->should_dirty = (iter->type == ITER_IOVEC);
+ dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
sdio.iter = iter;
sdio.final_block_in_request = end >> blkbits;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index a5e4a221435c..76976d6e50f9 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -674,7 +674,7 @@ static int receive_from_sock(struct connection *con)
nvec = 2;
}
len = iov[0].iov_len + iov[1].iov_len;
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nvec, len);
+ iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);
r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
if (ret <= 0)
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 41cf2fbee50d..906839a4da8f 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -101,6 +101,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
token = match_token(p, tokens, args);
switch (token) {
case Opt_name:
+ kfree(opts->dev_name);
opts->dev_name = match_strdup(&args[0]);
if (unlikely(!opts->dev_name)) {
EXOFS_ERR("Error allocating dev_name");
@@ -117,7 +118,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
EXOFS_MIN_PID);
return -EINVAL;
}
- s_pid = 1;
+ s_pid = true;
break;
case Opt_to:
if (match_int(&args[0], &option))
@@ -866,8 +867,10 @@ static struct dentry *exofs_mount(struct file_system_type *type,
int ret;
ret = parse_options(data, &opts);
- if (ret)
+ if (ret) {
+ kfree(opts.dev_name);
return ERR_PTR(ret);
+ }
if (!opts.dev_name)
opts.dev_name = dev_name;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 12f90d48ba61..3f89d0ab08fc 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -45,15 +45,6 @@
#include <linux/compiler.h>
-/* Until this gets included into linux/compiler-gcc.h */
-#ifndef __nonstring
-#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
-#define __nonstring __attribute__((nonstring))
-#else
-#define __nonstring
-#endif
-#endif
-
/*
* The fourth extended filesystem constants/structures
*/
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2addcb8730e1..014f6a698cb7 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1216,7 +1216,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
if (IS_ERR(bitmap_bh))
- return (struct inode *) bitmap_bh;
+ return ERR_CAST(bitmap_bh);
/* Having the inode bit set should be a 100% indicator that this
* is a valid orphan (no e2fsck run on fs). Orphans also include
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 67a38532032a..17adcb16a9c8 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1556,7 +1556,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (IS_ERR(bh))
- return (struct dentry *) bh;
+ return ERR_CAST(bh);
inode = NULL;
if (bh) {
__u32 ino = le32_to_cpu(de->inode);
@@ -1600,7 +1600,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL);
if (IS_ERR(bh))
- return (struct dentry *) bh;
+ return ERR_CAST(bh);
if (!bh)
return ERR_PTR(-ENOENT);
ino = le32_to_cpu(de->inode);
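The ERR_CAST() conversions above lean on the kernel's ERR_PTR convention: an errno is encoded in the pointer value itself, and ERR_CAST() only changes the pointer's type without losing the error. A self-contained userspace re-creation of that convention, for illustration rather than as the kernel's exact definitions:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)        { return (void *)err; }
static inline long PTR_ERR(const void *ptr)  { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
/* same bits, different pointer type -- what ERR_CAST() provides */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct buffer_head { int dummy; };
struct inode       { int dummy; };

static struct buffer_head *read_bitmap(int fail)
{
        static struct buffer_head bh;

        return fail ? ERR_PTR(-EIO) : &bh;
}

static struct inode *get_orphan(int fail)
{
        struct buffer_head *bh = read_bitmap(fail);

        if (IS_ERR(bh))
                return ERR_CAST(bh);    /* instead of (struct inode *)bh */
        return NULL;                    /* real lookup elided */
}

int main(void)
{
        struct inode *in = get_orphan(1);

        if (IS_ERR(in))
                printf("error %ld propagated through ERR_CAST\n", PTR_ERR(in));
        return 0;
}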
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 2aa62d58d8dd..db7590178dfc 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
if (!bio)
return -ENOMEM;
+ wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
- wbc_init_bio(io->io_wbc, bio);
return 0;
}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 7f5f3699fc6c..c8366cb8eccd 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -369,7 +369,9 @@ static int fat_parse_short(struct super_block *sb,
}
memcpy(work, de->name, sizeof(work));
- /* see namei.c, msdos_format_name */
+ /* For an explanation of the special treatment of 0x05 in
+ * filenames, see msdos_format_name in namei_msdos.c
+ */
if (work[0] == 0x05)
work[0] = 0xE5;
@@ -1071,7 +1073,7 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
}
}
- dir->i_mtime = dir->i_atime = current_time(dir);
+ fat_truncate_time(dir, NULL, S_ATIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 9d7d2d5da28b..4e1b2f6df5e6 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -416,6 +416,10 @@ extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs);
extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs);
+extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
+ int flags);
+extern int fat_update_time(struct inode *inode, struct timespec64 *now,
+ int flags);
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
int fat_cache_init(void);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 4f3d72fb1e60..13935ee99e1e 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -227,7 +227,7 @@ static int fat_cont_expand(struct inode *inode, loff_t size)
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
int err2;
@@ -330,7 +330,7 @@ static int fat_free(struct inode *inode, int skip)
MSDOS_I(inode)->i_logstart = 0;
}
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
if (wait) {
err = fat_sync_inode(inode);
if (err) {
@@ -542,6 +542,18 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
up_write(&MSDOS_I(inode)->truncate_lock);
}
+ /*
+ * setattr_copy can't truncate these appropriately, so we'll
+ * copy them ourselves
+ */
+ if (attr->ia_valid & ATTR_ATIME)
+ fat_truncate_time(inode, &attr->ia_atime, S_ATIME);
+ if (attr->ia_valid & ATTR_CTIME)
+ fat_truncate_time(inode, &attr->ia_ctime, S_CTIME);
+ if (attr->ia_valid & ATTR_MTIME)
+ fat_truncate_time(inode, &attr->ia_mtime, S_MTIME);
+ attr->ia_valid &= ~(ATTR_ATIME|ATTR_CTIME|ATTR_MTIME);
+
setattr_copy(inode, attr);
mark_inode_dirty(inode);
out:
@@ -552,4 +564,5 @@ EXPORT_SYMBOL_GPL(fat_setattr);
const struct inode_operations fat_file_inode_operations = {
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index d6b81e31f9f5..c0b5b5c3373b 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -244,7 +244,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
mark_inode_dirty(inode);
}
@@ -564,7 +564,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
de->cdate, de->ctime_cs);
fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
} else
- inode->i_ctime = inode->i_atime = inode->i_mtime;
+ fat_truncate_time(inode, &inode->i_mtime, S_ATIME|S_CTIME);
return 0;
}
@@ -1626,6 +1626,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sb->s_magic = MSDOS_SUPER_MAGIC;
sb->s_op = &fat_sops;
sb->s_export_op = &fat_export_ops;
+ /*
+ * FAT timestamps are complex and truncated by FAT itself, so
+ * set s_time_gran to 1 here for speed and let FAT do the truncation.
+ */
+ sb->s_time_gran = 1;
mutex_init(&sbi->nfs_build_inode_lock);
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 573836dcaefc..fce0a76f3f1e 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -7,6 +7,7 @@
*/
#include "fat.h"
+#include <linux/iversion.h>
/*
* fat_fs_error reports a file system problem that might indicate fat data
@@ -185,6 +186,13 @@ static long days_in_year[] = {
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
+static inline int fat_tz_offset(struct msdos_sb_info *sbi)
+{
+ return (sbi->options.tz_set ?
+ -sbi->options.time_offset :
+ sys_tz.tz_minuteswest) * SECS_PER_MIN;
+}
+
/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs)
@@ -210,10 +218,7 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
+ days_in_year[month] + day
+ DAYS_DELTA) * SECS_PER_DAY;
- if (!sbi->options.tz_set)
- second += sys_tz.tz_minuteswest * SECS_PER_MIN;
- else
- second -= sbi->options.time_offset * SECS_PER_MIN;
+ second += fat_tz_offset(sbi);
if (time_cs) {
ts->tv_sec = second + (time_cs / 100);
@@ -229,9 +234,7 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
- time64_to_tm(ts->tv_sec,
- (sbi->options.tz_set ? sbi->options.time_offset :
- -sys_tz.tz_minuteswest) * SECS_PER_MIN, &tm);
+ time64_to_tm(ts->tv_sec, -fat_tz_offset(sbi), &tm);
/* FAT can only support year between 1980 to 2107 */
if (tm.tm_year < 1980 - 1900) {
@@ -263,6 +266,80 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
}
EXPORT_SYMBOL_GPL(fat_time_unix2fat);
+static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
+{
+ return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
+}
+/*
+ * truncate the various times with appropriate granularity:
+ * root inode:
+ * all times always 0
+ * all other inodes:
+ * mtime - 2 seconds
+ * ctime
+ * msdos - 2 seconds
+ * vfat - 10 milliseconds
+ * atime - 24 hours (00:00:00 in local timezone)
+ */
+int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ struct timespec64 ts;
+
+ if (inode->i_ino == MSDOS_ROOT_INO)
+ return 0;
+
+ if (now == NULL) {
+ now = &ts;
+ ts = current_time(inode);
+ }
+
+ if (flags & S_ATIME) {
+ /* to localtime */
+ time64_t seconds = now->tv_sec - fat_tz_offset(sbi);
+ s32 remainder;
+
+ div_s64_rem(seconds, SECS_PER_DAY, &remainder);
+ /* to day boundary, and back to unix time */
+ seconds = seconds + fat_tz_offset(sbi) - remainder;
+
+ inode->i_atime = (struct timespec64){ seconds, 0 };
+ }
+ if (flags & S_CTIME) {
+ if (sbi->options.isvfat)
+ inode->i_ctime = timespec64_trunc(*now, 10000000);
+ else
+ inode->i_ctime = fat_timespec64_trunc_2secs(*now);
+ }
+ if (flags & S_MTIME)
+ inode->i_mtime = fat_timespec64_trunc_2secs(*now);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_truncate_time);
+
+int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
+{
+ int iflags = I_DIRTY_TIME;
+ bool dirty = false;
+
+ if (inode->i_ino == MSDOS_ROOT_INO)
+ return 0;
+
+ fat_truncate_time(inode, now, flags);
+ if (flags & S_VERSION)
+ dirty = inode_maybe_inc_iversion(inode, false);
+ if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
+ !(inode->i_sb->s_flags & SB_LAZYTIME))
+ dirty = true;
+
+ if (dirty)
+ iflags |= I_DIRTY_SYNC;
+ __mark_inode_dirty(inode, iflags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fat_update_time);
+
int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
int i, err = 0;
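A userspace walk-through of the truncation rules fat_truncate_time() implements above: mtime to 2 seconds, vfat ctime to 10 ms, and atime back to the start of the day in the FAT timezone. tz_offset_sec stands in for fat_tz_offset(sbi), post-1970 times are assumed, and the sample values are arbitrary:

#include <stdio.h>

#define SECS_PER_DAY (24 * 60 * 60)

struct ts { long long sec; long nsec; };

static struct ts trunc_mtime(struct ts t)
{
        return (struct ts){ t.sec & ~1LL, 0 };                      /* 2 s */
}

static struct ts trunc_ctime_vfat(struct ts t)
{
        return (struct ts){ t.sec, t.nsec - (t.nsec % 10000000L) }; /* 10 ms */
}

static struct ts trunc_atime(struct ts t, long tz_offset_sec)
{
        long long local = t.sec - tz_offset_sec;   /* to local (FAT) time */
        long long rem = local % SECS_PER_DAY;      /* seconds past local midnight */

        return (struct ts){ t.sec - rem, 0 };      /* back on the day boundary */
}

int main(void)
{
        struct ts now = { 1541602819, 123456789 }; /* arbitrary instant */
        struct ts m = trunc_mtime(now);
        struct ts c = trunc_ctime_vfat(now);
        struct ts a = trunc_atime(now, 5 * 3600);  /* e.g. UTC-5 via sys_tz */

        printf("mtime: %lld.%09ld\n", m.sec, m.nsec);
        printf("ctime: %lld.%09ld\n", c.sec, c.nsec);
        printf("atime: %lld.%09ld\n", a.sec, a.nsec);
        return 0;
}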
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index efb8c40c9d27..f2cd365a4e86 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -250,7 +250,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
if (err)
return err;
- dir->i_ctime = dir->i_mtime = *ts;
+ fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -294,7 +294,7 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
goto out;
}
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -327,7 +327,7 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -380,7 +380,7 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
set_nlink(inode, 2);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -413,7 +413,7 @@ static int msdos_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -478,7 +478,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
mark_inode_dirty(old_inode);
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ fat_truncate_time(old_dir, NULL, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -538,7 +538,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ts;
+ fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -548,7 +548,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
- new_inode->i_ctime = ts;
+ fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
@@ -637,6 +637,7 @@ static const struct inode_operations msdos_dir_inode_operations = {
.rename = msdos_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
static void setup(struct super_block *sb)
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 82cd1e69cbdf..996c8c25e9c6 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -678,7 +678,7 @@ static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
goto cleanup;
/* update timestamp */
- dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
+ fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -779,7 +779,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
}
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -810,7 +810,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
@@ -836,7 +836,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
@@ -876,7 +876,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
inode_inc_iversion(inode);
set_nlink(inode, 2);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
+ fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -969,7 +969,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ts;
+ fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
@@ -979,7 +979,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
- new_inode->i_ctime = ts;
+ fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
@@ -1032,6 +1032,7 @@ static const struct inode_operations vfat_dir_inode_operations = {
.rename = vfat_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
+ .update_time = fat_update_time,
};
static void setup(struct super_block *sb)
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 60da84a86dab..f7b807bc1027 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
-fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o
+fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 0b694655d988..989df5accaee 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -107,7 +107,7 @@ static ssize_t fuse_conn_max_background_read(struct file *file,
if (!fc)
return 0;
- val = fc->max_background;
+ val = READ_ONCE(fc->max_background);
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
@@ -125,7 +125,12 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
if (ret > 0) {
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (fc) {
+ spin_lock(&fc->bg_lock);
fc->max_background = val;
+ fc->blocked = fc->num_background >= fc->max_background;
+ if (!fc->blocked)
+ wake_up(&fc->blocked_waitq);
+ spin_unlock(&fc->bg_lock);
fuse_conn_put(fc);
}
}
@@ -144,7 +149,7 @@ static ssize_t fuse_conn_congestion_threshold_read(struct file *file,
if (!fc)
return 0;
- val = fc->congestion_threshold;
+ val = READ_ONCE(fc->congestion_threshold);
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
@@ -155,18 +160,31 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
size_t count, loff_t *ppos)
{
unsigned uninitialized_var(val);
+ struct fuse_conn *fc;
ssize_t ret;
ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
max_user_congthresh);
- if (ret > 0) {
- struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
- if (fc) {
- fc->congestion_threshold = val;
- fuse_conn_put(fc);
+ if (ret <= 0)
+ goto out;
+ fc = fuse_ctl_file_conn_get(file);
+ if (!fc)
+ goto out;
+
+ spin_lock(&fc->bg_lock);
+ fc->congestion_threshold = val;
+ if (fc->sb) {
+ if (fc->num_background < fc->congestion_threshold) {
+ clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ } else {
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
}
}
-
+ spin_unlock(&fc->bg_lock);
+ fuse_conn_put(fc);
+out:
return ret;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 11ea2c4a38ab..ae813e609932 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -25,6 +25,10 @@
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
+/* Ordinary requests have even IDs, while interrupt IDs are odd */
+#define FUSE_INT_REQ_BIT (1ULL << 0)
+#define FUSE_REQ_ID_STEP (1ULL << 1)
+
static struct kmem_cache *fuse_req_cachep;
static struct fuse_dev *fuse_get_dev(struct file *file)
@@ -40,9 +44,6 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
struct fuse_page_desc *page_descs,
unsigned npages)
{
- memset(req, 0, sizeof(*req));
- memset(pages, 0, sizeof(*pages) * npages);
- memset(page_descs, 0, sizeof(*page_descs) * npages);
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
@@ -53,30 +54,36 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
__set_bit(FR_PENDING, &req->flags);
}
+static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
+ struct fuse_page_desc **desc)
+{
+ struct page **pages;
+
+ pages = kzalloc(npages * (sizeof(struct page *) +
+ sizeof(struct fuse_page_desc)), flags);
+ *desc = (void *) pages + npages * sizeof(struct page *);
+
+ return pages;
+}
+
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
- struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
+ struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
if (req) {
- struct page **pages;
- struct fuse_page_desc *page_descs;
-
- if (npages <= FUSE_REQ_INLINE_PAGES) {
+ struct page **pages = NULL;
+ struct fuse_page_desc *page_descs = NULL;
+
+ WARN_ON(npages > FUSE_MAX_MAX_PAGES);
+ if (npages > FUSE_REQ_INLINE_PAGES) {
+ pages = fuse_req_pages_alloc(npages, flags,
+ &page_descs);
+ if (!pages) {
+ kmem_cache_free(fuse_req_cachep, req);
+ return NULL;
+ }
+ } else if (npages) {
pages = req->inline_pages;
page_descs = req->inline_page_descs;
- } else {
- pages = kmalloc_array(npages, sizeof(struct page *),
- flags);
- page_descs =
- kmalloc_array(npages,
- sizeof(struct fuse_page_desc),
- flags);
- }
-
- if (!pages || !page_descs) {
- kfree(pages);
- kfree(page_descs);
- kmem_cache_free(fuse_req_cachep, req);
- return NULL;
}
fuse_request_init(req, pages, page_descs, npages);
@@ -95,12 +102,41 @@ struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
return __fuse_request_alloc(npages, GFP_NOFS);
}
-void fuse_request_free(struct fuse_req *req)
+static void fuse_req_pages_free(struct fuse_req *req)
{
- if (req->pages != req->inline_pages) {
+ if (req->pages != req->inline_pages)
kfree(req->pages);
- kfree(req->page_descs);
- }
+}
+
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+ gfp_t flags)
+{
+ struct page **pages;
+ struct fuse_page_desc *page_descs;
+ unsigned int npages = min_t(unsigned int,
+ max_t(unsigned int, req->max_pages * 2,
+ FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+ fc->max_pages);
+ WARN_ON(npages <= req->max_pages);
+
+ pages = fuse_req_pages_alloc(npages, flags, &page_descs);
+ if (!pages)
+ return false;
+
+ memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
+ memcpy(page_descs, req->page_descs,
+ sizeof(struct fuse_page_desc) * req->max_pages);
+ fuse_req_pages_free(req);
+ req->pages = pages;
+ req->page_descs = page_descs;
+ req->max_pages = npages;
+
+ return true;
+}
+
+void fuse_request_free(struct fuse_req *req)
+{
+ fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
@@ -235,8 +271,10 @@ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
struct file *file = req->stolen_file;
struct fuse_file *ff = file->private_data;
+ WARN_ON(req->max_pages);
spin_lock(&fc->lock);
- fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
+ memset(req, 0, sizeof(*req));
+ fuse_request_init(req, NULL, NULL, 0);
BUG_ON(ff->reserved_req);
ff->reserved_req = req;
wake_up_all(&fc->reserved_req_waitq);
@@ -287,10 +325,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
* We get here in the unlikely case that a background
* request was allocated but not sent
*/
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
if (!fc->blocked)
wake_up(&fc->blocked_waitq);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
}
if (test_bit(FR_WAITING, &req->flags)) {
@@ -319,7 +357,13 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
- return ++fiq->reqctr;
+ fiq->reqctr += FUSE_REQ_ID_STEP;
+ return fiq->reqctr;
+}
+
+static unsigned int fuse_req_hash(u64 unique)
+{
+ return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
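The hunk above replaces the separate intr_unique counter with an encoding scheme: ordinary request IDs advance by FUSE_REQ_ID_STEP and are therefore even, the interrupt message for a request reuses the same ID with FUSE_INT_REQ_BIT set, and replies are located by hashing the ID with that bit masked off into the 256-bucket processing table (FUSE_PQ_HASH_BITS is defined in the fuse_i.h hunk later in this patch). A minimal standalone sketch of the arithmetic follows; demo_hash() is a made-up stand-in for the kernel's hash_long(), only the stepping and masking mirror the patch.

/* Userspace sketch of the request-ID scheme introduced above. */
#include <stdint.h>
#include <stdio.h>

#define FUSE_INT_REQ_BIT  (1ULL << 0)
#define FUSE_REQ_ID_STEP  (1ULL << 1)
#define FUSE_PQ_HASH_BITS 8

static uint64_t reqctr;

static uint64_t demo_get_unique(void)
{
        reqctr += FUSE_REQ_ID_STEP;     /* 2, 4, 6, ...: always even */
        return reqctr;
}

static unsigned int demo_hash(uint64_t unique)
{
        /* toy multiplicative hash; the kernel uses hash_long() here */
        return (unsigned int)((unique & ~FUSE_INT_REQ_BIT) *
                              0x9E3779B97F4A7C15ULL >> (64 - FUSE_PQ_HASH_BITS));
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                uint64_t req = demo_get_unique();
                uint64_t intr = req | FUSE_INT_REQ_BIT;

                printf("req %llu (bucket %u), interrupt %llu (bucket %u)\n",
                       (unsigned long long)req, demo_hash(req),
                       (unsigned long long)intr, demo_hash(intr));
        }
        return 0;
}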
@@ -353,12 +397,13 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
static void flush_bg_queue(struct fuse_conn *fc)
{
+ struct fuse_iqueue *fiq = &fc->iq;
+
while (fc->active_background < fc->max_background &&
!list_empty(&fc->bg_queue)) {
struct fuse_req *req;
- struct fuse_iqueue *fiq = &fc->iq;
- req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+ req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
spin_lock(&fiq->waitq.lock);
@@ -389,14 +434,21 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
if (test_bit(FR_BACKGROUND, &req->flags)) {
- spin_lock(&fc->lock);
+ spin_lock(&fc->bg_lock);
clear_bit(FR_BACKGROUND, &req->flags);
- if (fc->num_background == fc->max_background)
+ if (fc->num_background == fc->max_background) {
fc->blocked = 0;
-
- /* Wake up next waiter, if any */
- if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
wake_up(&fc->blocked_waitq);
+ } else if (!fc->blocked) {
+ /*
+ * Wake up next waiter, if any. It's okay to use
+ * waitqueue_active(), as we've already synced up
+ * fc->blocked with waiters with the wake_up() call
+ * above.
+ */
+ if (waitqueue_active(&fc->blocked_waitq))
+ wake_up(&fc->blocked_waitq);
+ }
if (fc->num_background == fc->congestion_threshold && fc->sb) {
clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
@@ -405,7 +457,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
fc->num_background--;
fc->active_background--;
flush_bg_queue(fc);
- spin_unlock(&fc->lock);
+ spin_unlock(&fc->bg_lock);
}
wake_up(&req->waitq);
if (req->end)
@@ -573,40 +625,38 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
return ret;
}
-/*
- * Called under fc->lock
- *
- * fc->connected must have been checked previously
- */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
- struct fuse_req *req)
+bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
{
- BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+ bool queued = false;
+
+ WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
if (!test_bit(FR_WAITING, &req->flags)) {
__set_bit(FR_WAITING, &req->flags);
atomic_inc(&fc->num_waiting);
}
__set_bit(FR_ISREPLY, &req->flags);
- fc->num_background++;
- if (fc->num_background == fc->max_background)
- fc->blocked = 1;
- if (fc->num_background == fc->congestion_threshold && fc->sb) {
- set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
- set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ spin_lock(&fc->bg_lock);
+ if (likely(fc->connected)) {
+ fc->num_background++;
+ if (fc->num_background == fc->max_background)
+ fc->blocked = 1;
+ if (fc->num_background == fc->congestion_threshold && fc->sb) {
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ }
+ list_add_tail(&req->list, &fc->bg_queue);
+ flush_bg_queue(fc);
+ queued = true;
}
- list_add_tail(&req->list, &fc->bg_queue);
- flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
+
+ return queued;
}
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
- BUG_ON(!req->end);
- spin_lock(&fc->lock);
- if (fc->connected) {
- fuse_request_send_background_locked(fc, req);
- spin_unlock(&fc->lock);
- } else {
- spin_unlock(&fc->lock);
+ WARN_ON(!req->end);
+ if (!fuse_request_queue_background(fc, req)) {
req->out.h.error = -ENOTCONN;
req->end(fc, req);
fuse_put_request(fc, req);
@@ -1084,12 +1134,11 @@ __releases(fiq->waitq.lock)
int err;
list_del_init(&req->intr_entry);
- req->intr_unique = fuse_get_unique(fiq);
memset(&ih, 0, sizeof(ih));
memset(&arg, 0, sizeof(arg));
ih.len = reqsize;
ih.opcode = FUSE_INTERRUPT;
- ih.unique = req->intr_unique;
+ ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
arg.unique = req->in.h.unique;
spin_unlock(&fiq->waitq.lock);
@@ -1238,6 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_req *req;
struct fuse_in *in;
unsigned reqsize;
+ unsigned int hash;
restart:
spin_lock(&fiq->waitq.lock);
@@ -1310,13 +1360,16 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
err = reqsize;
goto out_end;
}
- list_move_tail(&req->list, &fpq->processing);
- spin_unlock(&fpq->lock);
+ hash = fuse_req_hash(req->in.h.unique);
+ list_move_tail(&req->list, &fpq->processing[hash]);
+ __fuse_get_request(req);
set_bit(FR_SENT, &req->flags);
+ spin_unlock(&fpq->lock);
/* matches barrier in request_wait_answer() */
smp_mb__after_atomic();
if (test_bit(FR_INTERRUPTED, &req->flags))
queue_interrupt(fiq, req);
+ fuse_put_request(fc, req);
return reqsize;
@@ -1663,7 +1716,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
unsigned int num;
unsigned int offset;
size_t total_len = 0;
- int num_pages;
+ unsigned int num_pages;
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
@@ -1675,7 +1728,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num = file_size - outarg->offset;
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
- num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
+ num_pages = min(num_pages, fc->max_pages);
req = fuse_get_req(fc, num_pages);
if (IS_ERR(req))
@@ -1792,10 +1845,11 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
+ unsigned int hash = fuse_req_hash(unique);
struct fuse_req *req;
- list_for_each_entry(req, &fpq->processing, list) {
- if (req->in.h.unique == unique || req->intr_unique == unique)
+ list_for_each_entry(req, &fpq->processing[hash], list) {
+ if (req->in.h.unique == unique)
return req;
}
return NULL;
@@ -1869,22 +1923,26 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
if (!fpq->connected)
goto err_unlock_pq;
- req = request_find(fpq, oh.unique);
+ req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
if (!req)
goto err_unlock_pq;
- /* Is it an interrupt reply? */
- if (req->intr_unique == oh.unique) {
+ /* Is it an interrupt reply ID? */
+ if (oh.unique & FUSE_INT_REQ_BIT) {
+ __fuse_get_request(req);
spin_unlock(&fpq->lock);
err = -EINVAL;
- if (nbytes != sizeof(struct fuse_out_header))
+ if (nbytes != sizeof(struct fuse_out_header)) {
+ fuse_put_request(fc, req);
goto err_finish;
+ }
if (oh.error == -ENOSYS)
fc->no_interrupt = 1;
else if (oh.error == -EAGAIN)
queue_interrupt(&fc->iq, req);
+ fuse_put_request(fc, req);
fuse_copy_finish(cs);
return nbytes;
@@ -2102,9 +2160,13 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
struct fuse_dev *fud;
struct fuse_req *req, *next;
LIST_HEAD(to_end);
+ unsigned int i;
+ /* Background queuing checks fc->connected under bg_lock */
+ spin_lock(&fc->bg_lock);
fc->connected = 0;
- fc->blocked = 0;
+ spin_unlock(&fc->bg_lock);
+
fc->aborted = is_abort;
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
@@ -2123,11 +2185,16 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
}
spin_unlock(&req->waitq.lock);
}
- list_splice_tail_init(&fpq->processing, &to_end);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ list_splice_tail_init(&fpq->processing[i],
+ &to_end);
spin_unlock(&fpq->lock);
}
+ spin_lock(&fc->bg_lock);
+ fc->blocked = 0;
fc->max_background = UINT_MAX;
flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
@@ -2163,10 +2230,12 @@ int fuse_dev_release(struct inode *inode, struct file *file)
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
LIST_HEAD(to_end);
+ unsigned int i;
spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
- list_splice_init(&fpq->processing, &to_end);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ list_splice_init(&fpq->processing[i], &to_end);
spin_unlock(&fpq->lock);
end_requests(fc, &to_end);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0979609d6eba..47395b0c3b35 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -14,24 +14,9 @@
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/iversion.h>
#include <linux/posix_acl.h>
-static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
-{
- struct fuse_conn *fc = get_fuse_conn(dir);
- struct fuse_inode *fi = get_fuse_inode(dir);
-
- if (!fc->do_readdirplus)
- return false;
- if (!fc->readdirplus_auto)
- return true;
- if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
- return true;
- if (ctx->pos == 0)
- return true;
- return false;
-}
-
static void fuse_advise_use_readdirplus(struct inode *dir)
{
struct fuse_inode *fi = get_fuse_inode(dir);
@@ -80,8 +65,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
* Set dentry and possibly attribute timeouts from the lookup/mk*
* replies
*/
-static void fuse_change_entry_timeout(struct dentry *entry,
- struct fuse_entry_out *o)
+void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
{
fuse_dentry_settime(entry,
time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
@@ -92,18 +76,29 @@ static u64 attr_timeout(struct fuse_attr_out *o)
return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}
-static u64 entry_attr_timeout(struct fuse_entry_out *o)
+u64 entry_attr_timeout(struct fuse_entry_out *o)
{
return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}
+static void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
+{
+ set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask);
+}
+
/*
* Mark the attributes as stale, so that at the next call to
* ->getattr() they will be fetched from userspace
*/
void fuse_invalidate_attr(struct inode *inode)
{
- get_fuse_inode(inode)->i_time = 0;
+ fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
+}
+
+static void fuse_dir_changed(struct inode *dir)
+{
+ fuse_invalidate_attr(dir);
+ inode_maybe_inc_iversion(dir, false);
}
/**
@@ -113,7 +108,7 @@ void fuse_invalidate_attr(struct inode *inode)
void fuse_invalidate_atime(struct inode *inode)
{
if (!IS_RDONLY(inode))
- fuse_invalidate_attr(inode);
+ fuse_invalidate_attr_mask(inode, STATX_ATIME);
}
/*
@@ -262,11 +257,6 @@ invalid:
goto out;
}
-static int invalid_nodeid(u64 nodeid)
-{
- return !nodeid || nodeid == FUSE_ROOT_ID;
-}
-
static int fuse_dentry_init(struct dentry *dentry)
{
dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL);
@@ -469,7 +459,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
kfree(forget);
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
err = finish_open(file, entry, generic_file_open);
if (err) {
fuse_sync_release(ff, flags);
@@ -583,7 +573,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
} else {
fuse_change_entry_timeout(entry, &outarg);
}
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
return 0;
out_put_forget_req:
@@ -693,7 +683,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
drop_nlink(inode);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
fuse_update_ctime(inode);
} else if (err == -EINTR)
@@ -715,7 +705,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
err = fuse_simple_request(fc, &args);
if (!err) {
clear_nlink(d_inode(entry));
- fuse_invalidate_attr(dir);
+ fuse_dir_changed(dir);
fuse_invalidate_entry_cache(entry);
} else if (err == -EINTR)
fuse_invalidate_entry(entry);
@@ -754,9 +744,9 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
fuse_update_ctime(d_inode(newent));
}
- fuse_invalidate_attr(olddir);
+ fuse_dir_changed(olddir);
if (olddir != newdir)
- fuse_invalidate_attr(newdir);
+ fuse_dir_changed(newdir);
/* newent will end up negative */
if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) {
@@ -932,7 +922,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
}
static int fuse_update_get_attr(struct inode *inode, struct file *file,
- struct kstat *stat, unsigned int flags)
+ struct kstat *stat, u32 request_mask,
+ unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
int err = 0;
@@ -942,6 +933,8 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
sync = true;
else if (flags & AT_STATX_DONT_SYNC)
sync = false;
+ else if (request_mask & READ_ONCE(fi->inval_mask))
+ sync = true;
else
sync = time_before64(fi->i_time, get_jiffies_64());
@@ -959,7 +952,9 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
int fuse_update_attributes(struct inode *inode, struct file *file)
{
- return fuse_update_get_attr(inode, file, NULL, 0);
+ /* Do *not* need to get atime for internal purposes */
+ return fuse_update_get_attr(inode, file, NULL,
+ STATX_BASIC_STATS & ~STATX_ATIME, 0);
}
int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
@@ -989,7 +984,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
if (!entry)
goto unlock;
- fuse_invalidate_attr(parent);
+ fuse_dir_changed(parent);
fuse_invalidate_entry(entry);
if (child_nodeid != 0 && d_really_is_positive(entry)) {
@@ -1165,271 +1160,78 @@ static int fuse_permission(struct inode *inode, int mask)
return err;
}
-static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx)
-{
- while (nbytes >= FUSE_NAME_OFFSET) {
- struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
- size_t reclen = FUSE_DIRENT_SIZE(dirent);
- if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
- return -EIO;
- if (reclen > nbytes)
- break;
- if (memchr(dirent->name, '/', dirent->namelen) != NULL)
- return -EIO;
-
- if (!dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type))
- break;
-
- buf += reclen;
- nbytes -= reclen;
- ctx->pos = dirent->off;
- }
-
- return 0;
-}
-
-static int fuse_direntplus_link(struct file *file,
- struct fuse_direntplus *direntplus,
- u64 attr_version)
-{
- struct fuse_entry_out *o = &direntplus->entry_out;
- struct fuse_dirent *dirent = &direntplus->dirent;
- struct dentry *parent = file->f_path.dentry;
- struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
- struct dentry *dentry;
- struct dentry *alias;
- struct inode *dir = d_inode(parent);
- struct fuse_conn *fc;
- struct inode *inode;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
-
- if (!o->nodeid) {
- /*
- * Unlike in the case of fuse_lookup, zero nodeid does not mean
- * ENOENT. Instead, it only means the userspace filesystem did
- * not want to return attributes/handle for this entry.
- *
- * So do nothing.
- */
- return 0;
- }
-
- if (name.name[0] == '.') {
- /*
- * We could potentially refresh the attributes of the directory
- * and its parent?
- */
- if (name.len == 1)
- return 0;
- if (name.name[1] == '.' && name.len == 2)
- return 0;
- }
-
- if (invalid_nodeid(o->nodeid))
- return -EIO;
- if (!fuse_valid_type(o->attr.mode))
- return -EIO;
-
- fc = get_fuse_conn(dir);
-
- name.hash = full_name_hash(parent, name.name, name.len);
- dentry = d_lookup(parent, &name);
- if (!dentry) {
-retry:
- dentry = d_alloc_parallel(parent, &name, &wq);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- }
- if (!d_in_lookup(dentry)) {
- struct fuse_inode *fi;
- inode = d_inode(dentry);
- if (!inode ||
- get_node_id(inode) != o->nodeid ||
- ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
- d_invalidate(dentry);
- dput(dentry);
- goto retry;
- }
- if (is_bad_inode(inode)) {
- dput(dentry);
- return -EIO;
- }
-
- fi = get_fuse_inode(inode);
- spin_lock(&fc->lock);
- fi->nlookup++;
- spin_unlock(&fc->lock);
-
- forget_all_cached_acls(inode);
- fuse_change_attributes(inode, &o->attr,
- entry_attr_timeout(o),
- attr_version);
- /*
- * The other branch comes via fuse_iget()
- * which bumps nlookup inside
- */
- } else {
- inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
- &o->attr, entry_attr_timeout(o),
- attr_version);
- if (!inode)
- inode = ERR_PTR(-ENOMEM);
-
- alias = d_splice_alias(inode, dentry);
- d_lookup_done(dentry);
- if (alias) {
- dput(dentry);
- dentry = alias;
- }
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- }
- if (fc->readdirplus_auto)
- set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
- fuse_change_entry_timeout(dentry, o);
-
- dput(dentry);
- return 0;
-}
-
-static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
- struct dir_context *ctx, u64 attr_version)
+static int fuse_readlink_page(struct inode *inode, struct page *page)
{
- struct fuse_direntplus *direntplus;
- struct fuse_dirent *dirent;
- size_t reclen;
- int over = 0;
- int ret;
-
- while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) {
- direntplus = (struct fuse_direntplus *) buf;
- dirent = &direntplus->dirent;
- reclen = FUSE_DIRENTPLUS_SIZE(direntplus);
-
- if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
- return -EIO;
- if (reclen > nbytes)
- break;
- if (memchr(dirent->name, '/', dirent->namelen) != NULL)
- return -EIO;
-
- if (!over) {
- /* We fill entries into dstbuf only as much as
- it can hold. But we still continue iterating
- over remaining entries to link them. If not,
- we need to send a FORGET for each of those
- which we did not link.
- */
- over = !dir_emit(ctx, dirent->name, dirent->namelen,
- dirent->ino, dirent->type);
- if (!over)
- ctx->pos = dirent->off;
- }
-
- buf += reclen;
- nbytes -= reclen;
-
- ret = fuse_direntplus_link(file, direntplus, attr_version);
- if (ret)
- fuse_force_forget(file, direntplus->entry_out.nodeid);
- }
-
- return 0;
-}
-
-static int fuse_readdir(struct file *file, struct dir_context *ctx)
-{
- int plus, err;
- size_t nbytes;
- struct page *page;
- struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
- u64 attr_version = 0;
- bool locked;
-
- if (is_bad_inode(inode))
- return -EIO;
+ int err;
req = fuse_get_req(fc, 1);
if (IS_ERR(req))
return PTR_ERR(req);
- page = alloc_page(GFP_KERNEL);
- if (!page) {
- fuse_put_request(fc, req);
- return -ENOMEM;
- }
-
- plus = fuse_use_readdirplus(inode, ctx);
+ req->out.page_zeroing = 1;
req->out.argpages = 1;
req->num_pages = 1;
req->pages[0] = page;
- req->page_descs[0].length = PAGE_SIZE;
- if (plus) {
- attr_version = fuse_get_attr_version(fc);
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIRPLUS);
- } else {
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIR);
- }
- locked = fuse_lock_inode(inode);
+ req->page_descs[0].length = PAGE_SIZE - 1;
+ req->in.h.opcode = FUSE_READLINK;
+ req->in.h.nodeid = get_node_id(inode);
+ req->out.argvar = 1;
+ req->out.numargs = 1;
+ req->out.args[0].size = PAGE_SIZE - 1;
fuse_request_send(fc, req);
- fuse_unlock_inode(inode, locked);
- nbytes = req->out.args[0].size;
err = req->out.h.error;
- fuse_put_request(fc, req);
+
if (!err) {
- if (plus) {
- err = parse_dirplusfile(page_address(page), nbytes,
- file, ctx,
- attr_version);
- } else {
- err = parse_dirfile(page_address(page), nbytes, file,
- ctx);
- }
+ char *link = page_address(page);
+ size_t len = req->out.args[0].size;
+
+ BUG_ON(len >= PAGE_SIZE);
+ link[len] = '\0';
}
- __free_page(page);
+ fuse_put_request(fc, req);
fuse_invalidate_atime(inode);
+
return err;
}
-static const char *fuse_get_link(struct dentry *dentry,
- struct inode *inode,
- struct delayed_call *done)
+static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- FUSE_ARGS(args);
- char *link;
- ssize_t ret;
+ struct page *page;
+ int err;
+
+ err = -EIO;
+ if (is_bad_inode(inode))
+ goto out_err;
+ if (fc->cache_symlinks)
+ return page_get_link(dentry, inode, callback);
+
+ err = -ECHILD;
if (!dentry)
- return ERR_PTR(-ECHILD);
+ goto out_err;
- link = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!link)
- return ERR_PTR(-ENOMEM);
+ page = alloc_page(GFP_KERNEL);
+ err = -ENOMEM;
+ if (!page)
+ goto out_err;
- args.in.h.opcode = FUSE_READLINK;
- args.in.h.nodeid = get_node_id(inode);
- args.out.argvar = 1;
- args.out.numargs = 1;
- args.out.args[0].size = PAGE_SIZE - 1;
- args.out.args[0].value = link;
- ret = fuse_simple_request(fc, &args);
- if (ret < 0) {
- kfree(link);
- link = ERR_PTR(ret);
- } else {
- link[ret] = '\0';
- set_delayed_call(done, kfree_link, link);
+ err = fuse_readlink_page(inode, page);
+ if (err) {
+ __free_page(page);
+ goto out_err;
}
- fuse_invalidate_atime(inode);
- return link;
+
+ set_delayed_call(callback, page_put_link, page);
+
+ return page_address(page);
+
+out_err:
+ return ERR_PTR(err);
}
static int fuse_dir_open(struct inode *inode, struct file *file)
@@ -1662,8 +1464,11 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
file = NULL;
}
- if (attr->ia_valid & ATTR_SIZE)
+ if (attr->ia_valid & ATTR_SIZE) {
+ if (WARN_ON(!S_ISREG(inode->i_mode)))
+ return -EIO;
is_truncate = true;
+ }
if (is_truncate) {
fuse_set_nowrite(inode);
@@ -1811,7 +1616,7 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
if (!fuse_allow_current_process(fc))
return -EACCES;
- return fuse_update_get_attr(inode, NULL, stat, flags);
+ return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
}
static const struct inode_operations fuse_dir_inode_operations = {
@@ -1867,11 +1672,37 @@ void fuse_init_common(struct inode *inode)
void fuse_init_dir(struct inode *inode)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
inode->i_op = &fuse_dir_inode_operations;
inode->i_fop = &fuse_dir_operations;
+
+ spin_lock_init(&fi->rdc.lock);
+ fi->rdc.cached = false;
+ fi->rdc.size = 0;
+ fi->rdc.pos = 0;
+ fi->rdc.version = 0;
}
+static int fuse_symlink_readpage(struct file *null, struct page *page)
+{
+ int err = fuse_readlink_page(page->mapping->host, page);
+
+ if (!err)
+ SetPageUptodate(page);
+
+ unlock_page(page);
+
+ return err;
+}
+
+static const struct address_space_operations fuse_symlink_aops = {
+ .readpage = fuse_symlink_readpage,
+};
+
void fuse_init_symlink(struct inode *inode)
{
inode->i_op = &fuse_symlink_inode_operations;
+ inode->i_data.a_ops = &fuse_symlink_aops;
+ inode_nohighmem(inode);
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 32d0b883e74f..cc2121b37bf5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -59,6 +59,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
}
INIT_LIST_HEAD(&ff->write_entry);
+ mutex_init(&ff->readdir.lock);
refcount_set(&ff->count, 1);
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
@@ -73,6 +74,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
void fuse_file_free(struct fuse_file *ff)
{
fuse_request_free(ff->reserved_req);
+ mutex_destroy(&ff->readdir.lock);
kfree(ff);
}
@@ -848,11 +850,11 @@ static int fuse_readpages_fill(void *_data, struct page *page)
fuse_wait_on_page_writeback(inode, page->index);
if (req->num_pages &&
- (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+ (req->num_pages == fc->max_pages ||
(req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
req->pages[req->num_pages - 1]->index + 1 != page->index)) {
- int nr_alloc = min_t(unsigned, data->nr_pages,
- FUSE_MAX_PAGES_PER_REQ);
+ unsigned int nr_alloc = min_t(unsigned int, data->nr_pages,
+ fc->max_pages);
fuse_send_readpages(req, data->file);
if (fc->async_read)
req = fuse_get_req_for_background(fc, nr_alloc);
@@ -887,7 +889,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
- int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);
+ unsigned int nr_alloc = min_t(unsigned int, nr_pages, fc->max_pages);
err = -EIO;
if (is_bad_inode(inode))
@@ -1102,12 +1104,13 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
return count > 0 ? count : err;
}
-static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
+static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
+ unsigned int max_pages)
{
- return min_t(unsigned,
+ return min_t(unsigned int,
((pos + len - 1) >> PAGE_SHIFT) -
(pos >> PAGE_SHIFT) + 1,
- FUSE_MAX_PAGES_PER_REQ);
+ max_pages);
}
static ssize_t fuse_perform_write(struct kiocb *iocb,
@@ -1129,7 +1132,8 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
do {
struct fuse_req *req;
ssize_t count;
- unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));
+ unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
+ fc->max_pages);
req = fuse_get_req(fc, nr_pages);
if (IS_ERR(req)) {
@@ -1271,7 +1275,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
ssize_t ret = 0;
/* Special case for kernel I/O: can copy directly into the buffer */
- if (ii->type & ITER_KVEC) {
+ if (iov_iter_is_kvec(ii)) {
unsigned long user_addr = fuse_get_user_addr(ii);
size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
@@ -1319,11 +1323,6 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
return ret < 0 ? ret : 0;
}
-static inline int fuse_iter_npages(const struct iov_iter *ii_p)
-{
- return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
-}
-
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
loff_t *ppos, int flags)
{
@@ -1343,9 +1342,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
int err = 0;
if (io->async)
- req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
+ req = fuse_get_req_for_background(fc, iov_iter_npages(iter,
+ fc->max_pages));
else
- req = fuse_get_req(fc, fuse_iter_npages(iter));
+ req = fuse_get_req(fc, iov_iter_npages(iter, fc->max_pages));
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1390,9 +1390,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
fuse_put_request(fc, req);
if (io->async)
req = fuse_get_req_for_background(fc,
- fuse_iter_npages(iter));
+ iov_iter_npages(iter, fc->max_pages));
else
- req = fuse_get_req(fc, fuse_iter_npages(iter));
+ req = fuse_get_req(fc, iov_iter_npages(iter,
+ fc->max_pages));
if (IS_ERR(req))
break;
}
@@ -1418,7 +1419,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
res = fuse_direct_io(io, iter, ppos, 0);
- fuse_invalidate_attr(inode);
+ fuse_invalidate_atime(inode);
return res;
}
@@ -1487,6 +1488,7 @@ __acquires(fc->lock)
struct fuse_inode *fi = get_fuse_inode(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
__u64 data_size = req->num_pages * PAGE_SIZE;
+ bool queued;
if (!fc->connected)
goto out_free;
@@ -1502,7 +1504,8 @@ __acquires(fc->lock)
req->in.args[1].size = inarg->size;
fi->writectr++;
- fuse_request_send_background_locked(fc, req);
+ queued = fuse_request_queue_background(fc, req);
+ WARN_ON(!queued);
return;
out_free:
@@ -1819,12 +1822,18 @@ static int fuse_writepages_fill(struct page *page,
is_writeback = fuse_page_is_writeback(inode, page->index);
if (req && req->num_pages &&
- (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+ (is_writeback || req->num_pages == fc->max_pages ||
(req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
data->req = NULL;
+ } else if (req && req->num_pages == req->max_pages) {
+ if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) {
+ fuse_writepages_send(data);
+ req = data->req = NULL;
+ }
}
+
err = -ENOMEM;
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
@@ -1847,7 +1856,7 @@ static int fuse_writepages_fill(struct page *page,
struct fuse_inode *fi = get_fuse_inode(inode);
err = -ENOMEM;
- req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+ req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES);
if (!req) {
__free_page(tmp_page);
goto out_unlock;
@@ -1904,6 +1913,7 @@ static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_wb_data data;
int err;
@@ -1916,7 +1926,7 @@ static int fuse_writepages(struct address_space *mapping,
data.ff = NULL;
err = -ENOMEM;
- data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+ data.orig_pages = kcalloc(fc->max_pages,
sizeof(struct page *),
GFP_NOFS);
if (!data.orig_pages)
@@ -2387,10 +2397,11 @@ static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
}
/* Make sure iov_length() won't overflow */
-static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov,
+ size_t count)
{
size_t n;
- u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+ u32 max = fc->max_pages << PAGE_SHIFT;
for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
@@ -2514,7 +2525,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
- pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
+ pages = kcalloc(fc->max_pages, sizeof(pages[0]), GFP_KERNEL);
iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
if (!pages || !iov_page)
goto out;
@@ -2553,7 +2564,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
/* make sure there are enough buffer pages and init request with them */
err = -ENOMEM;
- if (max_pages > FUSE_MAX_PAGES_PER_REQ)
+ if (max_pages > fc->max_pages)
goto out;
while (num_pages < max_pages) {
pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2640,11 +2651,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
in_iov = iov_page;
out_iov = in_iov + in_iovs;
- err = fuse_verify_ioctl_iov(in_iov, in_iovs);
+ err = fuse_verify_ioctl_iov(fc, in_iov, in_iovs);
if (err)
goto out;
- err = fuse_verify_ioctl_iov(out_iov, out_iovs);
+ err = fuse_verify_ioctl_iov(fc, out_iov, out_iovs);
if (err)
goto out;
@@ -2835,9 +2846,9 @@ static void fuse_do_truncate(struct file *file)
fuse_do_setattr(file_dentry(file), &attr, file);
}
-static inline loff_t fuse_round_up(loff_t off)
+static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
{
- return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
+ return round_up(off, fc->max_pages << PAGE_SHIFT);
}
static ssize_t
@@ -2866,7 +2877,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
if (offset >= i_size)
return 0;
- iov_iter_truncate(iter, fuse_round_up(i_size - offset));
+ iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
count = iov_iter_count(iter);
}
@@ -3011,6 +3022,82 @@ out:
return err;
}
+static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ size_t len, unsigned int flags)
+{
+ struct fuse_file *ff_in = file_in->private_data;
+ struct fuse_file *ff_out = file_out->private_data;
+ struct inode *inode_out = file_inode(file_out);
+ struct fuse_inode *fi_out = get_fuse_inode(inode_out);
+ struct fuse_conn *fc = ff_in->fc;
+ FUSE_ARGS(args);
+ struct fuse_copy_file_range_in inarg = {
+ .fh_in = ff_in->fh,
+ .off_in = pos_in,
+ .nodeid_out = ff_out->nodeid,
+ .fh_out = ff_out->fh,
+ .off_out = pos_out,
+ .len = len,
+ .flags = flags
+ };
+ struct fuse_write_out outarg;
+ ssize_t err;
+ /* mark unstable when write-back is not used, and file_out gets
+ * extended */
+ bool is_unstable = (!fc->writeback_cache) &&
+ ((pos_out + len) > inode_out->i_size);
+
+ if (fc->no_copy_file_range)
+ return -EOPNOTSUPP;
+
+ inode_lock(inode_out);
+
+ if (fc->writeback_cache) {
+ err = filemap_write_and_wait_range(inode_out->i_mapping,
+ pos_out, pos_out + len);
+ if (err)
+ goto out;
+
+ fuse_sync_writes(inode_out);
+ }
+
+ if (is_unstable)
+ set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
+
+ args.in.h.opcode = FUSE_COPY_FILE_RANGE;
+ args.in.h.nodeid = ff_in->nodeid;
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ args.out.numargs = 1;
+ args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
+ if (err == -ENOSYS) {
+ fc->no_copy_file_range = 1;
+ err = -EOPNOTSUPP;
+ }
+ if (err)
+ goto out;
+
+ if (fc->writeback_cache) {
+ fuse_write_update_size(inode_out, pos_out + outarg.size);
+ file_update_time(file_out);
+ }
+
+ fuse_invalidate_attr(inode_out);
+
+ err = outarg.size;
+out:
+ if (is_unstable)
+ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
+
+ inode_unlock(inode_out);
+
+ return err;
+}
+
static const struct file_operations fuse_file_operations = {
.llseek = fuse_file_llseek,
.read_iter = fuse_file_read_iter,
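The new fuse_copy_file_range() op above maps the copy_file_range(2) system call onto a single FUSE_COPY_FILE_RANGE request, so the server can copy the range without the data round-tripping through the client; if the server answers ENOSYS, no_copy_file_range is set and later calls into this op return EOPNOTSUPP. A hedged userspace illustration follows; the paths are hypothetical and the glibc wrapper needs glibc 2.27 or newer (otherwise use syscall(2)).

/* Userspace illustration only: exercising the new path on a FUSE mount. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical files on a FUSE mount whose server handles the request */
        int in = open("/mnt/fuse/src.bin", O_RDONLY);
        int out = open("/mnt/fuse/dst.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        ssize_t n;

        if (in < 0 || out < 0) {
                perror("open");
                return 1;
        }

        /* NULL offsets: use and advance the file positions of both fds */
        n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
        if (n < 0)
                perror("copy_file_range");
        else
                printf("copied %zd bytes\n", n);

        close(in);
        close(out);
        return 0;
}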
@@ -3027,6 +3114,7 @@ static const struct file_operations fuse_file_operations = {
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
.fallocate = fuse_file_fallocate,
+ .copy_file_range = fuse_copy_file_range,
};
static const struct file_operations fuse_direct_io_file_operations = {
@@ -3062,6 +3150,14 @@ static const struct address_space_operations fuse_file_aops = {
void fuse_init_file_inode(struct inode *inode)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
inode->i_fop = &fuse_file_operations;
inode->i_data.a_ops = &fuse_file_aops;
+
+ INIT_LIST_HEAD(&fi->write_files);
+ INIT_LIST_HEAD(&fi->queued_writes);
+ fi->writectr = 0;
+ init_waitqueue_head(&fi->page_waitq);
+ INIT_LIST_HEAD(&fi->writepages);
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f78e9614bb5f..e9f712e81c7d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -28,8 +28,11 @@
#include <linux/refcount.h>
#include <linux/user_namespace.h>
-/** Max number of pages that can be used in a single read request */
-#define FUSE_MAX_PAGES_PER_REQ 32
+/** Default max number of pages that can be used in a single read request */
+#define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32
+
+/** Maximum of max_pages received in init_out */
+#define FUSE_MAX_MAX_PAGES 256
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
@@ -77,6 +80,9 @@ struct fuse_inode {
/** Time in jiffies until the file attributes are valid */
u64 i_time;
+ /* Which attributes are invalid */
+ u32 inval_mask;
+
/** The sticky bit in inode->i_mode may have been removed, so
preserve the original mode */
umode_t orig_i_mode;
@@ -87,21 +93,51 @@ struct fuse_inode {
/** Version of last attribute change */
u64 attr_version;
- /** Files usable in writepage. Protected by fc->lock */
- struct list_head write_files;
+ union {
+ /* Write related fields (regular file only) */
+ struct {
+ /* Files usable in writepage. Protected by fc->lock */
+ struct list_head write_files;
+
+ /* Writepages pending on truncate or fsync */
+ struct list_head queued_writes;
- /** Writepages pending on truncate or fsync */
- struct list_head queued_writes;
+ /* Number of sent writes, a negative bias
+ * (FUSE_NOWRITE) means more writes are blocked */
+ int writectr;
+
+ /* Waitq for writepage completion */
+ wait_queue_head_t page_waitq;
+
+ /* List of writepage requests (pending or sent) */
+ struct list_head writepages;
+ };
+
+ /* readdir cache (directory only) */
+ struct {
+ /* true if fully cached */
+ bool cached;
- /** Number of sent writes, a negative bias (FUSE_NOWRITE)
- * means more writes are blocked */
- int writectr;
+ /* size of cache */
+ loff_t size;
- /** Waitq for writepage completion */
- wait_queue_head_t page_waitq;
+ /* position at end of cache (position of next entry) */
+ loff_t pos;
- /** List of writepage requestst (pending or sent) */
- struct list_head writepages;
+ /* version of the cache */
+ u64 version;
+
+ /* modification time of directory when cache was
+ * started */
+ struct timespec64 mtime;
+
+ /* iversion of directory when cache was started */
+ u64 iversion;
+
+ /* protects above fields */
+ spinlock_t lock;
+ } rdc;
+ };
/** Miscellaneous bits describing inode state */
unsigned long state;
@@ -148,6 +184,25 @@ struct fuse_file {
/** Entry on inode's write_files list */
struct list_head write_entry;
+ /* Readdir related */
+ struct {
+ /*
+ * Protects below fields against (crazy) parallel readdir on
+ * same open file. Uncontended in the normal case.
+ */
+ struct mutex lock;
+
+ /* Dir stream position */
+ loff_t pos;
+
+ /* Offset in cache */
+ loff_t cache_off;
+
+ /* Version of cache we are reading */
+ u64 version;
+
+ } readdir;
+
/** RB node to be linked on fuse_conn->polled_files */
struct rb_node polled_node;
@@ -311,9 +366,6 @@ struct fuse_req {
/** refcount */
refcount_t count;
- /** Unique ID for the interrupt request */
- u64 intr_unique;
-
/* Request flags, updated with test/set/clear_bit() */
unsigned long flags;
@@ -411,6 +463,9 @@ struct fuse_iqueue {
struct fasync_struct *fasync;
};
+#define FUSE_PQ_HASH_BITS 8
+#define FUSE_PQ_HASH_SIZE (1 << FUSE_PQ_HASH_BITS)
+
struct fuse_pqueue {
/** Connection established */
unsigned connected;
@@ -418,8 +473,8 @@ struct fuse_pqueue {
/** Lock protecting accessess to members of this structure */
spinlock_t lock;
- /** The list of requests being processed */
- struct list_head processing;
+ /** Hash table of requests being processed */
+ struct list_head *processing;
/** The list of requests under I/O */
struct list_head io;
@@ -476,6 +531,9 @@ struct fuse_conn {
/** Maximum write size */
unsigned max_write;
+ /** Maximum number of pages that can be used in a single request */
+ unsigned int max_pages;
+
/** Input queue */
struct fuse_iqueue iq;
@@ -500,6 +558,10 @@ struct fuse_conn {
/** The list of background requests set aside for later queuing */
struct list_head bg_queue;
+ /** Protects: max_background, congestion_threshold, num_background,
+ * active_background, bg_queue, blocked */
+ spinlock_t bg_lock;
+
/** Flag indicating that INIT reply has been received. Allocating
* any fuse request will be suspended until the flag is set */
int initialized;
@@ -551,6 +613,9 @@ struct fuse_conn {
/** handle fs handles killing suid/sgid/cap on write/chown/trunc */
unsigned handle_killpriv:1;
+ /** cache READLINK responses in page cache */
+ unsigned cache_symlinks:1;
+
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
@@ -637,6 +702,9 @@ struct fuse_conn {
/** Allow other than the mounter user to access the filesystem ? */
unsigned allow_other:1;
+ /** Does the filesystem support copy_file_range? */
+ unsigned no_copy_file_range:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -697,6 +765,11 @@ static inline u64 get_node_id(struct inode *inode)
return get_fuse_inode(inode)->nodeid;
}
+static inline int invalid_nodeid(u64 nodeid)
+{
+ return !nodeid || nodeid == FUSE_ROOT_ID;
+}
+
/** Device operations */
extern const struct file_operations fuse_dev_operations;
@@ -812,6 +885,10 @@ struct fuse_req *fuse_request_alloc(unsigned npages);
struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+ gfp_t flags);
+
+
/**
* Free a request
*/
@@ -856,9 +933,7 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
* Send a request in the background
*/
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
-
-void fuse_request_send_background_locked(struct fuse_conn *fc,
- struct fuse_req *req);
+bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
@@ -873,6 +948,9 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
void fuse_invalidate_atime(struct inode *inode);
+u64 entry_attr_timeout(struct fuse_entry_out *o);
+void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
+
/**
* Acquire reference to fuse_conn
*/
@@ -992,4 +1070,8 @@ struct posix_acl;
struct posix_acl *fuse_get_acl(struct inode *inode, int type);
int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+
+/* readdir.c */
+int fuse_readdir(struct file *file, struct dir_context *ctx);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index db9e60b7eb69..0b94b23b02d4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -90,16 +90,12 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
fi = get_fuse_inode(inode);
fi->i_time = 0;
+ fi->inval_mask = 0;
fi->nodeid = 0;
fi->nlookup = 0;
fi->attr_version = 0;
- fi->writectr = 0;
fi->orig_ino = 0;
fi->state = 0;
- INIT_LIST_HEAD(&fi->write_files);
- INIT_LIST_HEAD(&fi->queued_writes);
- INIT_LIST_HEAD(&fi->writepages);
- init_waitqueue_head(&fi->page_waitq);
mutex_init(&fi->mutex);
fi->forget = fuse_alloc_forget();
if (!fi->forget) {
@@ -119,8 +115,10 @@ static void fuse_i_callback(struct rcu_head *head)
static void fuse_destroy_inode(struct inode *inode)
{
struct fuse_inode *fi = get_fuse_inode(inode);
- BUG_ON(!list_empty(&fi->write_files));
- BUG_ON(!list_empty(&fi->queued_writes));
+ if (S_ISREG(inode->i_mode)) {
+ WARN_ON(!list_empty(&fi->write_files));
+ WARN_ON(!list_empty(&fi->queued_writes));
+ }
mutex_destroy(&fi->mutex);
kfree(fi->forget);
call_rcu(&inode->i_rcu, fuse_i_callback);
@@ -167,6 +165,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
fi->attr_version = ++fc->attr_version;
fi->i_time = attr_valid;
+ WRITE_ONCE(fi->inval_mask, 0);
inode->i_ino = fuse_squash_ino(attr->ino);
inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -594,9 +593,11 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq)
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
{
- memset(fpq, 0, sizeof(struct fuse_pqueue));
+ unsigned int i;
+
spin_lock_init(&fpq->lock);
- INIT_LIST_HEAD(&fpq->processing);
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&fpq->processing[i]);
INIT_LIST_HEAD(&fpq->io);
fpq->connected = 1;
}
@@ -605,6 +606,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
{
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
+ spin_lock_init(&fc->bg_lock);
init_rwsem(&fc->killsb);
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
@@ -852,6 +854,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
sanitize_global_limit(&max_user_bgreq);
sanitize_global_limit(&max_user_congthresh);
+ spin_lock(&fc->bg_lock);
if (arg->max_background) {
fc->max_background = arg->max_background;
@@ -865,6 +868,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
fc->congestion_threshold > max_user_congthresh)
fc->congestion_threshold = max_user_congthresh;
}
+ spin_unlock(&fc->bg_lock);
}
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
@@ -924,8 +928,15 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->posix_acl = 1;
fc->sb->s_xattr = fuse_acl_xattr_handlers;
}
+ if (arg->flags & FUSE_CACHE_SYMLINKS)
+ fc->cache_symlinks = 1;
if (arg->flags & FUSE_ABORT_ERROR)
fc->abort_err = 1;
+ if (arg->flags & FUSE_MAX_PAGES) {
+ fc->max_pages =
+ min_t(unsigned int, FUSE_MAX_MAX_PAGES,
+ max_t(unsigned int, arg->max_pages, 1));
+ }
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -957,7 +968,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
- FUSE_ABORT_ERROR;
+ FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -1022,17 +1033,26 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
{
struct fuse_dev *fud;
+ struct list_head *pq;
fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
- if (fud) {
- fud->fc = fuse_conn_get(fc);
- fuse_pqueue_init(&fud->pq);
+ if (!fud)
+ return NULL;
- spin_lock(&fc->lock);
- list_add_tail(&fud->entry, &fc->devices);
- spin_unlock(&fc->lock);
+ pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
+ if (!pq) {
+ kfree(fud);
+ return NULL;
}
+ fud->pq.processing = pq;
+ fud->fc = fuse_conn_get(fc);
+ fuse_pqueue_init(&fud->pq);
+
+ spin_lock(&fc->lock);
+ list_add_tail(&fud->entry, &fc->devices);
+ spin_unlock(&fc->lock);
+
return fud;
}
EXPORT_SYMBOL_GPL(fuse_dev_alloc);
@@ -1141,6 +1161,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->user_id = d.user_id;
fc->group_id = d.group_id;
fc->max_read = max_t(unsigned, 4096, d.max_read);
+ fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
/* Used by get_root_inode() */
sb->s_fs_info = fc;
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
new file mode 100644
index 000000000000..ab18b78f4755
--- /dev/null
+++ b/fs/fuse/readdir.c
@@ -0,0 +1,569 @@
+/*
+ FUSE: Filesystem in Userspace
+ Copyright (C) 2001-2018 Miklos Szeredi <miklos@szeredi.hu>
+
+ This program can be distributed under the terms of the GNU GPL.
+ See the file COPYING.
+*/
+
+
+#include "fuse_i.h"
+#include <linux/iversion.h>
+#include <linux/posix_acl.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+
+static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx)
+{
+ struct fuse_conn *fc = get_fuse_conn(dir);
+ struct fuse_inode *fi = get_fuse_inode(dir);
+
+ if (!fc->do_readdirplus)
+ return false;
+ if (!fc->readdirplus_auto)
+ return true;
+ if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state))
+ return true;
+ if (ctx->pos == 0)
+ return true;
+ return false;
+}
+
+static void fuse_add_dirent_to_cache(struct file *file,
+ struct fuse_dirent *dirent, loff_t pos)
+{
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
+ size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ pgoff_t index;
+ struct page *page;
+ loff_t size;
+ u64 version;
+ unsigned int offset;
+ void *addr;
+
+ spin_lock(&fi->rdc.lock);
+ /*
+ * Is the cache already complete? Or does this entry not go at the end
+ * of the cache?
+ */
+ if (fi->rdc.cached || pos != fi->rdc.pos) {
+ spin_unlock(&fi->rdc.lock);
+ return;
+ }
+ version = fi->rdc.version;
+ size = fi->rdc.size;
+ offset = size & ~PAGE_MASK;
+ index = size >> PAGE_SHIFT;
+ /* Dirent doesn't fit in current page? Jump to next page. */
+ if (offset + reclen > PAGE_SIZE) {
+ index++;
+ offset = 0;
+ }
+ spin_unlock(&fi->rdc.lock);
+
+ if (offset) {
+ page = find_lock_page(file->f_mapping, index);
+ } else {
+ page = find_or_create_page(file->f_mapping, index,
+ mapping_gfp_mask(file->f_mapping));
+ }
+ if (!page)
+ return;
+
+ spin_lock(&fi->rdc.lock);
+ /* Raced with another readdir */
+ if (fi->rdc.version != version || fi->rdc.size != size ||
+ WARN_ON(fi->rdc.pos != pos))
+ goto unlock;
+
+ addr = kmap_atomic(page);
+ if (!offset)
+ clear_page(addr);
+ memcpy(addr + offset, dirent, reclen);
+ kunmap_atomic(addr);
+ fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen;
+ fi->rdc.pos = dirent->off;
+unlock:
+ spin_unlock(&fi->rdc.lock);
+ unlock_page(page);
+ put_page(page);
+}
+
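fuse_add_dirent_to_cache() above appends each emitted dirent to a per-directory page-cache stream: the write position is derived from rdc.size, and an entry that would cross a page boundary is bumped to the start of the following page so a dirent never straddles two pages. A standalone sketch of that placement rule follows, using made-up record lengths and assuming a 4096-byte page.

/* Userspace sketch of the cache-placement rule used above. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
        /* hypothetical FUSE_DIRENT_SIZE() results for successive entries */
        unsigned long reclens[] = { 40, 64, 3980, 48, 4000, 56 };
        unsigned long size = 0; /* mirrors fi->rdc.size */

        for (unsigned i = 0; i < sizeof(reclens) / sizeof(reclens[0]); i++) {
                unsigned long offset = size & (DEMO_PAGE_SIZE - 1);
                unsigned long index = size / DEMO_PAGE_SIZE;

                /* same test as the patch: never split a dirent across pages */
                if (offset + reclens[i] > DEMO_PAGE_SIZE) {
                        index++;
                        offset = 0;
                }
                printf("entry %u (len %lu) -> page %lu, offset %lu\n",
                       i, reclens[i], index, offset);
                size = index * DEMO_PAGE_SIZE + offset + reclens[i];
        }
        return 0;
}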
+static void fuse_readdir_cache_end(struct file *file, loff_t pos)
+{
+ struct fuse_inode *fi = get_fuse_inode(file_inode(file));
+ loff_t end;
+
+ spin_lock(&fi->rdc.lock);
+ /* does cache end position match current position? */
+ if (fi->rdc.pos != pos) {
+ spin_unlock(&fi->rdc.lock);
+ return;
+ }
+
+ fi->rdc.cached = true;
+ end = ALIGN(fi->rdc.size, PAGE_SIZE);
+ spin_unlock(&fi->rdc.lock);
+
+ /* truncate unused tail of cache */
+ truncate_inode_pages(file->f_mapping, end);
+}
+
+static bool fuse_emit(struct file *file, struct dir_context *ctx,
+ struct fuse_dirent *dirent)
+{
+ struct fuse_file *ff = file->private_data;
+
+ if (ff->open_flags & FOPEN_CACHE_DIR)
+ fuse_add_dirent_to_cache(file, dirent, ctx->pos);
+
+ return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino,
+ dirent->type);
+}
+
+static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
+ struct dir_context *ctx)
+{
+ while (nbytes >= FUSE_NAME_OFFSET) {
+ struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
+ size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
+ return -EIO;
+ if (reclen > nbytes)
+ break;
+ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+ return -EIO;
+
+ if (!fuse_emit(file, ctx, dirent))
+ break;
+
+ buf += reclen;
+ nbytes -= reclen;
+ ctx->pos = dirent->off;
+ }
+
+ return 0;
+}
+
+static int fuse_direntplus_link(struct file *file,
+ struct fuse_direntplus *direntplus,
+ u64 attr_version)
+{
+ struct fuse_entry_out *o = &direntplus->entry_out;
+ struct fuse_dirent *dirent = &direntplus->dirent;
+ struct dentry *parent = file->f_path.dentry;
+ struct qstr name = QSTR_INIT(dirent->name, dirent->namelen);
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *dir = d_inode(parent);
+ struct fuse_conn *fc;
+ struct inode *inode;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (!o->nodeid) {
+ /*
+ * Unlike in the case of fuse_lookup, zero nodeid does not mean
+ * ENOENT. Instead, it only means the userspace filesystem did
+ * not want to return attributes/handle for this entry.
+ *
+ * So do nothing.
+ */
+ return 0;
+ }
+
+ if (name.name[0] == '.') {
+ /*
+ * We could potentially refresh the attributes of the directory
+ * and its parent?
+ */
+ if (name.len == 1)
+ return 0;
+ if (name.name[1] == '.' && name.len == 2)
+ return 0;
+ }
+
+ if (invalid_nodeid(o->nodeid))
+ return -EIO;
+ if (!fuse_valid_type(o->attr.mode))
+ return -EIO;
+
+ fc = get_fuse_conn(dir);
+
+ name.hash = full_name_hash(parent, name.name, name.len);
+ dentry = d_lookup(parent, &name);
+ if (!dentry) {
+retry:
+ dentry = d_alloc_parallel(parent, &name, &wq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ }
+ if (!d_in_lookup(dentry)) {
+ struct fuse_inode *fi;
+ inode = d_inode(dentry);
+ if (!inode ||
+ get_node_id(inode) != o->nodeid ||
+ ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
+ d_invalidate(dentry);
+ dput(dentry);
+ goto retry;
+ }
+ if (is_bad_inode(inode)) {
+ dput(dentry);
+ return -EIO;
+ }
+
+ fi = get_fuse_inode(inode);
+ spin_lock(&fc->lock);
+ fi->nlookup++;
+ spin_unlock(&fc->lock);
+
+ forget_all_cached_acls(inode);
+ fuse_change_attributes(inode, &o->attr,
+ entry_attr_timeout(o),
+ attr_version);
+ /*
+ * The other branch comes via fuse_iget()
+ * which bumps nlookup inside
+ */
+ } else {
+ inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
+ &o->attr, entry_attr_timeout(o),
+ attr_version);
+ if (!inode)
+ inode = ERR_PTR(-ENOMEM);
+
+ alias = d_splice_alias(inode, dentry);
+ d_lookup_done(dentry);
+ if (alias) {
+ dput(dentry);
+ dentry = alias;
+ }
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ }
+ if (fc->readdirplus_auto)
+ set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
+ fuse_change_entry_timeout(dentry, o);
+
+ dput(dentry);
+ return 0;
+}
+
+static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
+ struct dir_context *ctx, u64 attr_version)
+{
+ struct fuse_direntplus *direntplus;
+ struct fuse_dirent *dirent;
+ size_t reclen;
+ int over = 0;
+ int ret;
+
+ while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) {
+ direntplus = (struct fuse_direntplus *) buf;
+ dirent = &direntplus->dirent;
+ reclen = FUSE_DIRENTPLUS_SIZE(direntplus);
+
+ if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX)
+ return -EIO;
+ if (reclen > nbytes)
+ break;
+ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
+ return -EIO;
+
+ if (!over) {
+ /* We fill entries into dstbuf only as much as
+ it can hold. But we still continue iterating
+ over remaining entries to link them. If not,
+ we need to send a FORGET for each of those
+ which we did not link.
+ */
+ over = !fuse_emit(file, ctx, dirent);
+ if (!over)
+ ctx->pos = dirent->off;
+ }
+
+ buf += reclen;
+ nbytes -= reclen;
+
+ ret = fuse_direntplus_link(file, direntplus, attr_version);
+ if (ret)
+ fuse_force_forget(file, direntplus->entry_out.nodeid);
+ }
+
+ return 0;
+}
+
+static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
+{
+ int plus, err;
+ size_t nbytes;
+ struct page *page;
+ struct inode *inode = file_inode(file);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_req *req;
+ u64 attr_version = 0;
+ bool locked;
+
+ req = fuse_get_req(fc, 1);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ fuse_put_request(fc, req);
+ return -ENOMEM;
+ }
+
+ plus = fuse_use_readdirplus(inode, ctx);
+ req->out.argpages = 1;
+ req->num_pages = 1;
+ req->pages[0] = page;
+ req->page_descs[0].length = PAGE_SIZE;
+ if (plus) {
+ attr_version = fuse_get_attr_version(fc);
+ fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIRPLUS);
+ } else {
+ fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIR);
+ }
+ locked = fuse_lock_inode(inode);
+ fuse_request_send(fc, req);
+ fuse_unlock_inode(inode, locked);
+ nbytes = req->out.args[0].size;
+ err = req->out.h.error;
+ fuse_put_request(fc, req);
+ if (!err) {
+ if (!nbytes) {
+ struct fuse_file *ff = file->private_data;
+
+ if (ff->open_flags & FOPEN_CACHE_DIR)
+ fuse_readdir_cache_end(file, ctx->pos);
+ } else if (plus) {
+ err = parse_dirplusfile(page_address(page), nbytes,
+ file, ctx, attr_version);
+ } else {
+ err = parse_dirfile(page_address(page), nbytes, file,
+ ctx);
+ }
+ }
+
+ __free_page(page);
+ fuse_invalidate_atime(inode);
+ return err;
+}
+
+enum fuse_parse_result {
+ FOUND_ERR = -1,
+ FOUND_NONE = 0,
+ FOUND_SOME,
+ FOUND_ALL,
+};
+
+static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
+ void *addr, unsigned int size,
+ struct dir_context *ctx)
+{
+ unsigned int offset = ff->readdir.cache_off & ~PAGE_MASK;
+ enum fuse_parse_result res = FOUND_NONE;
+
+ WARN_ON(offset >= size);
+
+ for (;;) {
+ struct fuse_dirent *dirent = addr + offset;
+ unsigned int nbytes = size - offset;
+ size_t reclen = FUSE_DIRENT_SIZE(dirent);
+
+ if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen)
+ break;
+
+ if (WARN_ON(dirent->namelen > FUSE_NAME_MAX))
+ return FOUND_ERR;
+ if (WARN_ON(reclen > nbytes))
+ return FOUND_ERR;
+ if (WARN_ON(memchr(dirent->name, '/', dirent->namelen) != NULL))
+ return FOUND_ERR;
+
+ if (ff->readdir.pos == ctx->pos) {
+ res = FOUND_SOME;
+ if (!dir_emit(ctx, dirent->name, dirent->namelen,
+ dirent->ino, dirent->type))
+ return FOUND_ALL;
+ ctx->pos = dirent->off;
+ }
+ ff->readdir.pos = dirent->off;
+ ff->readdir.cache_off += reclen;
+
+ offset += reclen;
+ }
+
+ return res;
+}
+
+static void fuse_rdc_reset(struct inode *inode)
+{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ fi->rdc.cached = false;
+ fi->rdc.version++;
+ fi->rdc.size = 0;
+ fi->rdc.pos = 0;
+}
+
+#define UNCACHED 1
+
+static int fuse_readdir_cached(struct file *file, struct dir_context *ctx)
+{
+ struct fuse_file *ff = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ enum fuse_parse_result res;
+ pgoff_t index;
+ unsigned int size;
+ struct page *page;
+ void *addr;
+
+ /* Seeked? If so, reset the cache stream */
+ if (ff->readdir.pos != ctx->pos) {
+ ff->readdir.pos = 0;
+ ff->readdir.cache_off = 0;
+ }
+
+ /*
+ * We're just about to start reading into the cache or reading the
+ * cache; both cases require an up-to-date mtime value.
+ */
+ if (!ctx->pos && fc->auto_inval_data) {
+ int err = fuse_update_attributes(inode, file);
+
+ if (err)
+ return err;
+ }
+
+retry:
+ spin_lock(&fi->rdc.lock);
+retry_locked:
+ if (!fi->rdc.cached) {
+ /* Starting cache? Set cache mtime. */
+ if (!ctx->pos && !fi->rdc.size) {
+ fi->rdc.mtime = inode->i_mtime;
+ fi->rdc.iversion = inode_query_iversion(inode);
+ }
+ spin_unlock(&fi->rdc.lock);
+ return UNCACHED;
+ }
+ /*
+ * At the beginning of the directory (i.e. just after opendir(3) or
+ * rewinddir(3)) we need to check whether the directory contents have
+ * changed, and reset the cache if so.
+ */
+ if (!ctx->pos) {
+ if (inode_peek_iversion(inode) != fi->rdc.iversion ||
+ !timespec64_equal(&fi->rdc.mtime, &inode->i_mtime)) {
+ fuse_rdc_reset(inode);
+ goto retry_locked;
+ }
+ }
+
+ /*
+ * If cache version changed since the last getdents() call, then reset
+ * the cache stream.
+ */
+ if (ff->readdir.version != fi->rdc.version) {
+ ff->readdir.pos = 0;
+ ff->readdir.cache_off = 0;
+ }
+ /*
+ * If at the beginning of the cache, then reset the version to the
+ * current one.
+ */
+ if (ff->readdir.pos == 0)
+ ff->readdir.version = fi->rdc.version;
+
+ WARN_ON(fi->rdc.size < ff->readdir.cache_off);
+
+ index = ff->readdir.cache_off >> PAGE_SHIFT;
+
+ if (index == (fi->rdc.size >> PAGE_SHIFT))
+ size = fi->rdc.size & ~PAGE_MASK;
+ else
+ size = PAGE_SIZE;
+ spin_unlock(&fi->rdc.lock);
+
+ /* EOF? */
+ if ((ff->readdir.cache_off & ~PAGE_MASK) == size)
+ return 0;
+
+ page = find_get_page_flags(file->f_mapping, index,
+ FGP_ACCESSED | FGP_LOCK);
+ spin_lock(&fi->rdc.lock);
+ if (!page) {
+ /*
+ * Uh-oh: page gone missing, cache is useless
+ */
+ if (fi->rdc.version == ff->readdir.version)
+ fuse_rdc_reset(inode);
+ goto retry_locked;
+ }
+
+ /* Make sure it's still the same version after getting the page. */
+ if (ff->readdir.version != fi->rdc.version) {
+ spin_unlock(&fi->rdc.lock);
+ unlock_page(page);
+ put_page(page);
+ goto retry;
+ }
+ spin_unlock(&fi->rdc.lock);
+
+ /*
+ * Contents of the page are now protected against changing by holding
+ * the page lock.
+ */
+ addr = kmap(page);
+ res = fuse_parse_cache(ff, addr, size, ctx);
+ kunmap(page);
+ unlock_page(page);
+ put_page(page);
+
+ if (res == FOUND_ERR)
+ return -EIO;
+
+ if (res == FOUND_ALL)
+ return 0;
+
+ if (size == PAGE_SIZE) {
+ /* We hit end of page: skip to next page. */
+ ff->readdir.cache_off = ALIGN(ff->readdir.cache_off, PAGE_SIZE);
+ goto retry;
+ }
+
+ /*
+ * End of cache reached. If we found the position, then we are done;
+ * otherwise we need to fall back to uncached, since the position we
+ * were looking for wasn't in the cache.
+ */
+ return res == FOUND_SOME ? 0 : UNCACHED;
+}
+
+int fuse_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct fuse_file *ff = file->private_data;
+ struct inode *inode = file_inode(file);
+ int err;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
+ mutex_lock(&ff->readdir.lock);
+
+ err = UNCACHED;
+ if (ff->open_flags & FOPEN_CACHE_DIR)
+ err = fuse_readdir_cached(file, ctx);
+ if (err == UNCACHED)
+ err = fuse_readdir_uncached(file, ctx);
+
+ mutex_unlock(&ff->readdir.lock);
+
+ return err;
+}
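parse_dirfile() and fuse_parse_cache() above both walk a byte buffer of variable-length fuse_dirent records, advancing by each record's FUSE_DIRENT_SIZE() and stopping on a short or malformed entry. The following self-contained userspace sketch models that record walk; demo_dirent is an illustrative layout (8-byte record padding assumed), not the real struct fuse_dirent.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_dirent {
	uint64_t ino;
	uint64_t off;        /* offset cookie of the *next* entry */
	uint32_t namelen;
	char     name[];     /* namelen bytes, not NUL-terminated */
};

#define DEMO_NAME_OFFSET offsetof(struct demo_dirent, name)
/* records are padded to an 8-byte boundary, like FUSE_DIRENT_SIZE() */
#define DEMO_RECLEN(d) (((DEMO_NAME_OFFSET + (d)->namelen) + 7) & ~7UL)

static void walk(const char *buf, size_t nbytes)
{
	while (nbytes >= DEMO_NAME_OFFSET) {
		const struct demo_dirent *d = (const void *)buf;
		size_t reclen = DEMO_RECLEN(d);

		if (!d->namelen || reclen > nbytes)
			break;                  /* short or malformed record */
		printf("%.*s (ino %llu)\n", (int)d->namelen, d->name,
		       (unsigned long long)d->ino);
		buf += reclen;
		nbytes -= reclen;
	}
}

int main(void)
{
	uint64_t storage[8] = { 0 };        /* 8-byte aligned scratch buffer */
	char *buf = (char *)storage;
	struct demo_dirent hdr = { .ino = 1, .off = 0, .namelen = 5 };

	memcpy(buf, &hdr, DEMO_NAME_OFFSET);
	memcpy(buf + DEMO_NAME_OFFSET, "hello", 5);
	walk(buf, sizeof(storage));
	return 0;
}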
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 9a8772465a90..896396554bcc 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -425,6 +425,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
@@ -441,6 +445,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 374b5688e29e..98b96ffb95ed 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -220,25 +220,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
-
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- u32 count;
- int res;
+ struct inode *inode = tree->inode;
+ u32 count;
+ int res;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfs_extend_file(inode);
if (res)
- return ERR_PTR(res);
+ return res;
HFS_I(inode)->phys_size = inode->i_size =
(loff_t)HFS_I(inode)->alloc_blocks *
HFS_SB(tree->sb)->alloc_blksz;
@@ -246,9 +238,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
tree->sb->s_blocksize_bits;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
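hfs_bmap_reserve() above replaces the old allocate-until-it-fails behaviour with a fail-early reservation: callers state up front how many b-tree nodes an operation might consume (e.g. 2 * tree->depth when every level plus the root could split), and the map file is extended until that many free nodes exist, so the operation itself can no longer hit ENOSPC halfway through. The toy model below illustrates the pattern under that reading; grow() stands in for hfs_extend_file() and all names are illustrative.

#include <errno.h>
#include <stdio.h>

struct pool {
	int free_nodes;      /* nodes already backed by on-disk space */
	int grows_left;      /* how often the backing file may still extend */
};

static int grow(struct pool *p)
{
	if (!p->grows_left)
		return -ENOSPC;
	p->grows_left--;
	p->free_nodes += 8;  /* each extension adds a fixed batch of nodes */
	return 0;
}

/* Fail early: guarantee that rsvd_nodes later allocations cannot fail. */
static int pool_reserve(struct pool *p, int rsvd_nodes)
{
	while (p->free_nodes < rsvd_nodes) {
		int res = grow(p);

		if (res)
			return res;
	}
	return 0;
}

static int pool_alloc(struct pool *p)
{
	int res = pool_reserve(p, 1);

	if (res)
		return res;
	p->free_nodes--;
	return 0;
}

int main(void)
{
	struct pool p = { .free_nodes = 0, .grows_left = 1 };

	/* Reserve room for a depth-3 insert before modifying anything: */
	if (pool_reserve(&p, 2 * 3)) {
		printf("no space: refused up front, nothing left half-done\n");
		return 1;
	}
	/* From here on, up to six pool_alloc() calls are guaranteed to work. */
	printf("allocated: %d\n", pool_alloc(&p) == 0);
	return 0;
}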
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index c8b252dbb26c..dcc2aab1b2c4 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -82,6 +82,7 @@ struct hfs_find_data {
extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
extern void hfs_btree_close(struct hfs_btree *);
extern void hfs_btree_write(struct hfs_btree *);
+extern int hfs_bmap_reserve(struct hfs_btree *, int);
extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
extern void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 8a66405b0f8b..d365bf0b8c77 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
HFS_CDR_THD : HFS_CDR_FTH,
@@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
err = hfs_brec_find(&src_fd);
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 5d0182654580..263d5028d9d1 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
} else {
@@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
return 0;
blocks = 0;
- for (i = 0; i < 3; extent++, i++)
+ for (i = 0; i < 3; i++)
blocks += be16_to_cpu(extent[i].count);
res = hfs_free_extents(sb, extent, blocks, blocks);
@@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block,
ablock = (u32)block / HFS_SB(sb)->fs_div;
if (block >= HFS_I(inode)->fs_blocks) {
- if (block > HFS_I(inode)->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (block > HFS_I(inode)->fs_blocks)
return -EIO;
if (ablock >= HFS_I(inode)->alloc_blocks) {
res = hfs_extend_file(inode);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a2dfa1b2a89c..da243c84e93b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -642,6 +642,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
+ inode->i_atime = inode->i_mtime = inode->i_ctime =
+ current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index 2bab6b3cdba4..e6d554476db4 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -217,6 +217,11 @@ int hfsplus_create_attr(struct inode *inode,
if (err)
goto failed_init_create_attr;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
+ if (err)
+ goto failed_create_attr;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
@@ -313,6 +318,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
if (err)
return err;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
+ if (err)
+ goto out;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index ed8eacb34452..1918544a7871 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -429,6 +429,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
@@ -445,6 +449,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index de14b2b6881b..236efe51eca6 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -342,26 +342,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
+ struct inode *inode = tree->inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ u32 count;
+ int res;
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
- u32 count;
- int res;
+ if (rsvd_nodes <= 0)
+ return 0;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
if (res)
- return ERR_PTR(res);
+ return res;
hip->phys_size = inode->i_size =
(loff_t)hip->alloc_blocks <<
HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
@@ -369,9 +364,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index a196369ba779..35472cba750e 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -265,6 +265,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
@@ -333,6 +341,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
+ if (err)
+ goto out;
+
if (!str) {
int len;
@@ -433,6 +449,14 @@ int hfsplus_rename_cat(u32 cnid,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most twice.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
src_dir->i_ino, src_name);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 8e0f59767694..a930ddd15681 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -100,6 +100,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode,
if (hip->extent_state & HFSPLUS_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, hip->cached_extents,
sizeof(hfsplus_extent_rec));
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
@@ -233,7 +237,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
ablock = iblock >> sbi->fs_shift;
if (iblock >= hip->fs_blocks) {
- if (iblock > hip->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (iblock > hip->fs_blocks)
return -EIO;
if (ablock >= hip->alloc_blocks) {
res = hfsplus_file_extend(inode, false);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 8e039435958a..dd7ad9f13e3a 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
#define hfs_btree_open hfsplus_btree_open
#define hfs_btree_close hfsplus_btree_close
#define hfs_btree_write hfsplus_btree_write
+#define hfs_bmap_reserve hfsplus_bmap_reserve
#define hfs_bmap_alloc hfsplus_bmap_alloc
#define hfs_bmap_free hfsplus_bmap_free
#define hfs_bnode_read hfsplus_bnode_read
@@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
void hfs_btree_close(struct hfs_btree *tree);
int hfs_btree_write(struct hfs_btree *tree);
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 8e9427a42b81..d7ab9d8c4b67 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -261,6 +261,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/inode.c b/fs/inode.c
index 9b808986d440..9e198f00b64c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -10,7 +10,7 @@
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 2005529af560..d64f622cac8b 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -223,6 +223,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
u64 off, u64 olen, u64 destoff)
{
struct fd src_file = fdget(srcfd);
+ loff_t cloned;
int ret;
if (!src_file.file)
@@ -230,7 +231,14 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
ret = -EXDEV;
if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
goto fdput;
- ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+ cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
+ olen, 0);
+ if (cloned < 0)
+ ret = cloned;
+ else if (olen && cloned != olen)
+ ret = -EINVAL;
+ else
+ ret = 0;
fdput:
fdput(src_file);
return ret;
@@ -669,6 +677,9 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
return ioctl_fiemap(filp, arg);
case FIGETBSZ:
+ /* anon_bdev filesystems may not have a block size */
+ if (!inode->i_sb->s_blocksize)
+ return -EINVAL;
return put_user(inode->i_sb->s_blocksize, argp);
case FICLONE:
diff --git a/fs/iomap.c b/fs/iomap.c
index 90c2febc93ac..64ce240217a1 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -30,7 +30,6 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>
-#include <linux/swap.h>
#include "internal.h"
@@ -1795,7 +1794,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (pos >= dio->i_size)
goto out_free_dio;
- if (iter->type == ITER_IOVEC)
+ if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
dio->flags |= IOMAP_DIO_DIRTY;
} else {
flags |= IOMAP_WRITE;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index d35cd6be0675..93fb7cf0b92b 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -341,7 +341,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
};
struct lockd_net *ln = net_generic(net, lockd_net_id);
- dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
+ dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
(int)hostname_len, hostname, rqstp->rq_vers,
(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
diff --git a/fs/namespace.c b/fs/namespace.c
index d86830c86ce8..98d27da43304 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 060c658eab66..a7d3df85736d 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -65,6 +65,7 @@ struct nfs_dns_ent {
struct sockaddr_storage addr;
size_t addrlen;
+ struct rcu_head rcu_head;
};
@@ -101,15 +102,23 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
}
}
-static void nfs_dns_ent_put(struct kref *ref)
+static void nfs_dns_ent_free_rcu(struct rcu_head *head)
{
struct nfs_dns_ent *item;
- item = container_of(ref, struct nfs_dns_ent, h.ref);
+ item = container_of(head, struct nfs_dns_ent, rcu_head);
kfree(item->hostname);
kfree(item);
}
+static void nfs_dns_ent_put(struct kref *ref)
+{
+ struct nfs_dns_ent *item;
+
+ item = container_of(ref, struct nfs_dns_ent, h.ref);
+ call_rcu(&item->rcu_head, nfs_dns_ent_free_rcu);
+}
+
static struct cache_head *nfs_dns_ent_alloc(void)
{
struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
@@ -195,7 +204,7 @@ static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
{
struct cache_head *ch;
- ch = sunrpc_cache_lookup(cd,
+ ch = sunrpc_cache_lookup_rcu(cd,
&key->h,
nfs_dns_hash(key));
if (!ch)
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 4288a6ecaf75..46d691ba04bc 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -180,8 +180,9 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
return nfs42_proc_allocate(filep, offset, len);
}
-static int nfs42_clone_file_range(struct file *src_file, loff_t src_off,
- struct file *dst_file, loff_t dst_off, u64 count)
+static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
+ struct file *dst_file, loff_t dst_off, loff_t count,
+ unsigned int remap_flags)
{
struct inode *dst_inode = file_inode(dst_file);
struct nfs_server *server = NFS_SERVER(dst_inode);
@@ -190,6 +191,9 @@ static int nfs42_clone_file_range(struct file *src_file, loff_t src_off,
bool same_inode = false;
int ret;
+ if (remap_flags & ~REMAP_FILE_ADVISORY)
+ return -EINVAL;
+
/* check alignment w.r.t. clone_blksize */
ret = -EINVAL;
if (bs) {
@@ -240,7 +244,7 @@ out_unlock:
inode_unlock(src_inode);
}
out:
- return ret;
+ return ret < 0 ? ret : count;
}
#endif /* CONFIG_NFS_V4_2 */
@@ -262,7 +266,7 @@ const struct file_operations nfs4_file_operations = {
.copy_file_range = nfs4_copy_file_range,
.llseek = nfs4_file_llseek,
.fallocate = nfs42_fallocate,
- .clone_file_range = nfs42_clone_file_range,
+ .remap_file_range = nfs42_remap_file_range,
#else
.llseek = nfs_file_llseek,
#endif
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index b7559c6f2b97..4a98537efb0f 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -19,18 +19,22 @@
* is much larger than a sockaddr_in6.
*/
struct svc_cacherep {
- struct list_head c_lru;
+ struct {
+ /* Keep often-read xid, csum in the same cache line: */
+ __be32 k_xid;
+ __wsum k_csum;
+ u32 k_proc;
+ u32 k_prot;
+ u32 k_vers;
+ unsigned int k_len;
+ struct sockaddr_in6 k_addr;
+ } c_key;
+ struct rb_node c_node;
+ struct list_head c_lru;
unsigned char c_state, /* unused, inprog, done */
c_type, /* status, buffer */
c_secure : 1; /* req came from port < 1024 */
- struct sockaddr_in6 c_addr;
- __be32 c_xid;
- u32 c_prot;
- u32 c_proc;
- u32 c_vers;
- unsigned int c_len;
- __wsum c_csum;
unsigned long c_timestamp;
union {
struct kvec u_vec;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index a1143f7c2201..802993d8912f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -46,7 +46,7 @@ static void expkey_put(struct kref *ref)
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree(key);
+ kfree_rcu(key, ek_rcu);
}
static void expkey_request(struct cache_detail *cd,
@@ -265,7 +265,7 @@ svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
struct cache_head *ch;
int hash = svc_expkey_hash(item);
- ch = sunrpc_cache_lookup(cd, &item->h, hash);
+ ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct svc_expkey, h);
else
@@ -314,7 +314,7 @@ static void svc_export_put(struct kref *ref)
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
kfree(exp->ex_uuid);
- kfree(exp);
+ kfree_rcu(exp, ex_rcu);
}
static void svc_export_request(struct cache_detail *cd,
@@ -780,7 +780,7 @@ svc_export_lookup(struct svc_export *exp)
struct cache_head *ch;
int hash = svc_export_hash(exp);
- ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
+ ch = sunrpc_cache_lookup_rcu(exp->cd, &exp->h, hash);
if (ch)
return container_of(ch, struct svc_export, h);
else
@@ -1216,9 +1216,9 @@ static int e_show(struct seq_file *m, void *p)
}
const struct seq_operations nfs_exports_op = {
- .start = cache_seq_start,
- .next = cache_seq_next,
- .stop = cache_seq_stop,
+ .start = cache_seq_start_rcu,
+ .next = cache_seq_next_rcu,
+ .stop = cache_seq_stop_rcu,
.show = e_show,
};
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index c8b74126ddaa..e7daa1f246f0 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -61,6 +61,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
+ struct rcu_head ex_rcu;
};
/* an "export key" (expkey) maps a filehandlefragement to an
@@ -75,6 +76,7 @@ struct svc_expkey {
u32 ek_fsid[6];
struct path ek_path;
+ struct rcu_head ek_rcu;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 426f55005697..32cb8c027483 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -123,6 +123,14 @@ struct nfsd_net {
wait_queue_head_t ntf_wq;
atomic_t ntf_refcnt;
+
+ /*
+ * clientid and stateid data for construction of net unique COPY
+ * stateids.
+ */
+ u32 s2s_cp_cl_id;
+ struct idr s2s_cp_stateids;
+ spinlock_t s2s_cp_lock;
};
/* Simple check to find out if a given net was properly initialized */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 601bf33c26a0..25987bcdf96f 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -39,6 +39,7 @@
#include "state.h"
#include "netns.h"
#include "xdr4cb.h"
+#include "xdr4.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -105,6 +106,7 @@ enum nfs_cb_opnum4 {
OP_CB_WANTS_CANCELLED = 12,
OP_CB_NOTIFY_LOCK = 13,
OP_CB_NOTIFY_DEVICEID = 14,
+ OP_CB_OFFLOAD = 15,
OP_CB_ILLEGAL = 10044
};
@@ -683,6 +685,101 @@ static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
}
/*
+ * struct write_response4 {
+ * stateid4 wr_callback_id<1>;
+ * length4 wr_count;
+ * stable_how4 wr_committed;
+ * verifier4 wr_writeverf;
+ * };
+ * union offload_info4 switch (nfsstat4 coa_status) {
+ * case NFS4_OK:
+ * write_response4 coa_resok4;
+ * default:
+ * length4 coa_bytes_copied;
+ * };
+ * struct CB_OFFLOAD4args {
+ * nfs_fh4 coa_fh;
+ * stateid4 coa_stateid;
+ * offload_info4 coa_offload_info;
+ * };
+ */
+static void encode_offload_info4(struct xdr_stream *xdr,
+ __be32 nfserr,
+ const struct nfsd4_copy *cp)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p++ = nfserr;
+ if (!nfserr) {
+ p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+ p = xdr_encode_empty_array(p);
+ p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
+ *p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
+ p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
+ NFS4_VERIFIER_SIZE);
+ } else {
+ p = xdr_reserve_space(xdr, 8);
+ /* We always return success if bytes were written */
+ p = xdr_encode_hyper(p, 0);
+ }
+}
+
+static void encode_cb_offload4args(struct xdr_stream *xdr,
+ __be32 nfserr,
+ const struct knfsd_fh *fh,
+ const struct nfsd4_copy *cp,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p++ = cpu_to_be32(OP_CB_OFFLOAD);
+ encode_nfs_fh4(xdr, fh);
+ encode_stateid4(xdr, &cp->cp_res.cb_stateid);
+ encode_offload_info4(xdr, nfserr, cp);
+
+ hdr->nops++;
+}
+
+static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfsd4_callback *cb = data;
+ const struct nfsd4_copy *cp =
+ container_of(cb, struct nfsd4_copy, cp_cb);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = 0,
+ .minorversion = cb->cb_clp->cl_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
+ encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfsd4_callback *cb = data;
+ struct nfs4_cb_compound_hdr hdr;
+ int status;
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ return status;
+
+ if (cb) {
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+ }
+ return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
+}
+/*
* RPC procedure tables
*/
#define PROC(proc, call, argtype, restype) \
@@ -703,6 +800,7 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
#endif
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
+ PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index a5bb76593ce7..bf137fec33ff 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -65,6 +65,7 @@ struct ent {
u32 id;
char name[IDMAP_NAMESZ];
char authname[IDMAP_NAMESZ];
+ struct rcu_head rcu_head;
};
/* Common entry handling */
@@ -89,7 +90,7 @@ static void
ent_put(struct kref *ref)
{
struct ent *map = container_of(ref, struct ent, h.ref);
- kfree(map);
+ kfree_rcu(map, rcu_head);
}
static struct cache_head *
@@ -264,8 +265,8 @@ out:
static struct ent *
idtoname_lookup(struct cache_detail *cd, struct ent *item)
{
- struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
- idtoname_hash(item));
+ struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h,
+ idtoname_hash(item));
if (ch)
return container_of(ch, struct ent, h);
else
@@ -422,8 +423,8 @@ out:
static struct ent *
nametoid_lookup(struct cache_detail *cd, struct ent *item)
{
- struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
- nametoid_hash(item));
+ struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h,
+ nametoid_hash(item));
if (ch)
return container_of(ch, struct ent, h);
else
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index b7bc6e1a85ac..edff074d38c7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -36,6 +36,7 @@
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
#include "idmap.h"
#include "cache.h"
@@ -1089,36 +1090,254 @@ out:
return status;
}
+void nfs4_put_copy(struct nfsd4_copy *copy)
+{
+ if (!refcount_dec_and_test(&copy->refcount))
+ return;
+ kfree(copy);
+}
+
+static bool
+check_and_set_stop_copy(struct nfsd4_copy *copy)
+{
+ bool value;
+
+ spin_lock(&copy->cp_clp->async_lock);
+ value = copy->stopped;
+ if (!copy->stopped)
+ copy->stopped = true;
+ spin_unlock(&copy->cp_clp->async_lock);
+ return value;
+}
+
+static void nfsd4_stop_copy(struct nfsd4_copy *copy)
+{
+ /* only 1 thread should stop the copy */
+ if (!check_and_set_stop_copy(copy))
+ kthread_stop(copy->copy_task);
+ nfs4_put_copy(copy);
+}
+
+static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
+{
+ struct nfsd4_copy *copy = NULL;
+
+ spin_lock(&clp->async_lock);
+ if (!list_empty(&clp->async_copies)) {
+ copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
+ copies);
+ refcount_inc(&copy->refcount);
+ }
+ spin_unlock(&clp->async_lock);
+ return copy;
+}
+
+void nfsd4_shutdown_copy(struct nfs4_client *clp)
+{
+ struct nfsd4_copy *copy;
+
+ while ((copy = nfsd4_get_copy(clp)) != NULL)
+ nfsd4_stop_copy(copy);
+}
+
+static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
+{
+ struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb);
+
+ nfs4_put_copy(copy);
+}
+
+static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
+ struct rpc_task *task)
+{
+ return 1;
+}
+
+static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
+ .release = nfsd4_cb_offload_release,
+ .done = nfsd4_cb_offload_done
+};
+
+static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
+{
+ copy->cp_res.wr_stable_how = NFS_UNSTABLE;
+ copy->cp_synchronous = sync;
+ gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
+}
+
+static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
+{
+ ssize_t bytes_copied = 0;
+ size_t bytes_total = copy->cp_count;
+ u64 src_pos = copy->cp_src_pos;
+ u64 dst_pos = copy->cp_dst_pos;
+
+ do {
+ if (kthread_should_stop())
+ break;
+ bytes_copied = nfsd_copy_file_range(copy->file_src, src_pos,
+ copy->file_dst, dst_pos, bytes_total);
+ if (bytes_copied <= 0)
+ break;
+ bytes_total -= bytes_copied;
+ copy->cp_res.wr_bytes_written += bytes_copied;
+ src_pos += bytes_copied;
+ dst_pos += bytes_copied;
+ } while (bytes_total > 0 && !copy->cp_synchronous);
+ return bytes_copied;
+}
+
+static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
+{
+ __be32 status;
+ ssize_t bytes;
+
+ bytes = _nfsd_copy_file_range(copy);
+ /* For an async copy we ignore the error here; the client can
+ * always retry and retrieve the error then.
+ */
+ if (bytes < 0 && !copy->cp_res.wr_bytes_written)
+ status = nfserrno(bytes);
+ else {
+ nfsd4_init_copy_res(copy, sync);
+ status = nfs_ok;
+ }
+
+ fput(copy->file_src);
+ fput(copy->file_dst);
+ return status;
+}
+
+static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+{
+ dst->cp_src_pos = src->cp_src_pos;
+ dst->cp_dst_pos = src->cp_dst_pos;
+ dst->cp_count = src->cp_count;
+ dst->cp_synchronous = src->cp_synchronous;
+ memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
+ memcpy(&dst->fh, &src->fh, sizeof(src->fh));
+ dst->cp_clp = src->cp_clp;
+ dst->file_dst = get_file(src->file_dst);
+ dst->file_src = get_file(src->file_src);
+ memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
+}
+
+static void cleanup_async_copy(struct nfsd4_copy *copy)
+{
+ nfs4_free_cp_state(copy);
+ fput(copy->file_dst);
+ fput(copy->file_src);
+ spin_lock(&copy->cp_clp->async_lock);
+ list_del(&copy->copies);
+ spin_unlock(&copy->cp_clp->async_lock);
+ nfs4_put_copy(copy);
+}
+
+static int nfsd4_do_async_copy(void *data)
+{
+ struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
+ struct nfsd4_copy *cb_copy;
+
+ copy->nfserr = nfsd4_do_copy(copy, 0);
+ cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!cb_copy)
+ goto out;
+ memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
+ cb_copy->cp_clp = copy->cp_clp;
+ cb_copy->nfserr = copy->nfserr;
+ memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
+ nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
+ &nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
+ nfsd4_run_cb(&cb_copy->cp_cb);
+out:
+ cleanup_async_copy(copy);
+ return 0;
+}
+
static __be32
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_copy *copy = &u->copy;
- struct file *src, *dst;
__be32 status;
- ssize_t bytes;
+ struct nfsd4_copy *async_copy = NULL;
- status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
- &copy->cp_dst_stateid, &dst);
+ status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
+ &copy->file_src, &copy->cp_dst_stateid,
+ &copy->file_dst);
if (status)
goto out;
- bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
- dst, copy->cp_dst_pos, copy->cp_count);
+ copy->cp_clp = cstate->clp;
+ memcpy(&copy->fh, &cstate->current_fh.fh_handle,
+ sizeof(struct knfsd_fh));
+ if (!copy->cp_synchronous) {
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- if (bytes < 0)
- status = nfserrno(bytes);
- else {
- copy->cp_res.wr_bytes_written = bytes;
- copy->cp_res.wr_stable_how = NFS_UNSTABLE;
- copy->cp_synchronous = 1;
- gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
+ status = nfserrno(-ENOMEM);
+ async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ if (!async_copy)
+ goto out;
+ if (!nfs4_init_cp_state(nn, copy)) {
+ kfree(async_copy);
+ goto out;
+ }
+ refcount_set(&async_copy->refcount, 1);
+ memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
+ sizeof(copy->cp_stateid));
+ dup_copy_fields(copy, async_copy);
+ async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+ async_copy, "%s", "copy thread");
+ if (IS_ERR(async_copy->copy_task))
+ goto out_err;
+ spin_lock(&async_copy->cp_clp->async_lock);
+ list_add(&async_copy->copies,
+ &async_copy->cp_clp->async_copies);
+ spin_unlock(&async_copy->cp_clp->async_lock);
+ wake_up_process(async_copy->copy_task);
status = nfs_ok;
+ } else
+ status = nfsd4_do_copy(copy, 1);
+out:
+ return status;
+out_err:
+ cleanup_async_copy(async_copy);
+ goto out;
+}
+
+struct nfsd4_copy *
+find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
+{
+ struct nfsd4_copy *copy;
+
+ spin_lock(&clp->async_lock);
+ list_for_each_entry(copy, &clp->async_copies, copies) {
+ if (memcmp(&copy->cp_stateid, stateid, NFS4_STATEID_SIZE))
+ continue;
+ refcount_inc(&copy->refcount);
+ spin_unlock(&clp->async_lock);
+ return copy;
}
+ spin_unlock(&clp->async_lock);
+ return NULL;
+}
+
+static __be32
+nfsd4_offload_cancel(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_offload_status *os = &u->offload_status;
+ __be32 status = 0;
+ struct nfsd4_copy *copy;
+ struct nfs4_client *clp = cstate->clp;
+
+ copy = find_async_copy(clp, &os->stateid);
+ if (copy)
+ nfsd4_stop_copy(copy);
+ else
+ status = nfserr_bad_stateid;
- fput(src);
- fput(dst);
-out:
return status;
}
@@ -1144,6 +1363,25 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fput(file);
return status;
}
+static __be32
+nfsd4_offload_status(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_offload_status *os = &u->offload_status;
+ __be32 status = 0;
+ struct nfsd4_copy *copy;
+ struct nfs4_client *clp = cstate->clp;
+
+ copy = find_async_copy(clp, &os->stateid);
+ if (copy) {
+ os->count = copy->cp_res.wr_bytes_written;
+ nfs4_put_copy(copy);
+ } else
+ status = nfserr_bad_stateid;
+
+ return status;
+}
static __be32
nfsd4_allocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -2047,6 +2285,14 @@ static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
1 /* cr_synchronous */) * sizeof(__be32);
}
+static inline u32 nfsd4_offload_status_rsize(struct svc_rqst *rqstp,
+ struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 2 /* osr_count */ +
+ 1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32);
+}
+
#ifdef CONFIG_NFSD_PNFS
static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
@@ -2460,6 +2706,17 @@ static const struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_SEEK",
.op_rsize_bop = nfsd4_seek_rsize,
},
+ [OP_OFFLOAD_STATUS] = {
+ .op_func = nfsd4_offload_status,
+ .op_name = "OP_OFFLOAD_STATUS",
+ .op_rsize_bop = nfsd4_offload_status_rsize,
+ },
+ [OP_OFFLOAD_CANCEL] = {
+ .op_func = nfsd4_offload_cancel,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_OFFLOAD_CANCEL",
+ .op_rsize_bop = nfsd4_only_status_rsize,
+ },
};
/**
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b0ca0efd2875..f093fbe47133 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -713,6 +713,36 @@ out_free:
return NULL;
}
+/*
+ * Create a unique stateid_t to represent each COPY.
+ */
+int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
+{
+ int new_id;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&nn->s2s_cp_lock);
+ new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
+ spin_unlock(&nn->s2s_cp_lock);
+ idr_preload_end();
+ if (new_id < 0)
+ return 0;
+ copy->cp_stateid.si_opaque.so_id = new_id;
+ copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
+ copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
+ return 1;
+}
+
+void nfs4_free_cp_state(struct nfsd4_copy *copy)
+{
+ struct nfsd_net *nn;
+
+ nn = net_generic(copy->cp_clp->net, nfsd_net_id);
+ spin_lock(&nn->s2s_cp_lock);
+ idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
+ spin_unlock(&nn->s2s_cp_lock);
+}
+
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
struct nfs4_stid *stid;
@@ -1827,6 +1857,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
+ INIT_LIST_HEAD(&clp->async_copies);
+ spin_lock_init(&clp->async_lock);
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
@@ -1942,6 +1974,7 @@ __destroy_client(struct nfs4_client *clp)
}
}
nfsd4_return_all_client_layouts(clp);
+ nfsd4_shutdown_copy(clp);
nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
@@ -2475,7 +2508,8 @@ static bool client_has_state(struct nfs4_client *clp)
|| !list_empty(&clp->cl_lo_states)
#endif
|| !list_empty(&clp->cl_delegations)
- || !list_empty(&clp->cl_sessions);
+ || !list_empty(&clp->cl_sessions)
+ || !list_empty(&clp->async_copies);
}
__be32
@@ -4364,7 +4398,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
if (!fl)
- goto out_stid;
+ goto out_clnt_odstate;
status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
if (fl)
@@ -4389,7 +4423,6 @@ out_unlock:
vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
put_clnt_odstate(dp->dl_clnt_odstate);
-out_stid:
nfs4_put_stid(&dp->dl_stid);
out_delegees:
put_deleg_file(fp);
@@ -7161,6 +7194,8 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
spin_lock_init(&nn->client_lock);
+ spin_lock_init(&nn->s2s_cp_lock);
+ idr_init(&nn->s2s_cp_stateids);
spin_lock_init(&nn->blocked_locks_lock);
INIT_LIST_HEAD(&nn->blocked_locks_lru);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 418fa9c78186..3de42a729093 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1768,6 +1768,13 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
}
static __be32
+nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
+ struct nfsd4_offload_status *os)
+{
+ return nfsd4_decode_stateid(argp, &os->stateid);
+}
+
+static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
@@ -1873,8 +1880,8 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTSTATS] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OFFLOAD_CANCEL] = (nfsd4_dec)nfsd4_decode_offload_status,
+ [OP_OFFLOAD_STATUS] = (nfsd4_dec)nfsd4_decode_offload_status,
[OP_READ_PLUS] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek,
[OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -4224,15 +4231,27 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
#endif /* CONFIG_NFSD_PNFS */
static __be32
-nfsd42_encode_write_res(struct nfsd4_compoundres *resp, struct nfsd42_write_res *write)
+nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
+ struct nfsd42_write_res *write, bool sync)
{
__be32 *p;
+ p = xdr_reserve_space(&resp->xdr, 4);
+ if (!p)
+ return nfserr_resource;
- p = xdr_reserve_space(&resp->xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+ if (sync)
+ *p++ = cpu_to_be32(0);
+ else {
+ __be32 nfserr;
+ *p++ = cpu_to_be32(1);
+ nfserr = nfsd4_encode_stateid(&resp->xdr, &write->cb_stateid);
+ if (nfserr)
+ return nfserr;
+ }
+ p = xdr_reserve_space(&resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
if (!p)
return nfserr_resource;
- *p++ = cpu_to_be32(0);
p = xdr_encode_hyper(p, write->wr_bytes_written);
*p++ = cpu_to_be32(write->wr_stable_how);
p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
@@ -4246,7 +4265,8 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
{
__be32 *p;
- nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
+ nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
+ copy->cp_synchronous);
if (nfserr)
return nfserr;
@@ -4257,6 +4277,22 @@ nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
}
static __be32
+nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_offload_status *os)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 8 + 4);
+ if (!p)
+ return nfserr_resource;
+ p = xdr_encode_hyper(p, os->count);
+ *p++ = cpu_to_be32(0);
+
+ return nfserr;
+}
+
+static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
{
@@ -4359,7 +4395,7 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
[OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTSTATS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_OFFLOAD_CANCEL] = (nfsd4_enc)nfsd4_encode_noop,
- [OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_OFFLOAD_STATUS] = (nfsd4_enc)nfsd4_encode_offload_status,
[OP_READ_PLUS] = (nfsd4_enc)nfsd4_encode_noop,
[OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek,
[OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index dbdeb9d6af03..e2fe0e9ce0df 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -30,6 +30,7 @@
#define TARGET_BUCKET_SIZE 64
struct nfsd_drc_bucket {
+ struct rb_root rb_head;
struct list_head lru_head;
spinlock_t cache_lock;
};
@@ -121,7 +122,7 @@ nfsd_cache_hash(__be32 xid)
}
static struct svc_cacherep *
-nfsd_reply_cache_alloc(void)
+nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
{
struct svc_cacherep *rp;
@@ -129,21 +130,35 @@ nfsd_reply_cache_alloc(void)
if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
+ RB_CLEAR_NODE(&rp->c_node);
INIT_LIST_HEAD(&rp->c_lru);
+
+ memset(&rp->c_key, 0, sizeof(rp->c_key));
+ rp->c_key.k_xid = rqstp->rq_xid;
+ rp->c_key.k_proc = rqstp->rq_proc;
+ rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
+ rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
+ rp->c_key.k_prot = rqstp->rq_prot;
+ rp->c_key.k_vers = rqstp->rq_vers;
+ rp->c_key.k_len = rqstp->rq_arg.len;
+ rp->c_key.k_csum = csum;
}
return rp;
}
static void
-nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
drc_mem_usage -= rp->c_replvec.iov_len;
kfree(rp->c_replvec.iov_base);
}
- list_del(&rp->c_lru);
- atomic_dec(&num_drc_entries);
- drc_mem_usage -= sizeof(*rp);
+ if (rp->c_state != RC_UNUSED) {
+ rb_erase(&rp->c_node, &b->rb_head);
+ list_del(&rp->c_lru);
+ atomic_dec(&num_drc_entries);
+ drc_mem_usage -= sizeof(*rp);
+ }
kmem_cache_free(drc_slab, rp);
}
@@ -151,7 +166,7 @@ static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
spin_lock(&b->cache_lock);
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(b, rp);
spin_unlock(&b->cache_lock);
}
@@ -207,7 +222,7 @@ void nfsd_reply_cache_shutdown(void)
struct list_head *head = &drc_hashtbl[i].lru_head;
while (!list_empty(head)) {
rp = list_first_entry(head, struct svc_cacherep, c_lru);
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(&drc_hashtbl[i], rp);
}
}
@@ -246,7 +261,7 @@ prune_bucket(struct nfsd_drc_bucket *b)
if (atomic_read(&num_drc_entries) <= max_drc_entries &&
time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
break;
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(b, rp);
freed++;
}
return freed;
@@ -318,51 +333,48 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
return csum;
}
-static bool
-nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
+static int
+nfsd_cache_key_cmp(const struct svc_cacherep *key, const struct svc_cacherep *rp)
{
- /* Check RPC XID first */
- if (rqstp->rq_xid != rp->c_xid)
- return false;
- /* compare checksum of NFS data */
- if (csum != rp->c_csum) {
+ if (key->c_key.k_xid == rp->c_key.k_xid &&
+ key->c_key.k_csum != rp->c_key.k_csum)
++payload_misses;
- return false;
- }
- /* Other discriminators */
- if (rqstp->rq_proc != rp->c_proc ||
- rqstp->rq_prot != rp->c_prot ||
- rqstp->rq_vers != rp->c_vers ||
- rqstp->rq_arg.len != rp->c_len ||
- !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
- rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
- return false;
-
- return true;
+ return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
/*
* Search the request hash for an entry that matches the given rqstp.
* Must be called with cache_lock held. Returns the found entry or
- * NULL on failure.
+ * inserts an empty key on failure.
*/
static struct svc_cacherep *
-nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
- __wsum csum)
+nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
{
- struct svc_cacherep *rp, *ret = NULL;
- struct list_head *rh = &b->lru_head;
+ struct svc_cacherep *rp, *ret = key;
+ struct rb_node **p = &b->rb_head.rb_node,
+ *parent = NULL;
unsigned int entries = 0;
+ int cmp;
- list_for_each_entry(rp, rh, c_lru) {
+ while (*p != NULL) {
++entries;
- if (nfsd_cache_match(rqstp, csum, rp)) {
+ parent = *p;
+ rp = rb_entry(parent, struct svc_cacherep, c_node);
+
+ cmp = nfsd_cache_key_cmp(key, rp);
+ if (cmp < 0)
+ p = &parent->rb_left;
+ else if (cmp > 0)
+ p = &parent->rb_right;
+ else {
ret = rp;
- break;
+ goto out;
}
}
-
+ rb_link_node(&key->c_node, parent, p);
+ rb_insert_color(&key->c_node, &b->rb_head);
+out:
/* tally hash chain length stats */
if (entries > longest_chain) {
longest_chain = entries;
@@ -374,6 +386,7 @@ nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
atomic_read(&num_drc_entries));
}
+ lru_put_end(b, ret);
return ret;
}
@@ -389,9 +402,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct svc_cacherep *rp, *found;
__be32 xid = rqstp->rq_xid;
- u32 proto = rqstp->rq_prot,
- vers = rqstp->rq_vers,
- proc = rqstp->rq_proc;
__wsum csum;
u32 hash = nfsd_cache_hash(xid);
struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
@@ -410,60 +420,38 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* Since the common case is a cache miss followed by an insert,
* preallocate an entry.
*/
- rp = nfsd_reply_cache_alloc();
- spin_lock(&b->cache_lock);
- if (likely(rp)) {
- atomic_inc(&num_drc_entries);
- drc_mem_usage += sizeof(*rp);
+ rp = nfsd_reply_cache_alloc(rqstp, csum);
+ if (!rp) {
+ dprintk("nfsd: unable to allocate DRC entry!\n");
+ return rtn;
}
- /* go ahead and prune the cache */
- prune_bucket(b);
-
- found = nfsd_cache_search(b, rqstp, csum);
- if (found) {
- if (likely(rp))
- nfsd_reply_cache_free_locked(rp);
+ spin_lock(&b->cache_lock);
+ found = nfsd_cache_insert(b, rp);
+ if (found != rp) {
+ nfsd_reply_cache_free_locked(NULL, rp);
rp = found;
goto found_entry;
}
- if (!rp) {
- dprintk("nfsd: unable to allocate DRC entry!\n");
- goto out;
- }
-
nfsdstats.rcmisses++;
rqstp->rq_cacherep = rp;
rp->c_state = RC_INPROG;
- rp->c_xid = xid;
- rp->c_proc = proc;
- rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
- rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
- rp->c_prot = proto;
- rp->c_vers = vers;
- rp->c_len = rqstp->rq_arg.len;
- rp->c_csum = csum;
- lru_put_end(b, rp);
+ atomic_inc(&num_drc_entries);
+ drc_mem_usage += sizeof(*rp);
- /* release any buffer */
- if (rp->c_type == RC_REPLBUFF) {
- drc_mem_usage -= rp->c_replvec.iov_len;
- kfree(rp->c_replvec.iov_base);
- rp->c_replvec.iov_base = NULL;
- }
- rp->c_type = RC_NOCACHE;
+ /* go ahead and prune the cache */
+ prune_bucket(b);
out:
spin_unlock(&b->cache_lock);
return rtn;
found_entry:
- nfsdstats.rchits++;
/* We found a matching entry which is either in progress or done. */
- lru_put_end(b, rp);
-
+ nfsdstats.rchits++;
rtn = RC_DROPIT;
+
/* Request being processed */
if (rp->c_state == RC_INPROG)
goto out;
@@ -489,7 +477,7 @@ found_entry:
break;
default:
printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
- nfsd_reply_cache_free_locked(rp);
+ nfsd_reply_cache_free_locked(b, rp);
}
goto out;
@@ -524,7 +512,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
if (!rp)
return;
- hash = nfsd_cache_hash(rp->c_xid);
+ hash = nfsd_cache_hash(rp->c_key.k_xid);
b = &drc_hashtbl[hash];
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
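The nfsd_cache_insert() hunk above folds lookup and insertion into a single rbtree descent: the preallocated entry is linked in on a miss, and the caller detects a hit by comparing the return value against the entry it passed in (found != rp). A minimal standalone sketch of the same idiom, using hypothetical names rather than the nfsd structures:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node	node;
	u32		key;
};

/* Return the existing node with the same key, or link @new into the
 * tree and return @new.  The caller frees @new only on a hit. */
static struct demo_node *demo_insert(struct rb_root *root,
				     struct demo_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	struct demo_node *cur;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct demo_node, node);
		if (new->key < cur->key)
			p = &parent->rb_left;
		else if (new->key > cur->key)
			p = &parent->rb_right;
		else
			return cur;		/* hit */
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return new;				/* miss: @new is now in the tree */
}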
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7fb9f7c667b1..6384c9b94898 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1242,6 +1242,7 @@ static __net_init int nfsd_init_net(struct net *net)
nn->somebody_reclaimed = false;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
+ nn->s2s_cp_cl_id = nn->clientid_counter++;
atomic_set(&nn->ntf_refcnt, 0);
init_waitqueue_head(&nn->ntf_wq);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 0b15dac7e609..6aacb325b6a0 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -355,6 +355,8 @@ struct nfs4_client {
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
struct net *net;
+ struct list_head async_copies; /* list of async copies */
+ spinlock_t async_lock; /* lock for async copies */
};
/* struct nfs4_client_reset
@@ -573,6 +575,7 @@ enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_NULL = 0,
NFSPROC4_CLNT_CB_RECALL,
NFSPROC4_CLNT_CB_LAYOUT,
+ NFSPROC4_CLNT_CB_OFFLOAD,
NFSPROC4_CLNT_CB_SEQUENCE,
NFSPROC4_CLNT_CB_NOTIFY_LOCK,
};
@@ -599,6 +602,7 @@ struct nfsd4_blocked_lock {
struct nfsd4_compound_state;
struct nfsd_net;
+struct nfsd4_copy;
extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
@@ -608,6 +612,8 @@ __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
struct nfs4_stid **s, struct nfsd_net *nn);
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *));
+int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
+void nfs4_free_cp_state(struct nfsd4_copy *copy);
void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
@@ -626,6 +632,7 @@ extern void nfsd4_run_cb(struct nfsd4_callback *cb);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
+extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
extern void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp);
extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name,
struct nfsd_net *nn);
@@ -633,6 +640,9 @@ extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn);
struct nfs4_file *find_file(struct knfsd_fh *fh);
void put_nfs4_file(struct nfs4_file *fi);
+extern void nfs4_put_copy(struct nfsd4_copy *copy);
+extern struct nfsd4_copy *
find_async_copy(struct nfs4_client *clp, stateid_t *stateid);
static inline void get_nfs4_file(struct nfs4_file *fi)
{
refcount_inc(&fi->fi_ref);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b53e76391e52..eb67098117b4 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -541,8 +541,12 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
u64 dst_pos, u64 count)
{
- return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
- count));
+ loff_t cloned;
+
+ cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+ if (count && cloned != count)
+ cloned = -EINVAL;
+ return nfserrno(cloned < 0 ? cloned : 0);
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
@@ -923,7 +927,7 @@ __be32 nfsd_readv(struct svc_rqst *rqstp, struct svc_fh *fhp,
int host_err;
trace_nfsd_read_vector(rqstp, fhp, offset, *count);
- iov_iter_kvec(&iter, READ | ITER_KVEC, vec, vlen, *count);
+ iov_iter_kvec(&iter, READ, vec, vlen, *count);
host_err = vfs_iter_read(file, &iter, &offset, 0);
return nfsd_finish_read(rqstp, fhp, file, offset, count, host_err);
}
@@ -999,7 +1003,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
if (stable && !use_wgather)
flags |= RWF_SYNC;
- iov_iter_kvec(&iter, WRITE | ITER_KVEC, vec, vlen, *cnt);
+ iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
host_err = vfs_iter_write(file, &iter, &pos, flags);
if (host_err < 0)
goto out_nfserr;
@@ -1276,7 +1280,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
int type, dev_t rdev, struct svc_fh *resfhp)
{
struct dentry *dentry, *dchild = NULL;
- struct inode *dirp;
__be32 err;
int host_err;
@@ -1288,7 +1291,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
return err;
dentry = fhp->fh_dentry;
- dirp = d_inode(dentry);
host_err = fh_want_write(fhp);
if (host_err)
@@ -1409,6 +1411,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*created = 1;
break;
}
+ /* fall through */
case NFS4_CREATE_EXCLUSIVE4_1:
if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime
&& d_inode(dchild)->i_atime.tv_sec == v_atime
@@ -1417,7 +1420,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
*created = 1;
goto set_attr;
}
- /* fallthru */
+ /* fall through */
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
}
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 17c453a7999c..feeb6d4bdffd 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -511,6 +511,7 @@ struct nfsd42_write_res {
u64 wr_bytes_written;
u32 wr_stable_how;
nfs4_verifier wr_verifier;
+ stateid_t cb_stateid;
};
struct nfsd4_copy {
@@ -526,6 +527,23 @@ struct nfsd4_copy {
/* response */
struct nfsd42_write_res cp_res;
+
+ /* for cb_offload */
+ struct nfsd4_callback cp_cb;
+ __be32 nfserr;
+ struct knfsd_fh fh;
+
+ struct nfs4_client *cp_clp;
+
+ struct file *file_src;
+ struct file *file_dst;
+
+ stateid_t cp_stateid;
+
+ struct list_head copies;
+ struct task_struct *copy_task;
+ refcount_t refcount;
+ bool stopped;
};
struct nfsd4_seek {
@@ -539,6 +557,15 @@ struct nfsd4_seek {
loff_t seek_pos;
};
+struct nfsd4_offload_status {
+ /* request */
+ stateid_t stateid;
+
+ /* response */
+ u64 count;
+ u32 status;
+};
+
struct nfsd4_op {
int opnum;
const struct nfsd4_operation * opdesc;
@@ -597,6 +624,7 @@ struct nfsd4_op {
struct nfsd4_fallocate deallocate;
struct nfsd4_clone clone;
struct nfsd4_copy copy;
+ struct nfsd4_offload_status offload_status;
struct nfsd4_seek seek;
} u;
struct nfs4_replay * replay;
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index 517239af0302..547cf07cf4e0 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -38,3 +38,13 @@
#define NFS4_dec_cb_notify_lock_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
+#define enc_cb_offload_info_sz (1 + 1 + 2 + 1 + \
+ XDR_QUADLEN(NFS4_VERIFIER_SIZE))
+#define NFS4_enc_cb_offload_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
+ enc_nfs4_fh_sz + \
+ enc_stateid_sz + \
+ enc_cb_offload_info_sz)
+#define NFS4_dec_cb_offload_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + \
+ op_dec_sz)
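For reference, the new callback reply size works out as follows; NFS4_VERIFIER_SIZE is assumed to be 8 (its value in the NFSv4 headers), which is not visible in this hunk:

/* XDR_QUADLEN(8) == (8 + 3) >> 2 == 2, so:
 *
 *   enc_cb_offload_info_sz == 1 + 1 + 2 + 1 + 2 == 7 XDR words == 28 bytes
 *
 * NFS4_enc_cb_offload_sz then adds the compound header, sequence, file
 * handle and stateid sizes on top of that.
 */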
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 4690cd75d8d7..3986c7a1f6a8 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -312,7 +312,7 @@ static struct dentry *ntfs_get_parent(struct dentry *child_dent)
/* Get the mft record of the inode belonging to the child dentry. */
mrec = map_mft_record(ni);
if (IS_ERR(mrec))
- return (struct dentry *)mrec;
+ return ERR_CAST(mrec);
/* Find the first file name attribute in the mft record. */
ctx = ntfs_attr_get_search_ctx(ni, mrec);
if (unlikely(!ctx)) {
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 1d098c3c00e0..4ebbd57cbf84 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -99,25 +99,34 @@ out:
return ret;
}
+/* Caller must provide a bhs[] whose entries are either all NULL or all
+ * non-NULL, so that read failures are easier to handle.
+ */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
unsigned int nr, struct buffer_head *bhs[])
{
int status = 0;
unsigned int i;
struct buffer_head *bh;
+ int new_bh = 0;
trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
if (!nr)
goto bail;
+ /* Don't put buffer head and re-assign it to NULL if it is allocated
+ * outside, since the caller can't be aware of this alteration!
+ */
+ new_bh = (bhs[0] == NULL);
+
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
bhs[i] = sb_getblk(osb->sb, block++);
if (bhs[i] == NULL) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ break;
}
}
bh = bhs[i];
@@ -158,9 +167,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
submit_bh(REQ_OP_READ, 0, bh);
}
+read_failure:
for (i = nr; i > 0; i--) {
bh = bhs[i - 1];
+ if (unlikely(status)) {
+ if (new_bh && bh) {
+ /* If middle bh fails, let previous bh
+ * finish its read and then put it to
+ * avoid a bh leak
+ */
+ if (!buffer_jbd(bh))
+ wait_on_buffer(bh);
+ put_bh(bh);
+ bhs[i - 1] = NULL;
+ } else if (bh && buffer_uptodate(bh)) {
+ clear_buffer_uptodate(bh);
+ }
+ continue;
+ }
+
/* No need to wait on the buffer if it's managed by JBD. */
if (!buffer_jbd(bh))
wait_on_buffer(bh);
@@ -170,8 +196,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
* so we can safely record this and loop back
* to cleanup the other buffers. */
status = -EIO;
- put_bh(bh);
- bhs[i - 1] = NULL;
+ goto read_failure;
}
}
@@ -179,6 +204,9 @@ bail:
return status;
}
+/* Caller must provide a bhs[] whose entries are either all NULL or all
+ * non-NULL, so that read failures are easier to handle.
+ */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
@@ -188,6 +216,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
int i, ignore_cache = 0;
struct buffer_head *bh;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ int new_bh = 0;
trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
@@ -213,6 +242,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
goto bail;
}
+ /* Don't put buffer head and re-assign it to NULL if it is allocated
+ * outside, since the caller can't be aware of this alteration!
+ */
+ new_bh = (bhs[0] == NULL);
+
ocfs2_metadata_cache_io_lock(ci);
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
@@ -221,7 +255,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
ocfs2_metadata_cache_io_unlock(ci);
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ /* Don't forget to put previous bh! */
+ break;
}
}
bh = bhs[i];
@@ -316,16 +351,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
}
- status = 0;
-
+read_failure:
for (i = (nr - 1); i >= 0; i--) {
bh = bhs[i];
if (!(flags & OCFS2_BH_READAHEAD)) {
- if (status) {
- /* Clear the rest of the buffers on error */
- put_bh(bh);
- bhs[i] = NULL;
+ if (unlikely(status)) {
+ /* Clear the buffers on error, including those
+ * that had already succeeded in reading
+ */
+ if (new_bh && bh) {
+ /* If middle bh fails, let previous bh
+ * finish its read and then put it to
+ * avoid a bh leak
+ */
+ if (!buffer_jbd(bh))
+ wait_on_buffer(bh);
+ put_bh(bh);
+ bhs[i] = NULL;
+ } else if (bh && buffer_uptodate(bh)) {
+ clear_buffer_uptodate(bh);
+ }
continue;
}
/* We know this can't have changed as we hold the
@@ -343,9 +389,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
* uptodate. */
status = -EIO;
clear_buffer_needs_validate(bh);
- put_bh(bh);
- bhs[i] = NULL;
- continue;
+ goto read_failure;
}
if (buffer_needs_validate(bh)) {
@@ -355,11 +399,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
BUG_ON(buffer_jbd(bh));
clear_buffer_needs_validate(bh);
status = validate(sb, bh);
- if (status) {
- put_bh(bh);
- bhs[i] = NULL;
- continue;
- }
+ if (status)
+ goto read_failure;
}
}
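The "all NULL or all non-NULL" contract added above matters because the reworked error path only puts buffer heads that ocfs2_read_blocks_sync() allocated itself. A hedged sketch of a conforming caller (hypothetical helper name, built against the ocfs2 headers):

/* Hypothetical caller honouring the bhs[] contract: pass all-NULL slots
 * and let the helper allocate them (the new_bh case in the patch above). */
static int demo_read_two_blocks(struct ocfs2_super *osb, u64 block)
{
	struct buffer_head *bhs[2] = { NULL, NULL };
	int status, i;

	status = ocfs2_read_blocks_sync(osb, block, 2, bhs);
	if (status) {
		/* Per the patched error path, any bh the helper allocated
		 * has already been put and its slot reset to NULL. */
		return status;
	}

	/* ... use bhs[0] and bhs[1] ... */

	for (i = 0; i < 2; i++)
		brelse(bhs[i]);
	return 0;
}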
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 7d9eea7d4a87..e9f236af1927 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -916,7 +916,7 @@ static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
{
struct kvec vec = { .iov_len = len, .iov_base = data, };
struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, len);
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len);
return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
}
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index b048d4fa3959..c121abbdfc7d 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1897,8 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
/* On error, skip the f_pos to the
next block. */
ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
- brelse(bh);
- continue;
+ break;
}
if (le64_to_cpu(de->inode)) {
unsigned char d_type = DT_UNKNOWN;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 933aac5da193..7c835824247e 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2123,10 +2123,10 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
/* LVB only has room for 64 bits of time here so we pack it for
* now. */
-static u64 ocfs2_pack_timespec(struct timespec *spec)
+static u64 ocfs2_pack_timespec(struct timespec64 *spec)
{
u64 res;
- u64 sec = spec->tv_sec;
+ u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);
u32 nsec = spec->tv_nsec;
res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
@@ -2142,7 +2142,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- struct timespec ts;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2163,15 +2162,12 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
- ts = timespec64_to_timespec(inode->i_atime);
lvb->lvb_iatime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&ts));
- ts = timespec64_to_timespec(inode->i_ctime);
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
lvb->lvb_ictime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&ts));
- ts = timespec64_to_timespec(inode->i_mtime);
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
lvb->lvb_imtime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&ts));
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
@@ -2180,7 +2176,7 @@ out:
mlog_meta_lvb(0, lockres);
}
-static void ocfs2_unpack_timespec(struct timespec *spec,
+static void ocfs2_unpack_timespec(struct timespec64 *spec,
u64 packed_time)
{
spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
@@ -2189,7 +2185,6 @@ static void ocfs2_unpack_timespec(struct timespec *spec,
static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
- struct timespec ts;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
@@ -2217,15 +2212,12 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
- ocfs2_unpack_timespec(&ts,
+ ocfs2_unpack_timespec(&inode->i_atime,
be64_to_cpu(lvb->lvb_iatime_packed));
- inode->i_atime = timespec_to_timespec64(ts);
- ocfs2_unpack_timespec(&ts,
+ ocfs2_unpack_timespec(&inode->i_mtime,
be64_to_cpu(lvb->lvb_imtime_packed));
- inode->i_mtime = timespec_to_timespec64(ts);
- ocfs2_unpack_timespec(&ts,
+ ocfs2_unpack_timespec(&inode->i_ctime,
be64_to_cpu(lvb->lvb_ictime_packed));
- inode->i_ctime = timespec_to_timespec64(ts);
spin_unlock(&oi->ip_lock);
}
@@ -3603,7 +3595,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
* we can recover correctly from node failure. Otherwise, we may get
* invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
*/
- if (!ocfs2_is_o2cb_active() &&
+ if (ocfs2_userspace_stack(osb) &&
lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lvb = 1;
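The timespec64 conversion above keeps the existing LVB packing and clamps the seconds to the range the packed field can hold. A standalone round-trip sketch; the shift and mask values are assumptions mirroring dlmglue.c (34 bits of seconds, 30 bits of nanoseconds) and are not part of this hunk:

#include <linux/kernel.h>
#include <linux/time64.h>

#define DEMO_SEC_SHIFT	30			/* assumed OCFS2_SEC_SHIFT */
#define DEMO_NSEC_MASK	((1ULL << DEMO_SEC_SHIFT) - 1)

static u64 demo_pack_timespec(const struct timespec64 *spec)
{
	u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);

	return (sec << DEMO_SEC_SHIFT) | (spec->tv_nsec & DEMO_NSEC_MASK);
}

static void demo_unpack_timespec(struct timespec64 *spec, u64 packed)
{
	spec->tv_sec = packed >> DEMO_SEC_SHIFT;
	spec->tv_nsec = packed & DEMO_NSEC_MASK;
}

/* Any timestamp between 1970 and roughly the year 2514 survives the
 * pack/unpack round trip unchanged; values outside that range are clamped. */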
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 9fa35cb6f6e0..d640c5f8a85d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2343,7 +2343,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
written = __generic_file_write_iter(iocb, from);
/* buffered aio wouldn't have proper lock coverage today */
- BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
+ BUG_ON(written == -EIOCBQUEUED && !direct_io);
/*
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
@@ -2463,7 +2463,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
trace_generic_file_read_iter_ret(ret);
/* buffered aio wouldn't have proper lock coverage today */
- BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
+ BUG_ON(ret == -EIOCBQUEUED && !direct_io);
/* see ocfs2_file_write_iter */
if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
@@ -2527,24 +2527,79 @@ out:
return offset;
}
-static int ocfs2_file_clone_range(struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len)
+static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags)
{
- return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
- len, false);
-}
+ struct inode *inode_in = file_inode(file_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
+ struct buffer_head *in_bh = NULL, *out_bh = NULL;
+ bool same_inode = (inode_in == inode_out);
+ loff_t remapped = 0;
+ ssize_t ret;
-static int ocfs2_file_dedupe_range(struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len)
-{
- return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
- len, true);
+ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ return -EINVAL;
+ if (!ocfs2_refcount_tree(osb))
+ return -EOPNOTSUPP;
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ return -EROFS;
+
+ /* Lock both files against IO */
+ ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
+ if (ret)
+ return ret;
+
+ /* Check file eligibility and prepare for block sharing. */
+ ret = -EINVAL;
+ if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
+ (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
+ goto out_unlock;
+
+ ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
+ &len, remap_flags);
+ if (ret < 0 || len == 0)
+ goto out_unlock;
+
+ /* Lock out changes to the allocation maps and remap. */
+ down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
+ if (!same_inode)
+ down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
+ SINGLE_DEPTH_NESTING);
+
+ /* Zap any page cache for the destination file's range. */
+ truncate_inode_pages_range(&inode_out->i_data,
+ round_down(pos_out, PAGE_SIZE),
+ round_up(pos_out + len, PAGE_SIZE) - 1);
+
+ remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
+ inode_out, out_bh, pos_out, len);
+ up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
+ if (!same_inode)
+ up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
+ if (remapped < 0) {
+ ret = remapped;
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ /*
+ * Empty the extent map so that we may get the right extent
+ * record from the disk.
+ */
+ ocfs2_extent_map_trunc(inode_in, 0);
+ ocfs2_extent_map_trunc(inode_out, 0);
+
+ ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+out_unlock:
+ ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
+ return remapped > 0 ? remapped : ret;
}
const struct inode_operations ocfs2_file_iops = {
@@ -2586,8 +2641,7 @@ const struct file_operations ocfs2_fops = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ocfs2_fallocate,
- .clone_file_range = ocfs2_file_clone_range,
- .dedupe_file_range = ocfs2_file_dedupe_range,
+ .remap_file_range = ocfs2_remap_file_range,
};
const struct file_operations ocfs2_dops = {
@@ -2633,8 +2687,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ocfs2_fallocate,
- .clone_file_range = ocfs2_file_clone_range,
- .dedupe_file_range = ocfs2_file_dedupe_range,
+ .remap_file_range = ocfs2_remap_file_range,
};
const struct file_operations ocfs2_dops_no_plocks = {
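Both of the removed hooks and the new ->remap_file_range entry point are reached from userspace through the same ioctls; a clone request now arrives with remap_flags == 0. A minimal userspace example (file paths are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FICLONE */

int main(void)
{
	int src = open("/mnt/ocfs2/src", O_RDONLY);
	int dst = open("/mnt/ocfs2/dst", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (src < 0 || dst < 0)
		return 1;
	/* Whole-file reflink; ends up in ocfs2_remap_file_range(). */
	if (ioctl(dst, FICLONE, src) < 0) {
		perror("FICLONE");
		return 1;
	}
	close(dst);
	close(src);
	return 0;
}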
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bd3475694e83..b63c97f4318e 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1378,15 +1378,23 @@ static int __ocfs2_recovery_thread(void *arg)
int rm_quota_used = 0, i;
struct ocfs2_quota_recovery *qrec;
+ /* Whether quota is enabled. */
+ int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
+ OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
+ || OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
+ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA);
+
status = ocfs2_wait_on_mount(osb);
if (status < 0) {
goto bail;
}
- rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
- if (!rm_quota) {
- status = -ENOMEM;
- goto bail;
+ if (quota_enabled) {
+ rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
+ if (!rm_quota) {
+ status = -ENOMEM;
+ goto bail;
+ }
}
restart:
status = ocfs2_super_lock(osb, 1);
@@ -1422,9 +1430,14 @@ restart:
* then quota usage would be out of sync until some node takes
* the slot. So we remember which nodes need quota recovery
* and when everything else is done, we recover quotas. */
- for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
- if (i == rm_quota_used)
- rm_quota[rm_quota_used++] = slot_num;
+ if (quota_enabled) {
+ for (i = 0; i < rm_quota_used
+ && rm_quota[i] != slot_num; i++)
+ ;
+
+ if (i == rm_quota_used)
+ rm_quota[rm_quota_used++] = slot_num;
+ }
status = ocfs2_recover_node(osb, node_num, slot_num);
skip_recovery:
@@ -1452,16 +1465,19 @@ skip_recovery:
/* Now it is right time to recover quotas... We have to do this under
* superblock lock so that no one can start using the slot (and crash)
* before we recover it */
- for (i = 0; i < rm_quota_used; i++) {
- qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
- if (IS_ERR(qrec)) {
- status = PTR_ERR(qrec);
- mlog_errno(status);
- continue;
+ if (quota_enabled) {
+ for (i = 0; i < rm_quota_used; i++) {
+ qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
+ if (IS_ERR(qrec)) {
+ status = PTR_ERR(qrec);
+ mlog_errno(status);
+ continue;
+ }
+ ocfs2_queue_recovery_completion(osb->journal,
+ rm_quota[i],
+ NULL, NULL, qrec,
+ ORPHAN_NEED_TRUNCATE);
}
- ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
- NULL, NULL, qrec,
- ORPHAN_NEED_TRUNCATE);
}
ocfs2_super_unlock(osb, 1);
@@ -1483,7 +1499,8 @@ bail:
mutex_unlock(&osb->recovery_lock);
- kfree(rm_quota);
+ if (quota_enabled)
+ kfree(rm_quota);
/* no one is calling kthread_stop() for us so the kthread() api
* requires that we call do_exit(). And it isn't exported, but
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 7eb3b0a6347e..3f1685d7d43b 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -25,6 +25,7 @@
#include "ocfs2_ioctl.h"
#include "alloc.h"
+#include "localalloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
@@ -233,6 +234,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
struct ocfs2_refcount_tree *ref_tree = NULL;
u32 new_phys_cpos, new_len;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
+ int need_free = 0;
if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
BUG_ON(!ocfs2_is_refcount_inode(inode));
@@ -308,6 +310,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
if (!partial) {
context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
ret = -ENOSPC;
+ need_free = 1;
goto out_commit;
}
}
@@ -332,6 +335,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
mlog_errno(ret);
out_commit:
+ if (need_free && context->data_ac) {
+ struct ocfs2_alloc_context *data_ac = context->data_ac;
+
+ if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+ new_phys_cpos, new_len);
+ else
+ ocfs2_free_clusters(handle,
+ data_ac->ac_inode,
+ data_ac->ac_bh,
+ ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
+ new_len);
+ }
+
ocfs2_commit_trans(osb, handle);
out_unlock_mutex:
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 1114ef02e780..a35259eebc56 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4466,9 +4466,9 @@ out:
}
/* Update destination inode size, if necessary. */
-static int ocfs2_reflink_update_dest(struct inode *dest,
- struct buffer_head *d_bh,
- loff_t newlen)
+int ocfs2_reflink_update_dest(struct inode *dest,
+ struct buffer_head *d_bh,
+ loff_t newlen)
{
handle_t *handle;
int ret;
@@ -4505,14 +4505,14 @@ out_commit:
}
/* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
-static int ocfs2_reflink_remap_extent(struct inode *s_inode,
- struct buffer_head *s_bh,
- loff_t pos_in,
- struct inode *t_inode,
- struct buffer_head *t_bh,
- loff_t pos_out,
- loff_t len,
- struct ocfs2_cached_dealloc_ctxt *dealloc)
+static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ loff_t pos_in,
+ struct inode *t_inode,
+ struct buffer_head *t_bh,
+ loff_t pos_out,
+ loff_t len,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
{
struct ocfs2_extent_tree s_et;
struct ocfs2_extent_tree t_et;
@@ -4520,8 +4520,9 @@ static int ocfs2_reflink_remap_extent(struct inode *s_inode,
struct buffer_head *ref_root_bh = NULL;
struct ocfs2_refcount_tree *ref_tree;
struct ocfs2_super *osb;
+ loff_t remapped_bytes = 0;
loff_t pstart, plen;
- u32 p_cluster, num_clusters, slast, spos, tpos;
+ u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
unsigned int ext_flags;
int ret = 0;
@@ -4603,30 +4604,34 @@ static int ocfs2_reflink_remap_extent(struct inode *s_inode,
next_loop:
spos += num_clusters;
tpos += num_clusters;
+ remapped_clus += num_clusters;
}
-out:
- return ret;
+ goto out;
out_unlock_refcount:
ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
brelse(ref_root_bh);
- return ret;
+out:
+ remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
+ remapped_bytes = min_t(loff_t, len, remapped_bytes);
+
+ return remapped_bytes > 0 ? remapped_bytes : ret;
}
/* Set up refcount tree and remap s_inode to t_inode. */
-static int ocfs2_reflink_remap_blocks(struct inode *s_inode,
- struct buffer_head *s_bh,
- loff_t pos_in,
- struct inode *t_inode,
- struct buffer_head *t_bh,
- loff_t pos_out,
- loff_t len)
+loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ loff_t pos_in,
+ struct inode *t_inode,
+ struct buffer_head *t_bh,
+ loff_t pos_out,
+ loff_t len)
{
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_super *osb;
struct ocfs2_dinode *dis;
struct ocfs2_dinode *dit;
- int ret;
+ loff_t ret;
osb = OCFS2_SB(s_inode->i_sb);
dis = (struct ocfs2_dinode *)s_bh->b_data;
@@ -4698,7 +4703,7 @@ static int ocfs2_reflink_remap_blocks(struct inode *s_inode,
/* Actually remap extents now. */
ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
pos_out, len, &dealloc);
- if (ret) {
+ if (ret < 0) {
mlog_errno(ret);
goto out;
}
@@ -4713,10 +4718,10 @@ out:
}
/* Lock an inode and grab a bh pointing to the inode. */
-static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
- struct buffer_head **bh1,
- struct inode *t_inode,
- struct buffer_head **bh2)
+int ocfs2_reflink_inodes_lock(struct inode *s_inode,
+ struct buffer_head **bh1,
+ struct inode *t_inode,
+ struct buffer_head **bh2)
{
struct inode *inode1;
struct inode *inode2;
@@ -4801,10 +4806,10 @@ out_i1:
}
/* Unlock both inodes and release buffers. */
-static void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
- struct buffer_head *s_bh,
- struct inode *t_inode,
- struct buffer_head *t_bh)
+void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ struct inode *t_inode,
+ struct buffer_head *t_bh)
{
ocfs2_inode_unlock(s_inode, 1);
ocfs2_rw_unlock(s_inode, 1);
@@ -4816,82 +4821,3 @@ static void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
}
unlock_two_nondirectories(s_inode, t_inode);
}
-
-/* Link a range of blocks from one file to another. */
-int ocfs2_reflink_remap_range(struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len,
- bool is_dedupe)
-{
- struct inode *inode_in = file_inode(file_in);
- struct inode *inode_out = file_inode(file_out);
- struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
- struct buffer_head *in_bh = NULL, *out_bh = NULL;
- bool same_inode = (inode_in == inode_out);
- ssize_t ret;
-
- if (!ocfs2_refcount_tree(osb))
- return -EOPNOTSUPP;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
- return -EROFS;
-
- /* Lock both files against IO */
- ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
- if (ret)
- return ret;
-
- /* Check file eligibility and prepare for block sharing. */
- ret = -EINVAL;
- if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
- (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
- goto out_unlock;
-
- ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
- &len, is_dedupe);
- if (ret <= 0)
- goto out_unlock;
-
- /* Lock out changes to the allocation maps and remap. */
- down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
- if (!same_inode)
- down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
- SINGLE_DEPTH_NESTING);
-
- ret = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in, inode_out,
- out_bh, pos_out, len);
-
- /* Zap any page cache for the destination file's range. */
- if (!ret)
- truncate_inode_pages_range(&inode_out->i_data, pos_out,
- PAGE_ALIGN(pos_out + len) - 1);
-
- up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
- if (!same_inode)
- up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
- if (ret) {
- mlog_errno(ret);
- goto out_unlock;
- }
-
- /*
- * Empty the extent map so that we may get the right extent
- * record from the disk.
- */
- ocfs2_extent_map_trunc(inode_in, 0);
- ocfs2_extent_map_trunc(inode_out, 0);
-
- ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
- if (ret) {
- mlog_errno(ret);
- goto out_unlock;
- }
-
- ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
- return 0;
-
-out_unlock:
- ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
- return ret;
-}
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 4af55bf4b35b..e9e862be4a1e 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -115,11 +115,23 @@ int ocfs2_reflink_ioctl(struct inode *inode,
const char __user *oldname,
const char __user *newname,
bool preserve);
-int ocfs2_reflink_remap_range(struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len,
- bool is_dedupe);
+loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ loff_t pos_in,
+ struct inode *t_inode,
+ struct buffer_head *t_bh,
+ loff_t pos_out,
+ loff_t len);
+int ocfs2_reflink_inodes_lock(struct inode *s_inode,
+ struct buffer_head **bh1,
+ struct inode *t_inode,
+ struct buffer_head **bh2);
+void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ struct inode *t_inode,
+ struct buffer_head *t_bh);
+int ocfs2_reflink_update_dest(struct inode *dest,
+ struct buffer_head *d_bh,
+ loff_t newlen);
#endif /* OCFS2_REFCOUNTTREE_H */
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index d6c350ba25b9..c4b029c43464 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;
-inline int ocfs2_is_o2cb_active(void)
-{
- return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
-}
-EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
-
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index e3036e1790e8..f2dce10fae54 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
-/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
-int ocfs2_is_o2cb_active(void);
-
extern struct kset *ocfs2_kset;
#endif /* STACKGLUE_H */
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 5e65d818937b..fe53381b26b1 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -25,7 +25,7 @@ static int read_one_page(struct page *page)
struct iov_iter to;
struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
- iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);
+ iov_iter_bvec(&to, READ, &bv, 1, PAGE_SIZE);
gossip_debug(GOSSIP_INODE_DEBUG,
"orangefs_readpage called with page %p\n",
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 1cc797a08a5b..9e62dcf06fc4 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -125,6 +125,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
struct file *new_file;
loff_t old_pos = 0;
loff_t new_pos = 0;
+ loff_t cloned;
int error = 0;
if (len == 0)
@@ -141,11 +142,10 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
}
/* Try to use clone_file_range to clone up within the same fs */
- error = do_clone_file_range(old_file, 0, new_file, 0, len);
- if (!error)
+ cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
+ if (cloned == len)
goto out;
/* Couldn't clone, so now we try to copy the data */
- error = 0;
/* FIXME: copy up sparse files efficiently */
while (len) {
@@ -395,7 +395,6 @@ struct ovl_copy_up_ctx {
struct dentry *destdir;
struct qstr destname;
struct dentry *workdir;
- bool tmpfile;
bool origin;
bool indexed;
bool metacopy;
@@ -440,63 +439,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
return err;
}
-static int ovl_install_temp(struct ovl_copy_up_ctx *c, struct dentry *temp,
- struct dentry **newdentry)
-{
- int err;
- struct dentry *upper;
- struct inode *udir = d_inode(c->destdir);
-
- upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
- if (IS_ERR(upper))
- return PTR_ERR(upper);
-
- if (c->tmpfile)
- err = ovl_do_link(temp, udir, upper);
- else
- err = ovl_do_rename(d_inode(c->workdir), temp, udir, upper, 0);
-
- if (!err)
- *newdentry = dget(c->tmpfile ? upper : temp);
- dput(upper);
-
- return err;
-}
-
-static struct dentry *ovl_get_tmpfile(struct ovl_copy_up_ctx *c)
-{
- int err;
- struct dentry *temp;
- const struct cred *old_creds = NULL;
- struct cred *new_creds = NULL;
- struct ovl_cattr cattr = {
- /* Can't properly set mode on creation because of the umask */
- .mode = c->stat.mode & S_IFMT,
- .rdev = c->stat.rdev,
- .link = c->link
- };
-
- err = security_inode_copy_up(c->dentry, &new_creds);
- temp = ERR_PTR(err);
- if (err < 0)
- goto out;
-
- if (new_creds)
- old_creds = override_creds(new_creds);
-
- if (c->tmpfile)
- temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
- else
- temp = ovl_create_temp(c->workdir, &cattr);
-out:
- if (new_creds) {
- revert_creds(old_creds);
- put_cred(new_creds);
- }
-
- return temp;
-}
-
static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
{
int err;
@@ -548,51 +490,148 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
return err;
}
-static int ovl_copy_up_locked(struct ovl_copy_up_ctx *c)
+struct ovl_cu_creds {
+ const struct cred *old;
+ struct cred *new;
+};
+
+static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc)
+{
+ int err;
+
+ cc->old = cc->new = NULL;
+ err = security_inode_copy_up(dentry, &cc->new);
+ if (err < 0)
+ return err;
+
+ if (cc->new)
+ cc->old = override_creds(cc->new);
+
+ return 0;
+}
+
+static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
+{
+ if (cc->new) {
+ revert_creds(cc->old);
+ put_cred(cc->new);
+ }
+}
+
+/*
+ * Copyup using workdir to prepare temp file. Used when copying up directories,
+ * special files or when upper fs doesn't support O_TMPFILE.
+ */
+static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
{
- struct inode *udir = c->destdir->d_inode;
struct inode *inode;
- struct dentry *newdentry = NULL;
- struct dentry *temp;
+ struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
+ struct dentry *temp, *upper;
+ struct ovl_cu_creds cc;
int err;
+ struct ovl_cattr cattr = {
+ /* Can't properly set mode on creation because of the umask */
+ .mode = c->stat.mode & S_IFMT,
+ .rdev = c->stat.rdev,
+ .link = c->link
+ };
+
+ err = ovl_lock_rename_workdir(c->workdir, c->destdir);
+ if (err)
+ return err;
+
+ err = ovl_prep_cu_creds(c->dentry, &cc);
+ if (err)
+ goto unlock;
- temp = ovl_get_tmpfile(c);
+ temp = ovl_create_temp(c->workdir, &cattr);
+ ovl_revert_cu_creds(&cc);
+
+ err = PTR_ERR(temp);
if (IS_ERR(temp))
- return PTR_ERR(temp);
+ goto unlock;
err = ovl_copy_up_inode(c, temp);
if (err)
- goto out;
+ goto cleanup;
if (S_ISDIR(c->stat.mode) && c->indexed) {
err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp);
if (err)
- goto out;
+ goto cleanup;
}
- if (c->tmpfile) {
- inode_lock_nested(udir, I_MUTEX_PARENT);
- err = ovl_install_temp(c, temp, &newdentry);
- inode_unlock(udir);
- } else {
- err = ovl_install_temp(c, temp, &newdentry);
- }
+ upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+ err = PTR_ERR(upper);
+ if (IS_ERR(upper))
+ goto cleanup;
+
+ err = ovl_do_rename(wdir, temp, udir, upper, 0);
+ dput(upper);
if (err)
- goto out;
+ goto cleanup;
if (!c->metacopy)
ovl_set_upperdata(d_inode(c->dentry));
inode = d_inode(c->dentry);
- ovl_inode_update(inode, newdentry);
+ ovl_inode_update(inode, temp);
if (S_ISDIR(inode->i_mode))
ovl_set_flag(OVL_WHITEOUTS, inode);
+unlock:
+ unlock_rename(c->workdir, c->destdir);
-out:
- if (err && !c->tmpfile)
- ovl_cleanup(d_inode(c->workdir), temp);
- dput(temp);
return err;
+cleanup:
+ ovl_cleanup(wdir, temp);
+ dput(temp);
+ goto unlock;
+}
+
+/* Copyup using O_TMPFILE which does not require cross dir locking */
+static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
+{
+ struct inode *udir = d_inode(c->destdir);
+ struct dentry *temp, *upper;
+ struct ovl_cu_creds cc;
+ int err;
+
+ err = ovl_prep_cu_creds(c->dentry, &cc);
+ if (err)
+ return err;
+
+ temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
+ ovl_revert_cu_creds(&cc);
+
+ if (IS_ERR(temp))
+ return PTR_ERR(temp);
+
+ err = ovl_copy_up_inode(c, temp);
+ if (err)
+ goto out_dput;
+
+ inode_lock_nested(udir, I_MUTEX_PARENT);
+
+ upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+ err = PTR_ERR(upper);
+ if (!IS_ERR(upper)) {
+ err = ovl_do_link(temp, udir, upper);
+ dput(upper);
+ }
+ inode_unlock(udir);
+
+ if (err)
+ goto out_dput;
+
+ if (!c->metacopy)
+ ovl_set_upperdata(d_inode(c->dentry));
+ ovl_inode_update(d_inode(c->dentry), temp);
+
+ return 0;
+
+out_dput:
+ dput(temp);
+ return err;
}
/*
@@ -646,18 +685,10 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
}
/* Should we copyup with O_TMPFILE or with workdir? */
- if (S_ISREG(c->stat.mode) && ofs->tmpfile) {
- c->tmpfile = true;
- err = ovl_copy_up_locked(c);
- } else {
- err = ovl_lock_rename_workdir(c->workdir, c->destdir);
- if (!err) {
- err = ovl_copy_up_locked(c);
- unlock_rename(c->workdir, c->destdir);
- }
- }
-
-
+ if (S_ISREG(c->stat.mode) && ofs->tmpfile)
+ err = ovl_copy_up_tmpfile(c);
+ else
+ err = ovl_copy_up_workdir(c);
if (err)
goto out;
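The tmpfile copy-up path above follows the classic "create anonymous, populate, then link into place" pattern. A hedged userspace analogue of the same O_TMPFILE + linkat() idiom (paths and data are placeholders, not overlayfs internals):

#define _GNU_SOURCE		/* O_TMPFILE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char proc_path[64];
	int fd = open("/upper/dir", O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0) {
		perror("O_TMPFILE");	/* fall back to a workdir-style rename */
		return 1;
	}
	if (write(fd, "data", 4) != 4)
		return 1;

	/* Give the anonymous inode its final name, analogous to ovl_do_link(). */
	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/upper/dir/file",
		   AT_SYMLINK_FOLLOW) < 0) {
		perror("linkat");
		return 1;
	}
	close(fd);
	return 0;
}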
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 276914ae3c60..c6289147c787 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -414,13 +414,12 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
return 0;
- size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+ size = posix_acl_xattr_size(acl->a_count);
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
- err = size;
+ err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
if (err < 0)
goto out_free;
@@ -463,6 +462,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (IS_ERR(upper))
goto out_unlock;
+ err = -ESTALE;
+ if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
+ goto out_dput;
+
newdentry = ovl_create_temp(workdir, cattr);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
@@ -652,7 +655,6 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
struct dentry *new)
{
int err;
- bool locked = false;
struct inode *inode;
err = ovl_want_write(old);
@@ -663,13 +665,17 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
goto out_drop_write;
+ err = ovl_copy_up(new->d_parent);
+ if (err)
+ goto out_drop_write;
+
if (ovl_is_metacopy_dentry(old)) {
err = ovl_set_redirect(old, false);
if (err)
goto out_drop_write;
}
- err = ovl_nlink_start(old, &locked);
+ err = ovl_nlink_start(old);
if (err)
goto out_drop_write;
@@ -682,7 +688,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
iput(inode);
- ovl_nlink_end(old, locked);
+ ovl_nlink_end(old);
out_drop_write:
ovl_drop_write(old);
out:
@@ -807,7 +813,6 @@ static bool ovl_pure_upper(struct dentry *dentry)
static int ovl_do_remove(struct dentry *dentry, bool is_dir)
{
int err;
- bool locked = false;
const struct cred *old_cred;
struct dentry *upperdentry;
bool lower_positive = ovl_lower_positive(dentry);
@@ -828,7 +833,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
if (err)
goto out_drop_write;
- err = ovl_nlink_start(dentry, &locked);
+ err = ovl_nlink_start(dentry);
if (err)
goto out_drop_write;
@@ -844,7 +849,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
else
drop_nlink(dentry->d_inode);
}
- ovl_nlink_end(dentry, locked);
+ ovl_nlink_end(dentry);
/*
* Copy ctime
@@ -1008,7 +1013,6 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
unsigned int flags)
{
int err;
- bool locked = false;
struct dentry *old_upperdir;
struct dentry *new_upperdir;
struct dentry *olddentry;
@@ -1017,6 +1021,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
bool old_opaque;
bool new_opaque;
bool cleanup_whiteout = false;
+ bool update_nlink = false;
bool overwrite = !(flags & RENAME_EXCHANGE);
bool is_dir = d_is_dir(old);
bool new_is_dir = d_is_dir(new);
@@ -1074,10 +1079,12 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
err = ovl_copy_up(new);
if (err)
goto out_drop_write;
- } else {
- err = ovl_nlink_start(new, &locked);
+ } else if (d_inode(new)) {
+ err = ovl_nlink_start(new);
if (err)
goto out_drop_write;
+
+ update_nlink = true;
}
old_cred = ovl_override_creds(old->d_sb);
@@ -1206,7 +1213,8 @@ out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
revert_creds(old_cred);
- ovl_nlink_end(new, locked);
+ if (update_nlink)
+ ovl_nlink_end(new);
out_drop_write:
ovl_drop_write(old);
out:
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 986313da0c88..84dd957efa24 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -434,14 +434,14 @@ enum ovl_copyop {
OVL_DEDUPE,
};
-static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
- u64 len, unsigned int flags, enum ovl_copyop op)
+ loff_t len, unsigned int flags, enum ovl_copyop op)
{
struct inode *inode_out = file_inode(file_out);
struct fd real_in, real_out;
const struct cred *old_cred;
- ssize_t ret;
+ loff_t ret;
ret = ovl_real_fdget(file_out, &real_out);
if (ret)
@@ -462,12 +462,13 @@ static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in,
case OVL_CLONE:
ret = vfs_clone_file_range(real_in.file, pos_in,
- real_out.file, pos_out, len);
+ real_out.file, pos_out, len, flags);
break;
case OVL_DEDUPE:
ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
- real_out.file, pos_out, len);
+ real_out.file, pos_out, len,
+ flags);
break;
}
revert_creds(old_cred);
@@ -489,26 +490,31 @@ static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in,
OVL_COPY);
}
-static int ovl_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len)
+static loff_t ovl_remap_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags)
{
- return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0,
- OVL_CLONE);
-}
+ enum ovl_copyop op;
+
+ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ return -EINVAL;
+
+ if (remap_flags & REMAP_FILE_DEDUP)
+ op = OVL_DEDUPE;
+ else
+ op = OVL_CLONE;
-static int ovl_dedupe_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len)
-{
/*
* Don't copy up because of a dedupe request, this wouldn't make sense
* most of the time (data would be duplicated instead of deduplicated).
*/
- if (!ovl_inode_upper(file_inode(file_in)) ||
- !ovl_inode_upper(file_inode(file_out)))
+ if (op == OVL_DEDUPE &&
+ (!ovl_inode_upper(file_inode(file_in)) ||
+ !ovl_inode_upper(file_inode(file_out))))
return -EPERM;
- return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0,
- OVL_DEDUPE);
+ return ovl_copyfile(file_in, pos_in, file_out, pos_out, len,
+ remap_flags, op);
}
const struct file_operations ovl_file_operations = {
@@ -525,6 +531,5 @@ const struct file_operations ovl_file_operations = {
.compat_ioctl = ovl_compat_ioctl,
.copy_file_range = ovl_copy_file_range,
- .clone_file_range = ovl_clone_file_range,
- .dedupe_file_range = ovl_dedupe_file_range,
+ .remap_file_range = ovl_remap_file_range,
};
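The dedupe branch of ovl_remap_file_range() is driven by the FIDEDUPERANGE ioctl, which sets REMAP_FILE_DEDUP on the way down. A minimal caller, shown as a hedged sketch (file names and the 4 KiB length are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FIDEDUPERANGE */

int main(void)
{
	int src = open("/merged/a", O_RDONLY);
	int dst = open("/merged/b", O_RDONLY);
	struct file_dedupe_range *arg;

	if (src < 0 || dst < 0)
		return 1;
	arg = calloc(1, sizeof(*arg) + sizeof(struct file_dedupe_range_info));
	if (!arg)
		return 1;
	arg->src_offset = 0;
	arg->src_length = 4096;
	arg->dest_count = 1;
	arg->info[0].dest_fd = dst;
	arg->info[0].dest_offset = 0;

	/* Issued on the source fd; the kernel compares and shares extents. */
	if (ioctl(src, FIDEDUPERANGE, arg) < 0) {
		perror("FIDEDUPERANGE");
		return 1;
	}
	printf("status %d, bytes deduped %llu\n", arg->info[0].status,
	       (unsigned long long)arg->info[0].bytes_deduped);
	free(arg);
	return 0;
}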
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 3b7ed5d2279c..6bcc9dedc342 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -286,13 +286,22 @@ int ovl_permission(struct inode *inode, int mask)
if (err)
return err;
- old_cred = ovl_override_creds(inode->i_sb);
- if (!upperinode &&
- !special_file(realinode->i_mode) && mask & MAY_WRITE) {
+ /* No need to do any access on underlying for special files */
+ if (special_file(realinode->i_mode))
+ return 0;
+
+ /* No need to access underlying for execute */
+ mask &= ~MAY_EXEC;
+ if ((mask & (MAY_READ | MAY_WRITE)) == 0)
+ return 0;
+
+ /* Lower files get copied up, so turn write access into read */
+ if (!upperinode && mask & MAY_WRITE) {
mask &= ~(MAY_WRITE | MAY_APPEND);
- /* Make sure mounter can read file for copy up later */
mask |= MAY_READ;
}
+
+ old_cred = ovl_override_creds(inode->i_sb);
err = inode_permission(realinode, mask);
revert_creds(old_cred);
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9c0ca6a7becf..efd372312ef1 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -422,8 +422,10 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
fh = ovl_encode_real_fh(real, is_upper);
err = PTR_ERR(fh);
- if (IS_ERR(fh))
+ if (IS_ERR(fh)) {
+ fh = NULL;
goto fail;
+ }
err = ovl_verify_fh(dentry, name, fh);
if (set && err == -ENODATA)
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index a3c0d9584312..5e45cb3630a0 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -271,8 +271,8 @@ bool ovl_test_flag(unsigned long flag, struct inode *inode);
bool ovl_inuse_trylock(struct dentry *dentry);
void ovl_inuse_unlock(struct dentry *dentry);
bool ovl_need_index(struct dentry *dentry);
-int ovl_nlink_start(struct dentry *dentry, bool *locked);
-void ovl_nlink_end(struct dentry *dentry, bool locked);
+int ovl_nlink_start(struct dentry *dentry);
+void ovl_nlink_end(struct dentry *dentry);
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
int ovl_check_metacopy_xattr(struct dentry *dentry);
bool ovl_is_metacopy_dentry(struct dentry *dentry);
@@ -290,6 +290,16 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb)
return ofs->xino_bits;
}
+static inline int ovl_inode_lock(struct inode *inode)
+{
+ return mutex_lock_interruptible(&OVL_I(inode)->lock);
+}
+
+static inline void ovl_inode_unlock(struct inode *inode)
+{
+ mutex_unlock(&OVL_I(inode)->lock);
+}
+
/* namei.c */
int ovl_check_fh_len(struct ovl_fh *fh, int fh_len);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 30adc9d408a0..0116735cc321 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -472,6 +472,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
{
char *p;
int err;
+ bool metacopy_opt = false, redirect_opt = false;
config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
if (!config->redirect_mode)
@@ -516,6 +517,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
config->redirect_mode = match_strdup(&args[0]);
if (!config->redirect_mode)
return -ENOMEM;
+ redirect_opt = true;
break;
case OPT_INDEX_ON:
@@ -548,6 +550,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
case OPT_METACOPY_ON:
config->metacopy = true;
+ metacopy_opt = true;
break;
case OPT_METACOPY_OFF:
@@ -572,13 +575,32 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
if (err)
return err;
- /* metacopy feature with upper requires redirect_dir=on */
- if (config->upperdir && config->metacopy && !config->redirect_dir) {
- pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=on\", falling back to metacopy=off.\n");
- config->metacopy = false;
- } else if (config->metacopy && !config->redirect_follow) {
- pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=follow\" on non-upper mount, falling back to metacopy=off.\n");
- config->metacopy = false;
+ /*
+ * This is to make the logic below simpler. It doesn't make any other
+ * difference, since config->redirect_dir is only used for upper.
+ */
+ if (!config->upperdir && config->redirect_follow)
+ config->redirect_dir = true;
+
+ /* Resolve metacopy -> redirect_dir dependency */
+ if (config->metacopy && !config->redirect_dir) {
+ if (metacopy_opt && redirect_opt) {
+ pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n",
+ config->redirect_mode);
+ return -EINVAL;
+ }
+ if (redirect_opt) {
+ /*
+ * There was an explicit redirect_dir=... that resulted
+ * in this conflict.
+ */
+ pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n",
+ config->redirect_mode);
+ config->metacopy = false;
+ } else {
+ /* Automatically enable redirect otherwise. */
+ config->redirect_follow = config->redirect_dir = true;
+ }
}
return 0;
@@ -1175,9 +1197,29 @@ out:
return err;
}
+static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
+{
+ unsigned int i;
+
+ if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt))
+ return true;
+
+ for (i = 0; i < ofs->numlowerfs; i++) {
+ /*
+ * We use uuid to associate an overlay lower file handle with a
+ * lower layer, so we can accept lower fs with null uuid as long
+ * as all lower layers with null uuid are on the same fs.
+ */
+ if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid))
+ return false;
+ }
+ return true;
+}
+
/* Get a unique fsid for the layer */
-static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
+static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
{
+ struct super_block *sb = path->mnt->mnt_sb;
unsigned int i;
dev_t dev;
int err;
@@ -1191,6 +1233,14 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
return i + 1;
}
+ if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
+ ofs->config.index = false;
+ ofs->config.nfs_export = false;
+ pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+ uuid_is_null(&sb->s_uuid) ? "null" : "conflicting",
+ path->dentry);
+ }
+
err = get_anon_bdev(&dev);
if (err) {
pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
@@ -1225,7 +1275,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
struct vfsmount *mnt;
int fsid;
- err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb);
+ err = fsid = ovl_get_fsid(ofs, &stack[i]);
if (err < 0)
goto out;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index ace4fe4c39a9..7c01327b1852 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -65,8 +65,7 @@ struct super_block *ovl_same_sb(struct super_block *sb)
*/
int ovl_can_decode_fh(struct super_block *sb)
{
- if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry ||
- uuid_is_null(&sb->s_uuid))
+ if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry)
return 0;
return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN;
@@ -522,13 +521,13 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags)
int ovl_copy_up_start(struct dentry *dentry, int flags)
{
- struct ovl_inode *oi = OVL_I(d_inode(dentry));
+ struct inode *inode = d_inode(dentry);
int err;
- err = mutex_lock_interruptible(&oi->lock);
+ err = ovl_inode_lock(inode);
if (!err && ovl_already_copied_up_locked(dentry, flags)) {
err = 1; /* Already copied up */
- mutex_unlock(&oi->lock);
+ ovl_inode_unlock(inode);
}
return err;
@@ -536,7 +535,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags)
void ovl_copy_up_end(struct dentry *dentry)
{
- mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+ ovl_inode_unlock(d_inode(dentry));
}
bool ovl_check_origin_xattr(struct dentry *dentry)
@@ -739,14 +738,14 @@ fail:
* Operations that change overlay inode and upper inode nlink need to be
* synchronized with copy up for persistent nlink accounting.
*/
-int ovl_nlink_start(struct dentry *dentry, bool *locked)
+int ovl_nlink_start(struct dentry *dentry)
{
- struct ovl_inode *oi = OVL_I(d_inode(dentry));
+ struct inode *inode = d_inode(dentry);
const struct cred *old_cred;
int err;
- if (!d_inode(dentry))
- return 0;
+ if (WARN_ON(!inode))
+ return -ENOENT;
/*
* With inodes index is enabled, we store the union overlay nlink
@@ -768,11 +767,11 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
return err;
}
- err = mutex_lock_interruptible(&oi->lock);
+ err = ovl_inode_lock(inode);
if (err)
return err;
- if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
+ if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode))
goto out;
old_cred = ovl_override_creds(dentry->d_sb);
@@ -787,27 +786,24 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
out:
if (err)
- mutex_unlock(&oi->lock);
- else
- *locked = true;
+ ovl_inode_unlock(inode);
return err;
}
-void ovl_nlink_end(struct dentry *dentry, bool locked)
+void ovl_nlink_end(struct dentry *dentry)
{
- if (locked) {
- if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) &&
- d_inode(dentry)->i_nlink == 0) {
- const struct cred *old_cred;
+ struct inode *inode = d_inode(dentry);
- old_cred = ovl_override_creds(dentry->d_sb);
- ovl_cleanup_index(dentry);
- revert_creds(old_cred);
- }
+ if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) {
+ const struct cred *old_cred;
- mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ ovl_cleanup_index(dentry);
+ revert_creds(old_cred);
}
+
+ ovl_inode_unlock(inode);
}
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7e9f07bf260d..ce3465479447 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2905,6 +2905,21 @@ static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
}
#endif /* CONFIG_LIVEPATCH */
+#ifdef CONFIG_STACKLEAK_METRICS
+static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+{
+ unsigned long prev_depth = THREAD_SIZE -
+ (task->prev_lowest_stack & (THREAD_SIZE - 1));
+ unsigned long depth = THREAD_SIZE -
+ (task->lowest_stack & (THREAD_SIZE - 1));
+
+ seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n",
+ prev_depth, depth);
+ return 0;
+}
+#endif /* CONFIG_STACKLEAK_METRICS */
+
/*
* Thread groups
*/
@@ -3006,6 +3021,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_LIVEPATCH
ONE("patch_state", S_IRUSR, proc_pid_patch_state),
#endif
+#ifdef CONFIG_STACKLEAK_METRICS
+ ONE("stack_depth", S_IRUGO, proc_stack_depth),
+#endif
};
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
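
For reference, a minimal userspace sketch of consuming the new per-process file added above (assumes a kernel built with CONFIG_STACKLEAK_METRICS=y; error handling kept minimal):

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/proc/self/stack_depth", "r");

	if (!f) {
		perror("/proc/self/stack_depth");
		return 1;
	}
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* "previous stack depth: ...", "stack depth: ..." */
	fclose(f);
	return 0;
}
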
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index d297fe4472a9..bbcc185062bb 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -22,7 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 792c78a49174..6c517b11acf8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 91ae16fbd7d5..3fe90443c1bb 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
@@ -423,7 +423,7 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
if (rc < 0) {
unlock_page(page);
put_page(page);
- return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ return vmf_error(rc);
}
SetPageUptodate(page);
}
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 503086f7f7c1..0d19d191ae70 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -141,7 +141,6 @@ config PSTORE_RAM
tristate "Log panic/oops to a RAM buffer"
depends on PSTORE
depends on HAS_IOMEM
- depends on HAVE_MEMBLOCK
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
diff --git a/fs/read_write.c b/fs/read_write.c
index 603794b207eb..bfcb4ced5664 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1407,7 +1407,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
goto fput_in;
if (!(out.file->f_mode & FMODE_WRITE))
goto fput_out;
- retval = -EINVAL;
in_inode = file_inode(in.file);
out_inode = file_inode(out.file);
out_pos = out.file->f_pos;
@@ -1588,11 +1587,15 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
* Try cloning first, this is supported by more file systems, and
* more efficient if both clone and copy are supported (e.g. NFS).
*/
- if (file_in->f_op->clone_file_range) {
- ret = file_in->f_op->clone_file_range(file_in, pos_in,
- file_out, pos_out, len);
- if (ret == 0) {
- ret = len;
+ if (file_in->f_op->remap_file_range) {
+ loff_t cloned;
+
+ cloned = file_in->f_op->remap_file_range(file_in, pos_in,
+ file_out, pos_out,
+ min_t(loff_t, MAX_RW_COUNT, len),
+ REMAP_FILE_CAN_SHORTEN);
+ if (cloned > 0) {
+ ret = cloned;
goto done;
}
}
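
The userspace entry point is unchanged; a small sketch of the caller's view (paths are placeholders), where the fast path above may end up sharing blocks instead of copying when the filesystem implements ->remap_file_range:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int in = open("/tmp/src", O_RDONLY);
	int out = open("/tmp/dst", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t n;

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %zd bytes\n", n);
	close(in);
	close(out);
	return 0;
}
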
@@ -1686,11 +1689,12 @@ out2:
return ret;
}
-static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
+static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
+ bool write)
{
struct inode *inode = file_inode(file);
- if (unlikely(pos < 0))
+ if (unlikely(pos < 0 || len < 0))
return -EINVAL;
if (unlikely((loff_t) (pos + len) < 0))
@@ -1708,22 +1712,150 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
}
+/*
+ * Ensure that we don't remap a partial EOF block in the middle of something
+ * else. Assume that the offsets have already been checked for block
+ * alignment.
+ *
+ * For deduplication we always scale down to the previous block because we
+ * can't meaningfully compare post-EOF contents.
+ *
+ * For clone we only link a partial EOF block above the destination file's EOF.
+ *
+ * Shorten the request if possible.
+ */
+static int generic_remap_check_len(struct inode *inode_in,
+ struct inode *inode_out,
+ loff_t pos_out,
+ loff_t *len,
+ unsigned int remap_flags)
+{
+ u64 blkmask = i_blocksize(inode_in) - 1;
+ loff_t new_len = *len;
+
+ if ((*len & blkmask) == 0)
+ return 0;
+
+ if ((remap_flags & REMAP_FILE_DEDUP) ||
+ pos_out + *len < i_size_read(inode_out))
+ new_len &= ~blkmask;
+
+ if (new_len == *len)
+ return 0;
+
+ if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
+ *len = new_len;
+ return 0;
+ }
+
+ return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
+}
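
A tiny standalone sketch of the rounding rule above, assuming a 4096-byte block size:

#include <stdio.h>

int main(void)
{
	unsigned long long blkmask = 4096 - 1;	/* i_blocksize(inode_in) - 1 */
	unsigned long long len = 10000;		/* not block aligned */

	/* dedupe, or a clone strictly inside the destination EOF */
	printf("requested %llu, shortened to %llu\n", len, len & ~blkmask);
	return 0;
}
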
+
+/*
+ * Read a page's worth of file data into the page cache. Return the page
+ * locked.
+ */
+static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+{
+ struct page *page;
+
+ page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
+ if (IS_ERR(page))
+ return page;
+ if (!PageUptodate(page)) {
+ put_page(page);
+ return ERR_PTR(-EIO);
+ }
+ lock_page(page);
+ return page;
+}
+
+/*
+ * Compare extents of two files to see if they are the same.
+ * Caller must have locked both inodes to prevent write races.
+ */
+static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same)
+{
+ loff_t src_poff;
+ loff_t dest_poff;
+ void *src_addr;
+ void *dest_addr;
+ struct page *src_page;
+ struct page *dest_page;
+ loff_t cmp_len;
+ bool same;
+ int error;
+
+ error = -EINVAL;
+ same = true;
+ while (len) {
+ src_poff = srcoff & (PAGE_SIZE - 1);
+ dest_poff = destoff & (PAGE_SIZE - 1);
+ cmp_len = min(PAGE_SIZE - src_poff,
+ PAGE_SIZE - dest_poff);
+ cmp_len = min(cmp_len, len);
+ if (cmp_len <= 0)
+ goto out_error;
+
+ src_page = vfs_dedupe_get_page(src, srcoff);
+ if (IS_ERR(src_page)) {
+ error = PTR_ERR(src_page);
+ goto out_error;
+ }
+ dest_page = vfs_dedupe_get_page(dest, destoff);
+ if (IS_ERR(dest_page)) {
+ error = PTR_ERR(dest_page);
+ unlock_page(src_page);
+ put_page(src_page);
+ goto out_error;
+ }
+ src_addr = kmap_atomic(src_page);
+ dest_addr = kmap_atomic(dest_page);
+
+ flush_dcache_page(src_page);
+ flush_dcache_page(dest_page);
+
+ if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
+ same = false;
+
+ kunmap_atomic(dest_addr);
+ kunmap_atomic(src_addr);
+ unlock_page(dest_page);
+ unlock_page(src_page);
+ put_page(dest_page);
+ put_page(src_page);
+
+ if (!same)
+ break;
+
+ srcoff += cmp_len;
+ destoff += cmp_len;
+ len -= cmp_len;
+ }
+
+ *is_same = same;
+ return 0;
+
+out_error:
+ return error;
+}
/*
* Check that the two inodes are eligible for cloning, the ranges make
* sense, and then flush all dirty data. Caller must ensure that the
* inodes have been locked against any other modifications.
*
- * Returns: 0 for "nothing to clone", 1 for "something to clone", or
- * the usual negative error code.
+ * If there's an error, then the usual negative error code is returned.
+ * Otherwise returns 0 with *len set to the request length.
*/
-int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
- struct inode *inode_out, loff_t pos_out,
- u64 *len, bool is_dedupe)
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags)
{
- loff_t bs = inode_out->i_sb->s_blocksize;
- loff_t blen;
- loff_t isize;
+ struct inode *inode_in = file_inode(file_in);
+ struct inode *inode_out = file_inode(file_out);
bool same_inode = (inode_in == inode_out);
int ret;
@@ -1740,50 +1872,24 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
return -EINVAL;
- /* Are we going all the way to the end? */
- isize = i_size_read(inode_in);
- if (isize == 0)
- return 0;
-
/* Zero length dedupe exits immediately; reflink goes to EOF. */
if (*len == 0) {
- if (is_dedupe || pos_in == isize)
+ loff_t isize = i_size_read(inode_in);
+
+ if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
return 0;
if (pos_in > isize)
return -EINVAL;
*len = isize - pos_in;
+ if (*len == 0)
+ return 0;
}
- /* Ensure offsets don't wrap and the input is inside i_size */
- if (pos_in + *len < pos_in || pos_out + *len < pos_out ||
- pos_in + *len > isize)
- return -EINVAL;
-
- /* Don't allow dedupe past EOF in the dest file */
- if (is_dedupe) {
- loff_t disize;
-
- disize = i_size_read(inode_out);
- if (pos_out >= disize || pos_out + *len > disize)
- return -EINVAL;
- }
-
- /* If we're linking to EOF, continue to the block boundary. */
- if (pos_in + *len == isize)
- blen = ALIGN(isize, bs) - pos_in;
- else
- blen = *len;
-
- /* Only reflink if we're aligned to block boundaries */
- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
- !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
- return -EINVAL;
-
- /* Don't allow overlapped reflink within the same file */
- if (same_inode) {
- if (pos_out + blen > pos_in && pos_out < pos_in + blen)
- return -EINVAL;
- }
+ /* Check that we don't violate system file offset limits. */
+ ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
+ remap_flags);
+ if (ret)
+ return ret;
/* Wait for the completion of any pending IOs on both files */
inode_dio_wait(inode_in);
@@ -1803,7 +1909,7 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
/*
* Check that the extents are the same.
*/
- if (is_dedupe) {
+ if (remap_flags & REMAP_FILE_DEDUP) {
bool is_same = false;
ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
@@ -1814,16 +1920,43 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
return -EBADE;
}
- return 1;
+ ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
+ remap_flags);
+ if (ret)
+ return ret;
+
+ /* If we can't alter the file contents, we're done. */
+ if (!(remap_flags & REMAP_FILE_DEDUP)) {
+ /* Update the timestamps, since we can alter file contents. */
+ if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+ ret = file_update_time(file_out);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Clear the security bits if the process is not being run by
+ * root. This keeps people from modifying setuid and setgid
+ * binaries.
+ */
+ ret = file_remove_privs(file_out);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
+EXPORT_SYMBOL(generic_remap_file_range_prep);
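
A hypothetical ->remap_file_range() implementation would call the new helper roughly like this (names and locking are placeholders; see the xfs conversion later in this diff for the real thing):

#include <linux/fs.h>

/* All names below are placeholders for a hypothetical filesystem. */
static loff_t examplefs_remap_file_range(struct file *file_in, loff_t pos_in,
					 struct file *file_out, loff_t pos_out,
					 loff_t len, unsigned int remap_flags)
{
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	/* lock both inodes against other modifications here */

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	/* share (or, for dedupe, verify and share) the block range here */

out_unlock:
	/* unlock both inodes */
	return ret < 0 ? ret : len;
}
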
-int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len)
+loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags)
{
struct inode *inode_in = file_inode(file_in);
struct inode *inode_out = file_inode(file_out);
- int ret;
+ loff_t ret;
+
+ WARN_ON_ONCE(remap_flags);
if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
return -EISDIR;
@@ -1843,155 +1976,76 @@ int do_clone_file_range(struct file *file_in, loff_t pos_in,
(file_out->f_flags & O_APPEND))
return -EBADF;
- if (!file_in->f_op->clone_file_range)
+ if (!file_in->f_op->remap_file_range)
return -EOPNOTSUPP;
- ret = clone_verify_area(file_in, pos_in, len, false);
+ ret = remap_verify_area(file_in, pos_in, len, false);
if (ret)
return ret;
- ret = clone_verify_area(file_out, pos_out, len, true);
+ ret = remap_verify_area(file_out, pos_out, len, true);
if (ret)
return ret;
- if (pos_in + len > i_size_read(inode_in))
- return -EINVAL;
-
- ret = file_in->f_op->clone_file_range(file_in, pos_in,
- file_out, pos_out, len);
- if (!ret) {
- fsnotify_access(file_in);
- fsnotify_modify(file_out);
- }
+ ret = file_in->f_op->remap_file_range(file_in, pos_in,
+ file_out, pos_out, len, remap_flags);
+ if (ret < 0)
+ return ret;
+ fsnotify_access(file_in);
+ fsnotify_modify(file_out);
return ret;
}
EXPORT_SYMBOL(do_clone_file_range);
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len)
+loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags)
{
- int ret;
+ loff_t ret;
file_start_write(file_out);
- ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+ ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
+ remap_flags);
file_end_write(file_out);
return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);
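
From userspace this path is reached through FICLONERANGE; a minimal sketch (paths are placeholders, and both files must live on the same filesystem):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	struct file_clone_range fcr;
	int src = open("/tmp/src", O_RDONLY);
	int dst = open("/tmp/dst", O_RDWR | O_CREAT, 0644);

	if (src < 0 || dst < 0) {
		perror("open");
		return 1;
	}
	memset(&fcr, 0, sizeof(fcr));
	fcr.src_fd = src;
	fcr.src_length = 0;		/* 0 means "share up to source EOF" */
	if (ioctl(dst, FICLONERANGE, &fcr))
		perror("FICLONERANGE");
	close(src);
	close(dst);
	return 0;
}
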
-/*
- * Read a page's worth of file data into the page cache. Return the page
- * locked.
- */
-static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+/* Check whether we are allowed to dedupe the destination file */
+static bool allow_file_dedupe(struct file *file)
{
- struct address_space *mapping;
- struct page *page;
- pgoff_t n;
-
- n = offset >> PAGE_SHIFT;
- mapping = inode->i_mapping;
- page = read_mapping_page(mapping, n, NULL);
- if (IS_ERR(page))
- return page;
- if (!PageUptodate(page)) {
- put_page(page);
- return ERR_PTR(-EIO);
- }
- lock_page(page);
- return page;
+ if (capable(CAP_SYS_ADMIN))
+ return true;
+ if (file->f_mode & FMODE_WRITE)
+ return true;
+ if (uid_eq(current_fsuid(), file_inode(file)->i_uid))
+ return true;
+ if (!inode_permission(file_inode(file), MAY_WRITE))
+ return true;
+ return false;
}
-/*
- * Compare extents of two files to see if they are the same.
- * Caller must have locked both inodes to prevent write races.
- */
-int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
- struct inode *dest, loff_t destoff,
- loff_t len, bool *is_same)
+loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos,
+ loff_t len, unsigned int remap_flags)
{
- loff_t src_poff;
- loff_t dest_poff;
- void *src_addr;
- void *dest_addr;
- struct page *src_page;
- struct page *dest_page;
- loff_t cmp_len;
- bool same;
- int error;
-
- error = -EINVAL;
- same = true;
- while (len) {
- src_poff = srcoff & (PAGE_SIZE - 1);
- dest_poff = destoff & (PAGE_SIZE - 1);
- cmp_len = min(PAGE_SIZE - src_poff,
- PAGE_SIZE - dest_poff);
- cmp_len = min(cmp_len, len);
- if (cmp_len <= 0)
- goto out_error;
-
- src_page = vfs_dedupe_get_page(src, srcoff);
- if (IS_ERR(src_page)) {
- error = PTR_ERR(src_page);
- goto out_error;
- }
- dest_page = vfs_dedupe_get_page(dest, destoff);
- if (IS_ERR(dest_page)) {
- error = PTR_ERR(dest_page);
- unlock_page(src_page);
- put_page(src_page);
- goto out_error;
- }
- src_addr = kmap_atomic(src_page);
- dest_addr = kmap_atomic(dest_page);
+ loff_t ret;
- flush_dcache_page(src_page);
- flush_dcache_page(dest_page);
-
- if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
- same = false;
-
- kunmap_atomic(dest_addr);
- kunmap_atomic(src_addr);
- unlock_page(dest_page);
- unlock_page(src_page);
- put_page(dest_page);
- put_page(src_page);
-
- if (!same)
- break;
-
- srcoff += cmp_len;
- destoff += cmp_len;
- len -= cmp_len;
- }
-
- *is_same = same;
- return 0;
-
-out_error:
- return error;
-}
-EXPORT_SYMBOL(vfs_dedupe_file_range_compare);
-
-int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
- struct file *dst_file, loff_t dst_pos, u64 len)
-{
- s64 ret;
+ WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
+ REMAP_FILE_CAN_SHORTEN));
ret = mnt_want_write_file(dst_file);
if (ret)
return ret;
- ret = clone_verify_area(dst_file, dst_pos, len, true);
+ ret = remap_verify_area(dst_file, dst_pos, len, true);
if (ret < 0)
goto out_drop_write;
- ret = -EINVAL;
- if (!(capable(CAP_SYS_ADMIN) || (dst_file->f_mode & FMODE_WRITE)))
+ ret = -EPERM;
+ if (!allow_file_dedupe(dst_file))
goto out_drop_write;
ret = -EXDEV;
@@ -2003,11 +2057,16 @@ int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
goto out_drop_write;
ret = -EINVAL;
- if (!dst_file->f_op->dedupe_file_range)
+ if (!dst_file->f_op->remap_file_range)
goto out_drop_write;
- ret = dst_file->f_op->dedupe_file_range(src_file, src_pos,
- dst_file, dst_pos, len);
+ if (len == 0) {
+ ret = 0;
+ goto out_drop_write;
+ }
+
+ ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
+ dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
mnt_drop_write_file(dst_file);
@@ -2024,7 +2083,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
int i;
int ret;
u16 count = same->dest_count;
- int deduped;
+ loff_t deduped;
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
@@ -2043,7 +2102,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
if (!S_ISREG(src->i_mode))
goto out;
- ret = clone_verify_area(file, off, len, false);
+ ret = remap_verify_area(file, off, len, false);
if (ret < 0)
goto out;
ret = 0;
@@ -2075,7 +2134,8 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
}
deduped = vfs_dedupe_file_range_one(file, off, dst_file,
- info->dest_offset, len);
+ info->dest_offset, len,
+ REMAP_FILE_CAN_SHORTEN);
if (deduped == -EBADE)
info->status = FILE_DEDUPE_RANGE_DIFFERS;
else if (deduped < 0)
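
The corresponding userspace path is FIDEDUPERANGE, which now arrives here with REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN; a minimal sketch (paths and lengths are placeholders, and thanks to allow_file_dedupe() a read-only destination fd can be enough when the caller owns the file):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	struct file_dedupe_range *range;
	int src = open("/tmp/a", O_RDONLY);
	int dst = open("/tmp/b", O_RDONLY);	/* owner: read access suffices */

	if (src < 0 || dst < 0) {
		perror("open");
		return 1;
	}
	range = calloc(1, sizeof(*range) + sizeof(struct file_dedupe_range_info));
	if (!range)
		return 1;
	range->src_offset = 0;
	range->src_length = 4096;		/* block aligned */
	range->dest_count = 1;
	range->info[0].dest_fd = dst;
	range->info[0].dest_offset = 0;
	if (ioctl(src, FIDEDUPERANGE, range))
		perror("FIDEDUPERANGE");
	else
		printf("status %d, %llu bytes deduped\n",
		       range->info[0].status,
		       (unsigned long long)range->info[0].bytes_deduped);
	free(range);
	close(src);
	close(dst);
	return 0;
}
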
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index a39a562c1c10..bd29c58ccbd8 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -26,14 +26,5 @@ ifeq ($(CONFIG_REISERFS_FS_POSIX_ACL),y)
reiserfs-objs += xattr_acl.o
endif
-# gcc -O2 (the kernel default) is overaggressive on ppc32 when many inline
-# functions are used. This causes the compiler to advance the stack
-# pointer out of the available stack space, corrupting kernel space,
-# and causing a panic. Since this behavior only affects ppc32, this ifeq
-# will work around it. If any other architecture displays this behavior,
-# add it here.
-ccflags-$(CONFIG_PPC32) := $(call cc-ifversion, -lt, 0400, -O1)
-
TAGS:
etags *.c
-
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 48cdfc81fe10..32d8986c26fb 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -185,6 +185,7 @@ struct reiserfs_dentry_buf {
struct dir_context ctx;
struct dentry *xadir;
int count;
+ int err;
struct dentry *dentries[8];
};
@@ -207,6 +208,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
dentry = lookup_one_len(name, dbuf->xadir, namelen);
if (IS_ERR(dentry)) {
+ dbuf->err = PTR_ERR(dentry);
return PTR_ERR(dentry);
} else if (d_really_is_negative(dentry)) {
/* A directory entry exists, but no file? */
@@ -215,6 +217,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
"not found for file %pd.\n",
dentry, dbuf->xadir);
dput(dentry);
+ dbuf->err = -EIO;
return -EIO;
}
@@ -262,6 +265,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
if (err)
break;
+ if (buf.err) {
+ err = buf.err;
+ break;
+ }
if (!buf.count)
break;
for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
diff --git a/fs/splice.c b/fs/splice.c
index b3daa971f597..3553f1956508 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -301,7 +301,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
struct kiocb kiocb;
int idx, ret;
- iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
+ iov_iter_pipe(&to, READ, pipe, len);
idx = to.idx;
init_sync_kiocb(&kiocb, in);
kiocb.ki_pos = *ppos;
@@ -386,7 +386,7 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
*/
offset = *ppos & ~PAGE_MASK;
- iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len + offset);
+ iov_iter_pipe(&to, READ, pipe, len + offset);
res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base);
if (res <= 0)
@@ -745,8 +745,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
left -= this_len;
}
- iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n,
- sd.total_len - left);
+ iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
ret = vfs_iter_write(out, &from, &sd.pos, 0);
if (ret <= 0)
break;
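
A minimal sketch of the new iov_iter calling convention shown above (function name hypothetical): the iterator type is now implied by the constructor, so callers pass only the data direction:

#include <linux/uio.h>

/* Name is hypothetical; mirrors the iter_file_splice_write() change above. */
static void example_fill_iter(struct iov_iter *iter, struct bio_vec *bvec,
			      unsigned int nr_segs, size_t bytes)
{
	iov_iter_bvec(iter, WRITE, bvec, nr_segs, bytes);	/* was ITER_BVEC | WRITE */
}
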
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 61a5ad2600e8..53c9ab8fb777 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -919,28 +919,67 @@ out_unlock:
return error;
}
-STATIC int
-xfs_file_clone_range(
- struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len)
-{
- return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
- len, false);
-}
-STATIC int
-xfs_file_dedupe_range(
- struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len)
+loff_t
+xfs_file_remap_range(
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ loff_t len,
+ unsigned int remap_flags)
{
- return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
- len, true);
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ struct xfs_mount *mp = src->i_mount;
+ loff_t remapped = 0;
+ xfs_extlen_t cowextsize;
+ int ret;
+
+ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ return -EINVAL;
+
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ /* Prepare and then clone file data. */
+ ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+ &len, remap_flags);
+ if (ret < 0 || len == 0)
+ return ret;
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
+ ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
+ &remapped);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * Carry the cowextsize hint from src to dest if we're sharing the
+ * entire source file to the entire destination file, the source file
+ * has a cowextsize hint, and the destination file does not.
+ */
+ cowextsize = 0;
+ if (pos_in == 0 && len == i_size_read(inode_in) &&
+ (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
+ pos_out == 0 && len >= i_size_read(inode_out) &&
+ !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
+ cowextsize = src->i_d.di_cowextsize;
+
+ ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
+ remap_flags);
+
+out_unlock:
+ xfs_reflink_remap_unlock(file_in, file_out);
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+ return remapped > 0 ? remapped : ret;
}
STATIC int
@@ -1175,8 +1214,7 @@ const struct file_operations xfs_file_operations = {
.fsync = xfs_file_fsync,
.get_unmapped_area = thp_get_unmapped_area,
.fallocate = xfs_file_fallocate,
- .clone_file_range = xfs_file_clone_range,
- .dedupe_file_range = xfs_file_dedupe_range,
+ .remap_file_range = xfs_file_remap_range,
};
const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 8eaeec9d58ed..ecdb086bc23e 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -913,18 +913,18 @@ out_error:
/*
* Update destination inode size & cowextsize hint, if necessary.
*/
-STATIC int
+int
xfs_reflink_update_dest(
struct xfs_inode *dest,
xfs_off_t newlen,
xfs_extlen_t cowextsize,
- bool is_dedupe)
+ unsigned int remap_flags)
{
struct xfs_mount *mp = dest->i_mount;
struct xfs_trans *tp;
int error;
- if (is_dedupe && newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
+ if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
return 0;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
@@ -945,10 +945,6 @@ xfs_reflink_update_dest(
dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
}
- if (!is_dedupe) {
- xfs_trans_ichgtime(tp, dest,
- XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
error = xfs_trans_commit(tp);
@@ -1112,19 +1108,28 @@ out:
/*
* Iteratively remap one file's extents (and holes) to another's.
*/
-STATIC int
+int
xfs_reflink_remap_blocks(
struct xfs_inode *src,
- xfs_fileoff_t srcoff,
+ loff_t pos_in,
struct xfs_inode *dest,
- xfs_fileoff_t destoff,
- xfs_filblks_t len,
- xfs_off_t new_isize)
+ loff_t pos_out,
+ loff_t remap_len,
+ loff_t *remapped)
{
struct xfs_bmbt_irec imap;
+ xfs_fileoff_t srcoff;
+ xfs_fileoff_t destoff;
+ xfs_filblks_t len;
+ xfs_filblks_t range_len;
+ xfs_filblks_t remapped_len = 0;
+ xfs_off_t new_isize = pos_out + remap_len;
int nimaps;
int error = 0;
- xfs_filblks_t range_len;
+
+ destoff = XFS_B_TO_FSBT(src->i_mount, pos_out);
+ srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in);
+ len = XFS_B_TO_FSB(src->i_mount, remap_len);
/* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
while (len) {
@@ -1139,7 +1144,7 @@ xfs_reflink_remap_blocks(
error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
xfs_iunlock(src, lock_mode);
if (error)
- goto err;
+ break;
ASSERT(nimaps == 1);
trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE,
@@ -1153,23 +1158,24 @@ xfs_reflink_remap_blocks(
error = xfs_reflink_remap_extent(dest, &imap, destoff,
new_isize);
if (error)
- goto err;
+ break;
if (fatal_signal_pending(current)) {
error = -EINTR;
- goto err;
+ break;
}
/* Advance drange/srange */
srcoff += range_len;
destoff += range_len;
len -= range_len;
+ remapped_len += range_len;
}
- return 0;
-
-err:
- trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
+ if (error)
+ trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
+ *remapped = min_t(loff_t, remap_len,
+ XFS_FSB_TO_B(src->i_mount, remapped_len));
return error;
}
@@ -1218,7 +1224,7 @@ retry:
}
/* Unlock both inodes after they've been prepped for a range clone. */
-STATIC void
+void
xfs_reflink_remap_unlock(
struct file *file_in,
struct file *file_out)
@@ -1286,21 +1292,20 @@ xfs_reflink_zero_posteof(
* stale data in the destination file. Hence we reject these clone attempts with
* -EINVAL in this case.
*/
-STATIC int
+int
xfs_reflink_remap_prep(
struct file *file_in,
loff_t pos_in,
struct file *file_out,
loff_t pos_out,
- u64 *len,
- bool is_dedupe)
+ loff_t *len,
+ unsigned int remap_flags)
{
struct inode *inode_in = file_inode(file_in);
struct xfs_inode *src = XFS_I(inode_in);
struct inode *inode_out = file_inode(file_out);
struct xfs_inode *dest = XFS_I(inode_out);
bool same_inode = (inode_in == inode_out);
- u64 blkmask = i_blocksize(inode_in) - 1;
ssize_t ret;
/* Lock both files against IO */
@@ -1323,29 +1328,11 @@ xfs_reflink_remap_prep(
if (IS_DAX(inode_in) || IS_DAX(inode_out))
goto out_unlock;
- ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
- len, is_dedupe);
- if (ret <= 0)
+ ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
+ len, remap_flags);
+ if (ret < 0 || *len == 0)
goto out_unlock;
- /*
- * If the dedupe data matches, chop off the partial EOF block
- * from the source file so we don't try to dedupe the partial
- * EOF block.
- */
- if (is_dedupe) {
- *len &= ~blkmask;
- } else if (*len & blkmask) {
- /*
- * The user is attempting to share a partial EOF block,
- * if it's inside the destination EOF then reject it.
- */
- if (pos_out + *len < i_size_read(inode_out)) {
- ret = -EINVAL;
- goto out_unlock;
- }
- }
-
/* Attach dquots to dest inode before changing block map */
ret = xfs_qm_dqattach(dest);
if (ret)
@@ -1365,31 +1352,9 @@ xfs_reflink_remap_prep(
goto out_unlock;
/* Zap any page cache for the destination file's range. */
- truncate_inode_pages_range(&inode_out->i_data, pos_out,
- PAGE_ALIGN(pos_out + *len) - 1);
-
- /* If we're altering the file contents... */
- if (!is_dedupe) {
- /*
- * ...update the timestamps (which will grab the ilock again
- * from xfs_fs_dirty_inode, so we have to call it before we
- * take the ilock).
- */
- if (!(file_out->f_mode & FMODE_NOCMTIME)) {
- ret = file_update_time(file_out);
- if (ret)
- goto out_unlock;
- }
-
- /*
- * ...clear the security bits if the process is not being run
- * by root. This keeps people from modifying setuid and setgid
- * binaries.
- */
- ret = file_remove_privs(file_out);
- if (ret)
- goto out_unlock;
- }
+ truncate_inode_pages_range(&inode_out->i_data,
+ round_down(pos_out, PAGE_SIZE),
+ round_up(pos_out + *len, PAGE_SIZE) - 1);
return 1;
out_unlock:
@@ -1398,72 +1363,6 @@ out_unlock:
}
/*
- * Link a range of blocks from one file to another.
- */
-int
-xfs_reflink_remap_range(
- struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len,
- bool is_dedupe)
-{
- struct inode *inode_in = file_inode(file_in);
- struct xfs_inode *src = XFS_I(inode_in);
- struct inode *inode_out = file_inode(file_out);
- struct xfs_inode *dest = XFS_I(inode_out);
- struct xfs_mount *mp = src->i_mount;
- xfs_fileoff_t sfsbno, dfsbno;
- xfs_filblks_t fsblen;
- xfs_extlen_t cowextsize;
- ssize_t ret;
-
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return -EOPNOTSUPP;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- /* Prepare and then clone file data. */
- ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
- &len, is_dedupe);
- if (ret <= 0)
- return ret;
-
- trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
- dfsbno = XFS_B_TO_FSBT(mp, pos_out);
- sfsbno = XFS_B_TO_FSBT(mp, pos_in);
- fsblen = XFS_B_TO_FSB(mp, len);
- ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
- pos_out + len);
- if (ret)
- goto out_unlock;
-
- /*
- * Carry the cowextsize hint from src to dest if we're sharing the
- * entire source file to the entire destination file, the source file
- * has a cowextsize hint, and the destination file does not.
- */
- cowextsize = 0;
- if (pos_in == 0 && len == i_size_read(inode_in) &&
- (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
- pos_out == 0 && len >= i_size_read(inode_out) &&
- !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
- cowextsize = src->i_d.di_cowextsize;
-
- ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
- is_dedupe);
-
-out_unlock:
- xfs_reflink_remap_unlock(file_in, file_out);
- if (ret)
- trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
- return ret;
-}
-
-/*
* The user wants to preemptively CoW all shared blocks in this file,
* which enables us to turn off the reflink flag. Iterate all
* extents which are not prealloc/delalloc to see which ranges are
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 7f47202b5639..6d73daef1f13 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -27,13 +27,24 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
+extern loff_t xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, loff_t len,
+ unsigned int remap_flags);
extern int xfs_reflink_inode_has_shared_extents(struct xfs_trans *tp,
struct xfs_inode *ip, bool *has_shared);
extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
struct xfs_trans **tpp);
extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len);
+extern int xfs_reflink_remap_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, loff_t *len,
+ unsigned int remap_flags);
+extern int xfs_reflink_remap_blocks(struct xfs_inode *src, loff_t pos_in,
+ struct xfs_inode *dest, loff_t pos_out, loff_t remap_len,
+ loff_t *remapped);
+extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen,
+ xfs_extlen_t cowextsize, unsigned int remap_flags);
+extern void xfs_reflink_remap_unlock(struct file *file_in,
+ struct file *file_out);
#endif /* __XFS_REFLINK_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a8415a5e..c2de013b2cf4 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -62,10 +62,6 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
-#endif
-
#define raw_cpu_generic_read(pcp) \
({ \
*raw_cpu_ptr(&(pcp)); \
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
new file mode 100644
index 000000000000..48198c36d6b9
--- /dev/null
+++ b/include/crypto/asym_tpm_subtype.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
+#define _LINUX_ASYM_TPM_SUBTYPE_H
+
+#include <linux/keyctl.h>
+
+struct tpm_key {
+ void *blob;
+ u32 blob_len;
+ uint16_t key_len; /* Size in bits of the key */
+ const void *pub_key; /* pointer inside blob to the public key bytes */
+ uint16_t pub_key_len; /* length of the public key */
+};
+
+struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
+
+extern struct asymmetric_key_subtype asym_tpm_subtype;
+
+#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index e0b681a717ba..be626eac9113 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -14,6 +14,8 @@
#ifndef _LINUX_PUBLIC_KEY_H
#define _LINUX_PUBLIC_KEY_H
+#include <linux/keyctl.h>
+
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
@@ -23,6 +25,7 @@
struct public_key {
void *key;
u32 keylen;
+ bool key_is_private;
const char *id_type;
const char *pkey_algo;
};
@@ -40,6 +43,7 @@ struct public_key_signature {
u8 digest_size; /* Number of bytes in digest */
const char *pkey_algo;
const char *hash_algo;
+ const char *encoding;
};
extern void public_key_signature_free(struct public_key_signature *sig);
@@ -65,8 +69,14 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
const union key_payload *payload,
struct key *trusted);
-extern int verify_signature(const struct key *key,
- const struct public_key_signature *sig);
+extern int query_asymmetric_key(const struct kernel_pkey_params *,
+ struct kernel_pkey_query *);
+
+extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int create_signature(struct kernel_pkey_params *, const void *, void *);
+extern int verify_signature(const struct key *,
+ const struct public_key_signature *);
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 91a877fa00cb..9ccad6b062f2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -82,6 +82,53 @@ enum drm_connector_status {
connector_status_unknown = 3,
};
+/**
+ * enum drm_connector_registration_state - userspace registration status for
+ * a &drm_connector
+ *
+ * This enum is used to track the status of initializing a connector and
+ * registering it with userspace, so that DRM can prevent bogus modesets on
+ * connectors that no longer exist.
+ */
+enum drm_connector_registration_state {
+ /**
+ * @DRM_CONNECTOR_INITIALIZING: The connector has just been created,
+ * but has yet to be exposed to userspace. There should be no
+ * additional restrictions to how the state of this connector may be
+ * modified.
+ */
+ DRM_CONNECTOR_INITIALIZING = 0,
+
+ /**
+ * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized
+ * and registered with sysfs, as such it has been exposed to
+ * userspace. There should be no additional restrictions to how the
+ * state of this connector may be modified.
+ */
+ DRM_CONNECTOR_REGISTERED = 1,
+
+ /**
+ * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed
+ * to userspace and has since been unregistered and removed from
+ * userspace, or the connector was unregistered before it had a chance
+ * to be exposed to userspace (e.g. still in the
+ * @DRM_CONNECTOR_INITIALIZING state). When a connector is
+ * unregistered, there are additional restrictions to how its state
+ * may be modified:
+ *
+ * - An unregistered connector may only have its DPMS changed from
+ * On->Off. Once DPMS is changed to Off, it may not be switched back
+ * to On.
+ * - Modesets are not allowed on unregistered connectors, unless they
+ * would result in disabling its assigned CRTCs. This means
+ * disabling a CRTC on an unregistered connector is OK, but enabling
+ * one is not.
+ * - Removing a CRTC from an unregistered connector is OK, but new
+ * CRTCs may never be assigned to an unregistered connector.
+ */
+ DRM_CONNECTOR_UNREGISTERED = 2,
+};
+
enum subpixel_order {
SubPixelUnknown = 0,
SubPixelHorizontalRGB,
@@ -853,10 +900,12 @@ struct drm_connector {
bool ycbcr_420_allowed;
/**
- * @registered: Is this connector exposed (registered) with userspace?
+ * @registration_state: Is this connector initializing, exposed
+ * (registered) with userspace, or unregistered?
+ *
* Protected by @mutex.
*/
- bool registered;
+ enum drm_connector_registration_state registration_state;
/**
* @modes:
@@ -1166,6 +1215,24 @@ static inline void drm_connector_unreference(struct drm_connector *connector)
drm_connector_put(connector);
}
+/**
+ * drm_connector_is_unregistered - has the connector been unregistered from
+ * userspace?
+ * @connector: DRM connector
+ *
+ * Checks whether or not @connector has been unregistered from userspace.
+ *
+ * Returns:
+ * True if the connector was unregistered, false if the connector is
+ * registered or has not yet been registered with userspace.
+ */
+static inline bool
+drm_connector_is_unregistered(struct drm_connector *connector)
+{
+ return READ_ONCE(connector->registration_state) ==
+ DRM_CONNECTOR_UNREGISTERED;
+}
+
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
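
A sketch of the intended use in a driver (hypothetical function): modeset paths can bail out early once the connector has been unregistered, matching the restrictions documented in the enum above:

#include <linux/errno.h>
#include <drm/drm_connector.h>

/* Name is hypothetical; shows the check the enum documentation describes. */
static int example_enable_output(struct drm_connector *connector)
{
	if (drm_connector_is_unregistered(connector))
		return -ENODEV;	/* only disables are allowed past this point */

	/* program the hardware for the new mode */
	return 0;
}
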
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
index b396f00e481d..86a8806e2140 100644
--- a/include/dt-bindings/clock/am3.h
+++ b/include/dt-bindings/clock/am3.h
@@ -16,6 +16,8 @@
#define AM3_CLKCTRL_OFFSET 0x0
#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
+/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
+
/* l4_per clocks */
#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
@@ -105,4 +107,121 @@
#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
+/* XXX: Compatibility part end */
+
+/* l4ls clocks */
+#define AM3_L4LS_CLKCTRL_OFFSET 0x38
+#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET)
+#define AM3_L4LS_UART6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x38)
+#define AM3_L4LS_MMC1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x3c)
+#define AM3_L4LS_ELM_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x40)
+#define AM3_L4LS_I2C3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x44)
+#define AM3_L4LS_I2C2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x48)
+#define AM3_L4LS_SPI0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x4c)
+#define AM3_L4LS_SPI1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x50)
+#define AM3_L4LS_L4_LS_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x60)
+#define AM3_L4LS_UART2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x6c)
+#define AM3_L4LS_UART3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x70)
+#define AM3_L4LS_UART4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x74)
+#define AM3_L4LS_UART5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x78)
+#define AM3_L4LS_TIMER7_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x7c)
+#define AM3_L4LS_TIMER2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x80)
+#define AM3_L4LS_TIMER3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x84)
+#define AM3_L4LS_TIMER4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x88)
+#define AM3_L4LS_RNG_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x90)
+#define AM3_L4LS_GPIO2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xac)
+#define AM3_L4LS_GPIO3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb0)
+#define AM3_L4LS_GPIO4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb4)
+#define AM3_L4LS_D_CAN0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc0)
+#define AM3_L4LS_D_CAN1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc4)
+#define AM3_L4LS_EPWMSS1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xcc)
+#define AM3_L4LS_EPWMSS0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd4)
+#define AM3_L4LS_EPWMSS2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd8)
+#define AM3_L4LS_TIMER5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xec)
+#define AM3_L4LS_TIMER6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf0)
+#define AM3_L4LS_MMC2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf4)
+#define AM3_L4LS_SPINLOCK_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x10c)
+#define AM3_L4LS_MAILBOX_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x110)
+#define AM3_L4LS_OCPWP_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x130)
+
+/* l3s clocks */
+#define AM3_L3S_CLKCTRL_OFFSET 0x1c
+#define AM3_L3S_CLKCTRL_INDEX(offset) ((offset) - AM3_L3S_CLKCTRL_OFFSET)
+#define AM3_L3S_USB_OTG_HS_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x1c)
+#define AM3_L3S_GPMC_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x30)
+#define AM3_L3S_MCASP0_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x34)
+#define AM3_L3S_MCASP1_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x68)
+#define AM3_L3S_MMC3_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0xf8)
+
+/* l3 clocks */
+#define AM3_L3_CLKCTRL_OFFSET 0x24
+#define AM3_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_CLKCTRL_OFFSET)
+#define AM3_L3_TPTC0_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x24)
+#define AM3_L3_EMIF_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x28)
+#define AM3_L3_OCMCRAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x2c)
+#define AM3_L3_AES_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x94)
+#define AM3_L3_SHAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xa0)
+#define AM3_L3_TPCC_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xbc)
+#define AM3_L3_L3_INSTR_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xdc)
+#define AM3_L3_L3_MAIN_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xe0)
+#define AM3_L3_TPTC1_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xfc)
+#define AM3_L3_TPTC2_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x100)
+
+/* l4hs clocks */
+#define AM3_L4HS_CLKCTRL_OFFSET 0x120
+#define AM3_L4HS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4HS_CLKCTRL_OFFSET)
+#define AM3_L4HS_L4_HS_CLKCTRL AM3_L4HS_CLKCTRL_INDEX(0x120)
+
+/* pruss_ocp clocks */
+#define AM3_PRUSS_OCP_CLKCTRL_OFFSET 0xe8
+#define AM3_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM3_PRUSS_OCP_CLKCTRL_OFFSET)
+#define AM3_PRUSS_OCP_PRUSS_CLKCTRL AM3_PRUSS_OCP_CLKCTRL_INDEX(0xe8)
+
+/* cpsw_125mhz clocks */
+#define AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL AM3_CLKCTRL_INDEX(0x14)
+
+/* lcdc clocks */
+#define AM3_LCDC_CLKCTRL_OFFSET 0x18
+#define AM3_LCDC_CLKCTRL_INDEX(offset) ((offset) - AM3_LCDC_CLKCTRL_OFFSET)
+#define AM3_LCDC_LCDC_CLKCTRL AM3_LCDC_CLKCTRL_INDEX(0x18)
+
+/* clk_24mhz clocks */
+#define AM3_CLK_24MHZ_CLKCTRL_OFFSET 0x14c
+#define AM3_CLK_24MHZ_CLKCTRL_INDEX(offset) ((offset) - AM3_CLK_24MHZ_CLKCTRL_OFFSET)
+#define AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL AM3_CLK_24MHZ_CLKCTRL_INDEX(0x14c)
+
+/* l4_wkup clocks */
+#define AM3_L4_WKUP_CONTROL_CLKCTRL AM3_CLKCTRL_INDEX(0x4)
+#define AM3_L4_WKUP_GPIO1_CLKCTRL AM3_CLKCTRL_INDEX(0x8)
+#define AM3_L4_WKUP_L4_WKUP_CLKCTRL AM3_CLKCTRL_INDEX(0xc)
+#define AM3_L4_WKUP_UART1_CLKCTRL AM3_CLKCTRL_INDEX(0xb4)
+#define AM3_L4_WKUP_I2C1_CLKCTRL AM3_CLKCTRL_INDEX(0xb8)
+#define AM3_L4_WKUP_ADC_TSC_CLKCTRL AM3_CLKCTRL_INDEX(0xbc)
+#define AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL AM3_CLKCTRL_INDEX(0xc0)
+#define AM3_L4_WKUP_TIMER1_CLKCTRL AM3_CLKCTRL_INDEX(0xc4)
+#define AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL AM3_CLKCTRL_INDEX(0xc8)
+#define AM3_L4_WKUP_WD_TIMER2_CLKCTRL AM3_CLKCTRL_INDEX(0xd4)
+
+/* l3_aon clocks */
+#define AM3_L3_AON_CLKCTRL_OFFSET 0x14
+#define AM3_L3_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_AON_CLKCTRL_OFFSET)
+#define AM3_L3_AON_DEBUGSS_CLKCTRL AM3_L3_AON_CLKCTRL_INDEX(0x14)
+
+/* l4_wkup_aon clocks */
+#define AM3_L4_WKUP_AON_CLKCTRL_OFFSET 0xb0
+#define AM3_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_AON_CLKCTRL_OFFSET)
+#define AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL AM3_L4_WKUP_AON_CLKCTRL_INDEX(0xb0)
+
+/* mpu clocks */
+#define AM3_MPU_MPU_CLKCTRL AM3_CLKCTRL_INDEX(0x4)
+
+/* l4_rtc clocks */
+#define AM3_L4_RTC_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
+
+/* gfx_l3 clocks */
+#define AM3_GFX_L3_GFX_CLKCTRL AM3_CLKCTRL_INDEX(0x4)
+
+/* l4_cefuse clocks */
+#define AM3_L4_CEFUSE_CEFUSE_CLKCTRL AM3_CLKCTRL_INDEX(0x20)
+
#endif
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
index d21df00b3270..0f545b5afd60 100644
--- a/include/dt-bindings/clock/am4.h
+++ b/include/dt-bindings/clock/am4.h
@@ -16,6 +16,8 @@
#define AM4_CLKCTRL_OFFSET 0x20
#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
+/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
+
/* l4_wkup clocks */
#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
@@ -110,4 +112,134 @@
#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
+/* XXX: Compatibility part end. */
+
+/* l3s_tsc clocks */
+#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120
+#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET)
+#define AM4_L3S_TSC_ADC_TSC_CLKCTRL AM4_L3S_TSC_CLKCTRL_INDEX(0x120)
+
+/* l4_wkup_aon clocks */
+#define AM4_L4_WKUP_AON_CLKCTRL_OFFSET 0x228
+#define AM4_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_AON_CLKCTRL_OFFSET)
+#define AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x228)
+#define AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x230)
+
+/* l4_wkup clocks */
+#define AM4_L4_WKUP_CLKCTRL_OFFSET 0x220
+#define AM4_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_CLKCTRL_OFFSET)
+#define AM4_L4_WKUP_L4_WKUP_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x220)
+#define AM4_L4_WKUP_TIMER1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x328)
+#define AM4_L4_WKUP_WD_TIMER2_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x338)
+#define AM4_L4_WKUP_I2C1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x340)
+#define AM4_L4_WKUP_UART1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x348)
+#define AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x350)
+#define AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x358)
+#define AM4_L4_WKUP_CONTROL_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x360)
+#define AM4_L4_WKUP_GPIO1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x368)
+
+/* mpu clocks */
+#define AM4_MPU_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* gfx_l3 clocks */
+#define AM4_GFX_L3_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* l4_rtc clocks */
+#define AM4_L4_RTC_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* l3 clocks */
+#define AM4_L3_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+#define AM4_L3_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
+#define AM4_L3_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
+#define AM4_L3_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
+#define AM4_L3_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
+#define AM4_L3_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
+#define AM4_L3_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
+#define AM4_L3_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
+#define AM4_L3_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
+#define AM4_L3_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
+#define AM4_L3_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
+
+/* l3s clocks */
+#define AM4_L3S_CLKCTRL_OFFSET 0x68
+#define AM4_L3S_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_CLKCTRL_OFFSET)
+#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
+#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
+#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
+#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
+#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
+#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
+#define AM4_L3S_QSPI_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x258)
+#define AM4_L3S_USB_OTG_SS0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x260)
+#define AM4_L3S_USB_OTG_SS1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x268)
+
+/* pruss_ocp clocks */
+#define AM4_PRUSS_OCP_CLKCTRL_OFFSET 0x320
+#define AM4_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM4_PRUSS_OCP_CLKCTRL_OFFSET)
+#define AM4_PRUSS_OCP_PRUSS_CLKCTRL AM4_PRUSS_OCP_CLKCTRL_INDEX(0x320)
+
+/* l4ls clocks */
+#define AM4_L4LS_CLKCTRL_OFFSET 0x420
+#define AM4_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM4_L4LS_CLKCTRL_OFFSET)
+#define AM4_L4LS_L4_LS_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x420)
+#define AM4_L4LS_D_CAN0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x428)
+#define AM4_L4LS_D_CAN1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x430)
+#define AM4_L4LS_EPWMSS0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x438)
+#define AM4_L4LS_EPWMSS1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x440)
+#define AM4_L4LS_EPWMSS2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x448)
+#define AM4_L4LS_EPWMSS3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x450)
+#define AM4_L4LS_EPWMSS4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x458)
+#define AM4_L4LS_EPWMSS5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x460)
+#define AM4_L4LS_ELM_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x468)
+#define AM4_L4LS_GPIO2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x478)
+#define AM4_L4LS_GPIO3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x480)
+#define AM4_L4LS_GPIO4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x488)
+#define AM4_L4LS_GPIO5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x490)
+#define AM4_L4LS_GPIO6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x498)
+#define AM4_L4LS_HDQ1W_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a0)
+#define AM4_L4LS_I2C2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a8)
+#define AM4_L4LS_I2C3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b0)
+#define AM4_L4LS_MAILBOX_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b8)
+#define AM4_L4LS_MMC1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c0)
+#define AM4_L4LS_MMC2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c8)
+#define AM4_L4LS_RNG_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4e0)
+#define AM4_L4LS_SPI0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x500)
+#define AM4_L4LS_SPI1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x508)
+#define AM4_L4LS_SPI2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x510)
+#define AM4_L4LS_SPI3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x518)
+#define AM4_L4LS_SPI4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x520)
+#define AM4_L4LS_SPINLOCK_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x528)
+#define AM4_L4LS_TIMER2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x530)
+#define AM4_L4LS_TIMER3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x538)
+#define AM4_L4LS_TIMER4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x540)
+#define AM4_L4LS_TIMER5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x548)
+#define AM4_L4LS_TIMER6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x550)
+#define AM4_L4LS_TIMER7_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x558)
+#define AM4_L4LS_TIMER8_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x560)
+#define AM4_L4LS_TIMER9_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x568)
+#define AM4_L4LS_TIMER10_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x570)
+#define AM4_L4LS_TIMER11_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x578)
+#define AM4_L4LS_UART2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x580)
+#define AM4_L4LS_UART3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x588)
+#define AM4_L4LS_UART4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x590)
+#define AM4_L4LS_UART5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x598)
+#define AM4_L4LS_UART6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5a0)
+#define AM4_L4LS_OCP2SCP0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5b8)
+#define AM4_L4LS_OCP2SCP1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5c0)
+
+/* emif clocks */
+#define AM4_EMIF_CLKCTRL_OFFSET 0x720
+#define AM4_EMIF_CLKCTRL_INDEX(offset) ((offset) - AM4_EMIF_CLKCTRL_OFFSET)
+#define AM4_EMIF_EMIF_CLKCTRL AM4_EMIF_CLKCTRL_INDEX(0x720)
+
+/* dss clocks */
+#define AM4_DSS_CLKCTRL_OFFSET 0xa20
+#define AM4_DSS_CLKCTRL_INDEX(offset) ((offset) - AM4_DSS_CLKCTRL_OFFSET)
+#define AM4_DSS_DSS_CORE_CLKCTRL AM4_DSS_CLKCTRL_INDEX(0xa20)
+
+/* cpsw_125mhz clocks */
+#define AM4_CPSW_125MHZ_CLKCTRL_OFFSET 0xb20
+#define AM4_CPSW_125MHZ_CLKCTRL_INDEX(offset) ((offset) - AM4_CPSW_125MHZ_CLKCTRL_OFFSET)
+#define AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL AM4_CPSW_125MHZ_CLKCTRL_INDEX(0xb20)
+
#endif
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index ab3ee241d10c..ed30da28d820 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -9,6 +9,20 @@
#ifndef _DT_BINDINGS_CLK_AT91_H
#define _DT_BINDINGS_CLK_AT91_H
+#define PMC_TYPE_CORE 0
+#define PMC_TYPE_SYSTEM 1
+#define PMC_TYPE_PERIPHERAL 2
+#define PMC_TYPE_GCK 3
+
+#define PMC_SLOW 0
+#define PMC_MCK 1
+#define PMC_UTMI 2
+#define PMC_MAIN 3
+#define PMC_MCK2 4
+#define PMC_I2S0_MUX 5
+#define PMC_I2S1_MUX 6
+
+#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
#define AT91_PMC_LOCKA 1 /* PLLA Lock */
#define AT91_PMC_LOCKB 2 /* PLLB Lock */
@@ -19,5 +33,6 @@
#define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */
#define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */
#define AT91_PMC_GCKRDY 24 /* Generated Clocks */
+#endif
#endif
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index d7549c57cac3..ec969b5aeb25 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -16,19 +16,21 @@
#define DRA7_CLKCTRL_OFFSET 0x20
#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
+/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
+
/* mpu clocks */
#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
/* ipu clocks */
-#define DRA7_IPU_CLKCTRL_OFFSET 0x40
-#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET)
-#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50)
-#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58)
-#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60)
-#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68)
-#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70)
-#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78)
-#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80)
+#define _DRA7_IPU_CLKCTRL_OFFSET 0x40
+#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET)
+#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50)
+#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58)
+#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60)
+#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68)
+#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70)
+#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78)
+#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80)
/* rtc clocks */
#define DRA7_RTC_CLKCTRL_OFFSET 0x40
@@ -99,65 +101,65 @@
#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
/* l4per clocks */
-#define DRA7_L4PER_CLKCTRL_OFFSET 0x0
-#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET)
-#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc)
-#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14)
-#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28)
-#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30)
-#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48)
-#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50)
-#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58)
-#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60)
-#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68)
-#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70)
-#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78)
-#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80)
-#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88)
-#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90)
-#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98)
-#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0)
-#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8)
-#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0)
-#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8)
-#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0)
-#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4)
-#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8)
-#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0)
-#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8)
-#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0)
-#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8)
-#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100)
-#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108)
-#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110)
-#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118)
-#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120)
-#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128)
-#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130)
-#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138)
-#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140)
-#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148)
-#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150)
-#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158)
-#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160)
-#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168)
-#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170)
-#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178)
-#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190)
-#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198)
-#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
-#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
-#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
-#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
-#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
-#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
-#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
-#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
-#define DRA7_DCAN2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
-#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204)
-#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208)
+#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0
+#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET)
+#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc)
+#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14)
+#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28)
+#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30)
+#define DRA7_TIMER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x38)
+#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40)
+#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48)
+#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50)
+#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58)
+#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60)
+#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68)
+#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70)
+#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78)
+#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80)
+#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88)
+#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90)
+#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98)
+#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0)
+#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8)
+#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0)
+#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8)
+#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0)
+#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4)
+#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8)
+#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0)
+#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8)
+#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0)
+#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8)
+#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100)
+#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108)
+#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110)
+#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118)
+#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120)
+#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128)
+#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130)
+#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138)
+#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140)
+#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148)
+#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150)
+#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158)
+#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160)
+#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168)
+#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170)
+#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178)
+#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190)
+#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198)
+#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
+#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
+#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
+#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
+#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
+#define DRA7_UART7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
+#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
+#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
+#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
+#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204)
+#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208)
/* wkupaon clocks */
#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
@@ -170,4 +172,192 @@
#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
+/* XXX: Compatibility part end. */
+
+/* mpu clocks */
+#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* dsp1 clocks */
+#define DRA7_DSP1_MMU0_DSP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* ipu1 clocks */
+#define DRA7_IPU1_MMU_IPU1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* ipu clocks */
+#define DRA7_IPU_CLKCTRL_OFFSET 0x50
+#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET)
+#define DRA7_IPU_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50)
+#define DRA7_IPU_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58)
+#define DRA7_IPU_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60)
+#define DRA7_IPU_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68)
+#define DRA7_IPU_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70)
+#define DRA7_IPU_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78)
+#define DRA7_IPU_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80)
+
+/* dsp2 clocks */
+#define DRA7_DSP2_MMU0_DSP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* rtc clocks */
+#define DRA7_RTC_RTCSS_CLKCTRL DRA7_CLKCTRL_INDEX(0x44)
+
+/* coreaon clocks */
+#define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
+
+/* l3main1 clocks */
+#define DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L3MAIN1_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_L3MAIN1_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
+#define DRA7_L3MAIN1_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
+#define DRA7_L3MAIN1_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_L3MAIN1_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_L3MAIN1_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
+
+/* ipu2 clocks */
+#define DRA7_IPU2_MMU_IPU2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* dma clocks */
+#define DRA7_DMA_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* emif clocks */
+#define DRA7_EMIF_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* atl clocks */
+#define DRA7_ATL_CLKCTRL_OFFSET 0x0
+#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
+#define DRA7_ATL_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
+
+/* l4cfg clocks */
+#define DRA7_L4CFG_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L4CFG_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_L4CFG_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_L4CFG_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_L4CFG_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_L4CFG_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
+#define DRA7_L4CFG_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
+#define DRA7_L4CFG_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
+#define DRA7_L4CFG_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
+#define DRA7_L4CFG_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
+#define DRA7_L4CFG_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_L4CFG_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_L4CFG_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
+#define DRA7_L4CFG_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
+#define DRA7_L4CFG_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
+
+/* l3instr clocks */
+#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
+/* dss clocks */
+#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+
+/* l3init clocks */
+#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_L3INIT_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
+#define DRA7_L3INIT_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_L3INIT_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_L3INIT_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_L3INIT_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
+#define DRA7_L3INIT_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
+#define DRA7_L3INIT_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
+
+/* pcie clocks */
+#define DRA7_PCIE_CLKCTRL_OFFSET 0xb0
+#define DRA7_PCIE_CLKCTRL_INDEX(offset) ((offset) - DRA7_PCIE_CLKCTRL_OFFSET)
+#define DRA7_PCIE_PCIE1_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb0)
+#define DRA7_PCIE_PCIE2_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb8)
+
+/* gmac clocks */
+#define DRA7_GMAC_CLKCTRL_OFFSET 0xd0
+#define DRA7_GMAC_CLKCTRL_INDEX(offset) ((offset) - DRA7_GMAC_CLKCTRL_OFFSET)
+#define DRA7_GMAC_GMAC_CLKCTRL DRA7_GMAC_CLKCTRL_INDEX(0xd0)
+
+/* l4per clocks */
+#define DRA7_L4PER_CLKCTRL_OFFSET 0x28
+#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET)
+#define DRA7_L4PER_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28)
+#define DRA7_L4PER_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30)
+#define DRA7_L4PER_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38)
+#define DRA7_L4PER_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40)
+#define DRA7_L4PER_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48)
+#define DRA7_L4PER_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50)
+#define DRA7_L4PER_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58)
+#define DRA7_L4PER_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60)
+#define DRA7_L4PER_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68)
+#define DRA7_L4PER_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70)
+#define DRA7_L4PER_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78)
+#define DRA7_L4PER_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80)
+#define DRA7_L4PER_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88)
+#define DRA7_L4PER_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0)
+#define DRA7_L4PER_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8)
+#define DRA7_L4PER_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0)
+#define DRA7_L4PER_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8)
+#define DRA7_L4PER_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0)
+#define DRA7_L4PER_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0)
+#define DRA7_L4PER_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8)
+#define DRA7_L4PER_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100)
+#define DRA7_L4PER_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108)
+#define DRA7_L4PER_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110)
+#define DRA7_L4PER_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118)
+#define DRA7_L4PER_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120)
+#define DRA7_L4PER_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128)
+#define DRA7_L4PER_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140)
+#define DRA7_L4PER_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148)
+#define DRA7_L4PER_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150)
+#define DRA7_L4PER_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158)
+#define DRA7_L4PER_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170)
+
+/* l4sec clocks */
+#define DRA7_L4SEC_CLKCTRL_OFFSET 0x1a0
+#define DRA7_L4SEC_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4SEC_CLKCTRL_OFFSET)
+#define DRA7_L4SEC_AES1_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a0)
+#define DRA7_L4SEC_AES2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a8)
+#define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0)
+#define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0)
+#define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8)
+
+/* l4per2 clocks */
+#define DRA7_L4PER2_CLKCTRL_OFFSET 0xc
+#define DRA7_L4PER2_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER2_CLKCTRL_OFFSET)
+#define DRA7_L4PER2_L4_PER2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc)
+#define DRA7_L4PER2_PRUSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x18)
+#define DRA7_L4PER2_PRUSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x20)
+#define DRA7_L4PER2_EPWMSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x90)
+#define DRA7_L4PER2_EPWMSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x98)
+#define DRA7_L4PER2_EPWMSS0_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc4)
+#define DRA7_L4PER2_QSPI_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x138)
+#define DRA7_L4PER2_MCASP2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x160)
+#define DRA7_L4PER2_MCASP3_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x168)
+#define DRA7_L4PER2_MCASP5_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x178)
+#define DRA7_L4PER2_MCASP8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x190)
+#define DRA7_L4PER2_MCASP4_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x198)
+#define DRA7_L4PER2_UART7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1d0)
+#define DRA7_L4PER2_UART8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e0)
+#define DRA7_L4PER2_UART9_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e8)
+#define DRA7_L4PER2_DCAN2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1f0)
+#define DRA7_L4PER2_MCASP6_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x204)
+#define DRA7_L4PER2_MCASP7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x208)
+
+/* l4per3 clocks */
+#define DRA7_L4PER3_CLKCTRL_OFFSET 0x14
+#define DRA7_L4PER3_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER3_CLKCTRL_OFFSET)
+#define DRA7_L4PER3_L4_PER3_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x14)
+#define DRA7_L4PER3_TIMER13_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xc8)
+#define DRA7_L4PER3_TIMER14_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd0)
+#define DRA7_L4PER3_TIMER15_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd8)
+#define DRA7_L4PER3_TIMER16_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x130)
+
+/* wkupaon clocks */
+#define DRA7_WKUPAON_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_WKUPAON_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_WKUPAON_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
+#define DRA7_WKUPAON_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
+#define DRA7_WKUPAON_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_WKUPAON_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_WKUPAON_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_WKUPAON_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_WKUPAON_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
+
#endif
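
A quick orientation on the DRA7 change above: the legacy per-domain helper macros are renamed with a leading underscore so they cannot collide with the new DRA7_IPU_*/DRA7_L4PER_* offsets, and the new constants index each CLKCTRL register relative to the first register of its own clock domain (e.g. DRA7_L4PER_TIMER10_CLKCTRL evaluates to 0x28 - 0x28 = 0). A minimal, hedged consumer sketch follows; the node, its label and the value of the second (clock bit) cell are assumptions, not part of this patch:

    #include <dt-bindings/clock/dra7.h>

    /* Illustrative only: with the two-cell ti,clkctrl binding the first cell
     * is the CLKCTRL register offset relative to the domain's provider node,
     * the second selects the clock bit; 24 is an assumed value here. */
    timer-example {
            clocks = <&l4per_clkctrl DRA7_L4PER_TIMER10_CLKCTRL 24>;
            clock-names = "fck";
    };
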
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 5b1d68512360..a0439ce8e8d3 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -187,32 +187,6 @@
#define CLK_MIPI_HSI 349 /* Exynos4210 only */
#define CLK_PIXELASYNCM0 351
#define CLK_PIXELASYNCM1 352
-#define CLK_FIMC_LITE0 353 /* Exynos4x12 only */
-#define CLK_FIMC_LITE1 354 /* Exynos4x12 only */
-#define CLK_PPMUISPX 355 /* Exynos4x12 only */
-#define CLK_PPMUISPMX 356 /* Exynos4x12 only */
-#define CLK_FIMC_ISP 357 /* Exynos4x12 only */
-#define CLK_FIMC_DRC 358 /* Exynos4x12 only */
-#define CLK_FIMC_FD 359 /* Exynos4x12 only */
-#define CLK_MCUISP 360 /* Exynos4x12 only */
-#define CLK_GICISP 361 /* Exynos4x12 only */
-#define CLK_SMMU_ISP 362 /* Exynos4x12 only */
-#define CLK_SMMU_DRC 363 /* Exynos4x12 only */
-#define CLK_SMMU_FD 364 /* Exynos4x12 only */
-#define CLK_SMMU_LITE0 365 /* Exynos4x12 only */
-#define CLK_SMMU_LITE1 366 /* Exynos4x12 only */
-#define CLK_MCUCTL_ISP 367 /* Exynos4x12 only */
-#define CLK_MPWM_ISP 368 /* Exynos4x12 only */
-#define CLK_I2C0_ISP 369 /* Exynos4x12 only */
-#define CLK_I2C1_ISP 370 /* Exynos4x12 only */
-#define CLK_MTCADC_ISP 371 /* Exynos4x12 only */
-#define CLK_PWM_ISP 372 /* Exynos4x12 only */
-#define CLK_WDT_ISP 373 /* Exynos4x12 only */
-#define CLK_UART_ISP 374 /* Exynos4x12 only */
-#define CLK_ASYNCAXIM 375 /* Exynos4x12 only */
-#define CLK_SMMU_ISPCX 376 /* Exynos4x12 only */
-#define CLK_SPI0_ISP 377 /* Exynos4x12 only */
-#define CLK_SPI1_ISP 378 /* Exynos4x12 only */
#define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */
#define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */
#define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */
@@ -254,10 +228,6 @@
#define CLK_PPMUACP 415
/* div clocks */
-#define CLK_DIV_ISP0 450 /* Exynos4x12 only */
-#define CLK_DIV_ISP1 451 /* Exynos4x12 only */
-#define CLK_DIV_MCUISP0 452 /* Exynos4x12 only */
-#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */
#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */
#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
#define CLK_DIV_ACP 456
diff --git a/include/dt-bindings/clock/hi3670-clock.h b/include/dt-bindings/clock/hi3670-clock.h
new file mode 100644
index 000000000000..fa48583f87d6
--- /dev/null
+++ b/include/dt-bindings/clock/hi3670-clock.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device Tree binding constants for HiSilicon Hi3670 SoC
+ *
+ * Copyright (c) 2001-2021, Huawei Tech. Co., Ltd.
+ * Copyright (c) 2018 Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_HI3670_H
+#define __DT_BINDINGS_CLOCK_HI3670_H
+
+/* clk in stub clock */
+#define HI3670_CLK_STUB_CLUSTER0 0
+#define HI3670_CLK_STUB_CLUSTER1 1
+#define HI3670_CLK_STUB_GPU 2
+#define HI3670_CLK_STUB_DDR 3
+#define HI3670_CLK_STUB_DDR_VOTE 4
+#define HI3670_CLK_STUB_DDR_LIMIT 5
+#define HI3670_CLK_STUB_NUM 6
+
+/* clk in crg clock */
+#define HI3670_CLKIN_SYS 0
+#define HI3670_CLKIN_REF 1
+#define HI3670_CLK_FLL_SRC 2
+#define HI3670_CLK_PPLL0 3
+#define HI3670_CLK_PPLL1 4
+#define HI3670_CLK_PPLL2 5
+#define HI3670_CLK_PPLL3 6
+#define HI3670_CLK_PPLL4 7
+#define HI3670_CLK_PPLL6 8
+#define HI3670_CLK_PPLL7 9
+#define HI3670_CLK_PPLL_PCIE 10
+#define HI3670_CLK_PCIEPLL_REV 11
+#define HI3670_CLK_SCPLL 12
+#define HI3670_PCLK 13
+#define HI3670_CLK_UART0_DBG 14
+#define HI3670_CLK_UART6 15
+#define HI3670_OSC32K 16
+#define HI3670_OSC19M 17
+#define HI3670_CLK_480M 18
+#define HI3670_CLK_INVALID 19
+#define HI3670_CLK_DIV_SYSBUS 20
+#define HI3670_CLK_FACTOR_MMC 21
+#define HI3670_CLK_SD_SYS 22
+#define HI3670_CLK_SDIO_SYS 23
+#define HI3670_CLK_DIV_A53HPM 24
+#define HI3670_CLK_DIV_320M 25
+#define HI3670_PCLK_GATE_UART0 26
+#define HI3670_CLK_FACTOR_UART0 27
+#define HI3670_CLK_FACTOR_USB3PHY_PLL 28
+#define HI3670_CLK_GATE_ABB_USB 29
+#define HI3670_CLK_GATE_UFSPHY_REF 30
+#define HI3670_ICS_VOLT_HIGH 31
+#define HI3670_ICS_VOLT_MIDDLE 32
+#define HI3670_VENC_VOLT_HOLD 33
+#define HI3670_VDEC_VOLT_HOLD 34
+#define HI3670_EDC_VOLT_HOLD 35
+#define HI3670_CLK_ISP_SNCLK_FAC 36
+#define HI3670_CLK_FACTOR_RXDPHY 37
+#define HI3670_AUTODIV_SYSBUS 38
+#define HI3670_AUTODIV_EMMC0BUS 39
+#define HI3670_PCLK_ANDGT_MMC1_PCIE 40
+#define HI3670_CLK_GATE_VCODECBUS_GT 41
+#define HI3670_CLK_ANDGT_SD 42
+#define HI3670_CLK_SD_SYS_GT 43
+#define HI3670_CLK_ANDGT_SDIO 44
+#define HI3670_CLK_SDIO_SYS_GT 45
+#define HI3670_CLK_A53HPM_ANDGT 46
+#define HI3670_CLK_320M_PLL_GT 47
+#define HI3670_CLK_ANDGT_UARTH 48
+#define HI3670_CLK_ANDGT_UARTL 49
+#define HI3670_CLK_ANDGT_UART0 50
+#define HI3670_CLK_ANDGT_SPI 51
+#define HI3670_CLK_ANDGT_PCIEAXI 52
+#define HI3670_CLK_DIV_AO_ASP_GT 53
+#define HI3670_CLK_GATE_CSI_TRANS 54
+#define HI3670_CLK_GATE_DSI_TRANS 55
+#define HI3670_CLK_ANDGT_PTP 56
+#define HI3670_CLK_ANDGT_OUT0 57
+#define HI3670_CLK_ANDGT_OUT1 58
+#define HI3670_CLKGT_DP_AUDIO_PLL_AO 59
+#define HI3670_CLK_ANDGT_VDEC 60
+#define HI3670_CLK_ANDGT_VENC 61
+#define HI3670_CLK_ISP_SNCLK_ANGT 62
+#define HI3670_CLK_ANDGT_RXDPHY 63
+#define HI3670_CLK_ANDGT_ICS 64
+#define HI3670_AUTODIV_DMABUS 65
+#define HI3670_CLK_MUX_SYSBUS 66
+#define HI3670_CLK_MUX_VCODECBUS 67
+#define HI3670_CLK_MUX_SD_SYS 68
+#define HI3670_CLK_MUX_SD_PLL 69
+#define HI3670_CLK_MUX_SDIO_SYS 70
+#define HI3670_CLK_MUX_SDIO_PLL 71
+#define HI3670_CLK_MUX_A53HPM 72
+#define HI3670_CLK_MUX_320M 73
+#define HI3670_CLK_MUX_UARTH 74
+#define HI3670_CLK_MUX_UARTL 75
+#define HI3670_CLK_MUX_UART0 76
+#define HI3670_CLK_MUX_I2C 77
+#define HI3670_CLK_MUX_SPI 78
+#define HI3670_CLK_MUX_PCIEAXI 79
+#define HI3670_CLK_MUX_AO_ASP 80
+#define HI3670_CLK_MUX_VDEC 81
+#define HI3670_CLK_MUX_VENC 82
+#define HI3670_CLK_ISP_SNCLK_MUX0 83
+#define HI3670_CLK_ISP_SNCLK_MUX1 84
+#define HI3670_CLK_ISP_SNCLK_MUX2 85
+#define HI3670_CLK_MUX_RXDPHY_CFG 86
+#define HI3670_CLK_MUX_ICS 87
+#define HI3670_CLK_DIV_CFGBUS 88
+#define HI3670_CLK_DIV_MMC0BUS 89
+#define HI3670_CLK_DIV_MMC1BUS 90
+#define HI3670_PCLK_DIV_MMC1_PCIE 91
+#define HI3670_CLK_DIV_VCODECBUS 92
+#define HI3670_CLK_DIV_SD 93
+#define HI3670_CLK_DIV_SDIO 94
+#define HI3670_CLK_DIV_UARTH 95
+#define HI3670_CLK_DIV_UARTL 96
+#define HI3670_CLK_DIV_UART0 97
+#define HI3670_CLK_DIV_I2C 98
+#define HI3670_CLK_DIV_SPI 99
+#define HI3670_CLK_DIV_PCIEAXI 100
+#define HI3670_CLK_DIV_AO_ASP 101
+#define HI3670_CLK_DIV_CSI_TRANS 102
+#define HI3670_CLK_DIV_DSI_TRANS 103
+#define HI3670_CLK_DIV_PTP 104
+#define HI3670_CLK_DIV_CLKOUT0_PLL 105
+#define HI3670_CLK_DIV_CLKOUT1_PLL 106
+#define HI3670_CLKDIV_DP_AUDIO_PLL_AO 107
+#define HI3670_CLK_DIV_VDEC 108
+#define HI3670_CLK_DIV_VENC 109
+#define HI3670_CLK_ISP_SNCLK_DIV0 110
+#define HI3670_CLK_ISP_SNCLK_DIV1 111
+#define HI3670_CLK_ISP_SNCLK_DIV2 112
+#define HI3670_CLK_DIV_ICS 113
+#define HI3670_PPLL1_EN_ACPU 114
+#define HI3670_PPLL2_EN_ACPU 115
+#define HI3670_PPLL3_EN_ACPU 116
+#define HI3670_PPLL1_GT_CPU 117
+#define HI3670_PPLL2_GT_CPU 118
+#define HI3670_PPLL3_GT_CPU 119
+#define HI3670_CLK_GATE_PPLL2_MEDIA 120
+#define HI3670_CLK_GATE_PPLL3_MEDIA 121
+#define HI3670_CLK_GATE_PPLL4_MEDIA 122
+#define HI3670_CLK_GATE_PPLL6_MEDIA 123
+#define HI3670_CLK_GATE_PPLL7_MEDIA 124
+#define HI3670_PCLK_GPIO0 125
+#define HI3670_PCLK_GPIO1 126
+#define HI3670_PCLK_GPIO2 127
+#define HI3670_PCLK_GPIO3 128
+#define HI3670_PCLK_GPIO4 129
+#define HI3670_PCLK_GPIO5 130
+#define HI3670_PCLK_GPIO6 131
+#define HI3670_PCLK_GPIO7 132
+#define HI3670_PCLK_GPIO8 133
+#define HI3670_PCLK_GPIO9 134
+#define HI3670_PCLK_GPIO10 135
+#define HI3670_PCLK_GPIO11 136
+#define HI3670_PCLK_GPIO12 137
+#define HI3670_PCLK_GPIO13 138
+#define HI3670_PCLK_GPIO14 139
+#define HI3670_PCLK_GPIO15 140
+#define HI3670_PCLK_GPIO16 141
+#define HI3670_PCLK_GPIO17 142
+#define HI3670_PCLK_GPIO20 143
+#define HI3670_PCLK_GPIO21 144
+#define HI3670_PCLK_GATE_DSI0 145
+#define HI3670_PCLK_GATE_DSI1 146
+#define HI3670_HCLK_GATE_USB3OTG 147
+#define HI3670_ACLK_GATE_USB3DVFS 148
+#define HI3670_HCLK_GATE_SDIO 149
+#define HI3670_PCLK_GATE_PCIE_SYS 150
+#define HI3670_PCLK_GATE_PCIE_PHY 151
+#define HI3670_PCLK_GATE_MMC1_PCIE 152
+#define HI3670_PCLK_GATE_MMC0_IOC 153
+#define HI3670_PCLK_GATE_MMC1_IOC 154
+#define HI3670_CLK_GATE_DMAC 155
+#define HI3670_CLK_GATE_VCODECBUS2DDR 156
+#define HI3670_CLK_CCI400_BYPASS 157
+#define HI3670_CLK_GATE_CCI400 158
+#define HI3670_CLK_GATE_SD 159
+#define HI3670_HCLK_GATE_SD 160
+#define HI3670_CLK_GATE_SDIO 161
+#define HI3670_CLK_GATE_A57HPM 162
+#define HI3670_CLK_GATE_A53HPM 163
+#define HI3670_CLK_GATE_PA_A53 164
+#define HI3670_CLK_GATE_PA_A57 165
+#define HI3670_CLK_GATE_PA_G3D 166
+#define HI3670_CLK_GATE_GPUHPM 167
+#define HI3670_CLK_GATE_PERIHPM 168
+#define HI3670_CLK_GATE_AOHPM 169
+#define HI3670_CLK_GATE_UART1 170
+#define HI3670_CLK_GATE_UART4 171
+#define HI3670_PCLK_GATE_UART1 172
+#define HI3670_PCLK_GATE_UART4 173
+#define HI3670_CLK_GATE_UART2 174
+#define HI3670_CLK_GATE_UART5 175
+#define HI3670_PCLK_GATE_UART2 176
+#define HI3670_PCLK_GATE_UART5 177
+#define HI3670_CLK_GATE_UART0 178
+#define HI3670_CLK_GATE_I2C3 179
+#define HI3670_CLK_GATE_I2C4 180
+#define HI3670_CLK_GATE_I2C7 181
+#define HI3670_PCLK_GATE_I2C3 182
+#define HI3670_PCLK_GATE_I2C4 183
+#define HI3670_PCLK_GATE_I2C7 184
+#define HI3670_CLK_GATE_SPI1 185
+#define HI3670_CLK_GATE_SPI4 186
+#define HI3670_PCLK_GATE_SPI1 187
+#define HI3670_PCLK_GATE_SPI4 188
+#define HI3670_CLK_GATE_USB3OTG_REF 189
+#define HI3670_CLK_GATE_USB2PHY_REF 190
+#define HI3670_CLK_GATE_PCIEAUX 191
+#define HI3670_ACLK_GATE_PCIE 192
+#define HI3670_CLK_GATE_MMC1_PCIEAXI 193
+#define HI3670_CLK_GATE_PCIEPHY_REF 194
+#define HI3670_CLK_GATE_PCIE_DEBOUNCE 195
+#define HI3670_CLK_GATE_PCIEIO 196
+#define HI3670_CLK_GATE_PCIE_HP 197
+#define HI3670_CLK_GATE_AO_ASP 198
+#define HI3670_PCLK_GATE_PCTRL 199
+#define HI3670_CLK_CSI_TRANS_GT 200
+#define HI3670_CLK_DSI_TRANS_GT 201
+#define HI3670_CLK_GATE_PWM 202
+#define HI3670_ABB_AUDIO_EN0 203
+#define HI3670_ABB_AUDIO_EN1 204
+#define HI3670_ABB_AUDIO_GT_EN0 205
+#define HI3670_ABB_AUDIO_GT_EN1 206
+#define HI3670_CLK_GATE_DP_AUDIO_PLL_AO 207
+#define HI3670_PERI_VOLT_HOLD 208
+#define HI3670_PERI_VOLT_MIDDLE 209
+#define HI3670_CLK_GATE_ISP_SNCLK0 210
+#define HI3670_CLK_GATE_ISP_SNCLK1 211
+#define HI3670_CLK_GATE_ISP_SNCLK2 212
+#define HI3670_CLK_GATE_RXDPHY0_CFG 213
+#define HI3670_CLK_GATE_RXDPHY1_CFG 214
+#define HI3670_CLK_GATE_RXDPHY2_CFG 215
+#define HI3670_CLK_GATE_TXDPHY0_CFG 216
+#define HI3670_CLK_GATE_TXDPHY0_REF 217
+#define HI3670_CLK_GATE_TXDPHY1_CFG 218
+#define HI3670_CLK_GATE_TXDPHY1_REF 219
+#define HI3670_CLK_GATE_MEDIA_TCXO 220
+
+/* clk in sctrl */
+#define HI3670_CLK_ANDGT_IOPERI 0
+#define HI3670_CLKANDGT_ASP_SUBSYS_PERI 1
+#define HI3670_CLK_ANGT_ASP_SUBSYS 2
+#define HI3670_CLK_MUX_UFS_SUBSYS 3
+#define HI3670_CLK_MUX_CLKOUT0 4
+#define HI3670_CLK_MUX_CLKOUT1 5
+#define HI3670_CLK_MUX_ASP_SUBSYS_PERI 6
+#define HI3670_CLK_MUX_ASP_PLL 7
+#define HI3670_CLK_DIV_AOBUS 8
+#define HI3670_CLK_DIV_UFS_SUBSYS 9
+#define HI3670_CLK_DIV_IOPERI 10
+#define HI3670_CLK_DIV_CLKOUT0_TCXO 11
+#define HI3670_CLK_DIV_CLKOUT1_TCXO 12
+#define HI3670_CLK_ASP_SUBSYS_PERI_DIV 13
+#define HI3670_CLK_DIV_ASP_SUBSYS 14
+#define HI3670_PPLL0_EN_ACPU 15
+#define HI3670_PPLL0_GT_CPU 16
+#define HI3670_CLK_GATE_PPLL0_MEDIA 17
+#define HI3670_PCLK_GPIO18 18
+#define HI3670_PCLK_GPIO19 19
+#define HI3670_CLK_GATE_SPI 20
+#define HI3670_PCLK_GATE_SPI 21
+#define HI3670_CLK_GATE_UFS_SUBSYS 22
+#define HI3670_CLK_GATE_UFSIO_REF 23
+#define HI3670_PCLK_AO_GPIO0 24
+#define HI3670_PCLK_AO_GPIO1 25
+#define HI3670_PCLK_AO_GPIO2 26
+#define HI3670_PCLK_AO_GPIO3 27
+#define HI3670_PCLK_AO_GPIO4 28
+#define HI3670_PCLK_AO_GPIO5 29
+#define HI3670_PCLK_AO_GPIO6 30
+#define HI3670_CLK_GATE_OUT0 31
+#define HI3670_CLK_GATE_OUT1 32
+#define HI3670_PCLK_GATE_SYSCNT 33
+#define HI3670_CLK_GATE_SYSCNT 34
+#define HI3670_CLK_GATE_ASP_SUBSYS_PERI 35
+#define HI3670_CLK_GATE_ASP_SUBSYS 36
+#define HI3670_CLK_GATE_ASP_TCXO 37
+#define HI3670_CLK_GATE_DP_AUDIO_PLL 38
+
+/* clk in pmuctrl */
+#define HI3670_GATE_ABB_192 0
+
+/* clk in pctrl */
+#define HI3670_GATE_UFS_TCXO_EN 0
+#define HI3670_GATE_USB_TCXO_EN 1
+
+/* clk in iomcu */
+#define HI3670_CLK_GATE_I2C0 0
+#define HI3670_CLK_GATE_I2C1 1
+#define HI3670_CLK_GATE_I2C2 2
+#define HI3670_CLK_GATE_SPI0 3
+#define HI3670_CLK_GATE_SPI2 4
+#define HI3670_CLK_GATE_UART3 5
+#define HI3670_CLK_I2C0_GATE_IOMCU 6
+#define HI3670_CLK_I2C1_GATE_IOMCU 7
+#define HI3670_CLK_I2C2_GATE_IOMCU 8
+#define HI3670_CLK_SPI0_GATE_IOMCU 9
+#define HI3670_CLK_SPI2_GATE_IOMCU 10
+#define HI3670_CLK_UART3_GATE_IOMCU 11
+#define HI3670_CLK_GATE_PERI0_IOMCU 12
+
+/* clk in media1 */
+#define HI3670_CLK_GATE_VIVOBUS_ANDGT 0
+#define HI3670_CLK_ANDGT_EDC0 1
+#define HI3670_CLK_ANDGT_LDI0 2
+#define HI3670_CLK_ANDGT_LDI1 3
+#define HI3670_CLK_MMBUF_PLL_ANDGT 4
+#define HI3670_PCLK_MMBUF_ANDGT 5
+#define HI3670_CLK_MUX_VIVOBUS 6
+#define HI3670_CLK_MUX_EDC0 7
+#define HI3670_CLK_MUX_LDI0 8
+#define HI3670_CLK_MUX_LDI1 9
+#define HI3670_CLK_SW_MMBUF 10
+#define HI3670_CLK_DIV_VIVOBUS 11
+#define HI3670_CLK_DIV_EDC0 12
+#define HI3670_CLK_DIV_LDI0 13
+#define HI3670_CLK_DIV_LDI1 14
+#define HI3670_ACLK_DIV_MMBUF 15
+#define HI3670_PCLK_DIV_MMBUF 16
+#define HI3670_ACLK_GATE_NOC_DSS 17
+#define HI3670_PCLK_GATE_NOC_DSS_CFG 18
+#define HI3670_PCLK_GATE_MMBUF_CFG 19
+#define HI3670_PCLK_GATE_DISP_NOC_SUBSYS 20
+#define HI3670_ACLK_GATE_DISP_NOC_SUBSYS 21
+#define HI3670_PCLK_GATE_DSS 22
+#define HI3670_ACLK_GATE_DSS 23
+#define HI3670_CLK_GATE_VIVOBUSFREQ 24
+#define HI3670_CLK_GATE_EDC0 25
+#define HI3670_CLK_GATE_LDI0 26
+#define HI3670_CLK_GATE_LDI1FREQ 27
+#define HI3670_CLK_GATE_BRG 28
+#define HI3670_ACLK_GATE_ASC 29
+#define HI3670_CLK_GATE_DSS_AXI_MM 30
+#define HI3670_CLK_GATE_MMBUF 31
+#define HI3670_PCLK_GATE_MMBUF 32
+#define HI3670_CLK_GATE_ATDIV_VIVO 33
+
+/* clk in media2 */
+#define HI3670_CLK_GATE_VDECFREQ 0
+#define HI3670_CLK_GATE_VENCFREQ 1
+#define HI3670_CLK_GATE_ICSFREQ 2
+
+#endif /* __DT_BINDINGS_CLOCK_HI3670_H */
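
The Hi3670 header groups its identifiers by clock block (stub, crg, sctrl, pmuctrl, pctrl, iomcu, media1, media2); each block is expected to act as its own #clock-cells = <1> provider indexed by the constants above. A hedged consumer sketch, where the &crg_ctrl label and the node itself are assumptions rather than anything defined by this header:

    #include <dt-bindings/clock/hi3670-clock.h>

    /* Illustrative only: provider label and consumer node are assumed. */
    mmc-example {
            clocks = <&crg_ctrl HI3670_CLK_GATE_SD>,
                     <&crg_ctrl HI3670_HCLK_GATE_SD>;
    };
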
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 7ad171b8f3bf..87b068f4a998 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -273,6 +273,7 @@
#define IMX6QDL_CLK_MLB_PODF 260
#define IMX6QDL_CLK_EPIT1 261
#define IMX6QDL_CLK_EPIT2 262
-#define IMX6QDL_CLK_END 263
+#define IMX6QDL_CLK_MMDC_P0_IPG 263
+#define IMX6QDL_CLK_END 264
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
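
The i.MX6 headers in this series each gain MMDC IPG clock IDs and bump the corresponding *_CLK_END marker by the same amount; the 6SL, 6SLL, 6SX and 6UL hunks below follow the identical pattern. A hedged sketch of how such an ID would be consumed, with the memory-controller node being an assumption:

    #include <dt-bindings/clock/imx6qdl-clock.h>

    /* Illustrative only: the node is assumed; the new ID simply extends the
     * existing single-cell <&clks N> index space of the i.MX6 CCM. */
    memory-controller-example {
            clocks = <&clks IMX6QDL_CLK_MMDC_P0_IPG>;
    };
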
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index e14573e293c5..cfbfc39d1878 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -175,6 +175,8 @@
#define IMX6SL_CLK_SSI2_IPG 162
#define IMX6SL_CLK_SSI3_IPG 163
#define IMX6SL_CLK_SPDIF_GCLK 164
-#define IMX6SL_CLK_END 165
+#define IMX6SL_CLK_MMDC_P0_IPG 165
+#define IMX6SL_CLK_MMDC_P1_IPG 166
+#define IMX6SL_CLK_END 167
#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index 1036475f997d..f446710fe63d 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -203,7 +203,8 @@
#define IMX6SLL_CLK_GPIO4 176
#define IMX6SLL_CLK_GPIO5 177
#define IMX6SLL_CLK_GPIO6 178
+#define IMX6SLL_CLK_MMDC_P1_IPG 179
-#define IMX6SLL_CLK_END 179
+#define IMX6SLL_CLK_END 180
#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index cd2d6c570e86..fb420c734774 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -279,6 +279,7 @@
#define IMX6SX_CLK_LVDS2_OUT 266
#define IMX6SX_CLK_LVDS2_IN 267
#define IMX6SX_CLK_ANACLK2 268
-#define IMX6SX_CLK_CLK_END 269
+#define IMX6SX_CLK_MMDC_P1_IPG 269
+#define IMX6SX_CLK_CLK_END 270
#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index f8e0476a3a0e..f718aac9b9da 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -259,7 +259,8 @@
#define IMX6UL_CLK_GPIO3 246
#define IMX6UL_CLK_GPIO4 247
#define IMX6UL_CLK_GPIO5 248
+#define IMX6UL_CLK_MMDC_P1_IPG 249
-#define IMX6UL_CLK_END 249
+#define IMX6UL_CLK_END 250
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/jz4725b-cgu.h
new file mode 100644
index 000000000000..460bbeff6ab8
--- /dev/null
+++ b/include/dt-bindings/clock/jz4725b-cgu.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4725b-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__
+
+#define JZ4725B_CLK_EXT 0
+#define JZ4725B_CLK_OSC32K 1
+#define JZ4725B_CLK_PLL 2
+#define JZ4725B_CLK_PLL_HALF 3
+#define JZ4725B_CLK_CCLK 4
+#define JZ4725B_CLK_HCLK 5
+#define JZ4725B_CLK_PCLK 6
+#define JZ4725B_CLK_MCLK 7
+#define JZ4725B_CLK_IPU 8
+#define JZ4725B_CLK_LCD 9
+#define JZ4725B_CLK_I2S 10
+#define JZ4725B_CLK_SPI 11
+#define JZ4725B_CLK_MMC_MUX 12
+#define JZ4725B_CLK_UDC 13
+#define JZ4725B_CLK_UART 14
+#define JZ4725B_CLK_DMA 15
+#define JZ4725B_CLK_ADC 16
+#define JZ4725B_CLK_I2C 17
+#define JZ4725B_CLK_AIC 18
+#define JZ4725B_CLK_MMC0 19
+#define JZ4725B_CLK_MMC1 20
+#define JZ4725B_CLK_BCH 21
+#define JZ4725B_CLK_TCU 22
+#define JZ4725B_CLK_EXT512 23
+#define JZ4725B_CLK_RTC 24
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ */
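
The JZ4725B CGU is a single #clock-cells = <1> provider, so consumers select one of the indices above directly. A hedged sketch; the &cgu label and the consumer node are assumptions:

    #include <dt-bindings/clock/jz4725b-cgu.h>

    /* Illustrative only: provider label and consumer node are assumed. */
    mmc0-example {
            clocks = <&cgu JZ4725B_CLK_MMC0>;
    };
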
diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h
index 7b28b0905869..af8261dcace1 100644
--- a/include/dt-bindings/clock/maxim,max77686.h
+++ b/include/dt-bindings/clock/maxim,max77686.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clocks for the Maxim 77686 PMIC.
*/
diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h
index 997312edcbb5..51adcbaed697 100644
--- a/include/dt-bindings/clock/maxim,max77802.h
+++ b/include/dt-bindings/clock/maxim,max77802.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clocks for the Maxim 77802 PMIC.
*/
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
new file mode 100644
index 000000000000..4f7a2d2320bf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H
+
+/* CAM_CC clock registers */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_ATB_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_CLK 7
+#define CAM_CC_CCI_CLK_SRC 8
+#define CAM_CC_CPAS_AHB_CLK 9
+#define CAM_CC_CPHY_RX_CLK_SRC 10
+#define CAM_CC_CSI0PHYTIMER_CLK 11
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 12
+#define CAM_CC_CSI1PHYTIMER_CLK 13
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 14
+#define CAM_CC_CSI2PHYTIMER_CLK 15
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 16
+#define CAM_CC_CSI3PHYTIMER_CLK 17
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 18
+#define CAM_CC_CSIPHY0_CLK 19
+#define CAM_CC_CSIPHY1_CLK 20
+#define CAM_CC_CSIPHY2_CLK 21
+#define CAM_CC_CSIPHY3_CLK 22
+#define CAM_CC_FAST_AHB_CLK_SRC 23
+#define CAM_CC_FD_CORE_CLK 24
+#define CAM_CC_FD_CORE_CLK_SRC 25
+#define CAM_CC_FD_CORE_UAR_CLK 26
+#define CAM_CC_ICP_APB_CLK 27
+#define CAM_CC_ICP_ATB_CLK 28
+#define CAM_CC_ICP_CLK 29
+#define CAM_CC_ICP_CLK_SRC 30
+#define CAM_CC_ICP_CTI_CLK 31
+#define CAM_CC_ICP_TS_CLK 32
+#define CAM_CC_IFE_0_AXI_CLK 33
+#define CAM_CC_IFE_0_CLK 34
+#define CAM_CC_IFE_0_CLK_SRC 35
+#define CAM_CC_IFE_0_CPHY_RX_CLK 36
+#define CAM_CC_IFE_0_CSID_CLK 37
+#define CAM_CC_IFE_0_CSID_CLK_SRC 38
+#define CAM_CC_IFE_0_DSP_CLK 39
+#define CAM_CC_IFE_1_AXI_CLK 40
+#define CAM_CC_IFE_1_CLK 41
+#define CAM_CC_IFE_1_CLK_SRC 42
+#define CAM_CC_IFE_1_CPHY_RX_CLK 43
+#define CAM_CC_IFE_1_CSID_CLK 44
+#define CAM_CC_IFE_1_CSID_CLK_SRC 45
+#define CAM_CC_IFE_1_DSP_CLK 46
+#define CAM_CC_IFE_LITE_CLK 47
+#define CAM_CC_IFE_LITE_CLK_SRC 48
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49
+#define CAM_CC_IFE_LITE_CSID_CLK 50
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51
+#define CAM_CC_IPE_0_AHB_CLK 52
+#define CAM_CC_IPE_0_AREG_CLK 53
+#define CAM_CC_IPE_0_AXI_CLK 54
+#define CAM_CC_IPE_0_CLK 55
+#define CAM_CC_IPE_0_CLK_SRC 56
+#define CAM_CC_IPE_1_AHB_CLK 57
+#define CAM_CC_IPE_1_AREG_CLK 58
+#define CAM_CC_IPE_1_AXI_CLK 59
+#define CAM_CC_IPE_1_CLK 60
+#define CAM_CC_IPE_1_CLK_SRC 61
+#define CAM_CC_JPEG_CLK 62
+#define CAM_CC_JPEG_CLK_SRC 63
+#define CAM_CC_LRME_CLK 64
+#define CAM_CC_LRME_CLK_SRC 65
+#define CAM_CC_MCLK0_CLK 66
+#define CAM_CC_MCLK0_CLK_SRC 67
+#define CAM_CC_MCLK1_CLK 68
+#define CAM_CC_MCLK1_CLK_SRC 69
+#define CAM_CC_MCLK2_CLK 70
+#define CAM_CC_MCLK2_CLK_SRC 71
+#define CAM_CC_MCLK3_CLK 72
+#define CAM_CC_MCLK3_CLK_SRC 73
+#define CAM_CC_PLL0 74
+#define CAM_CC_PLL0_OUT_EVEN 75
+#define CAM_CC_PLL1 76
+#define CAM_CC_PLL1_OUT_EVEN 77
+#define CAM_CC_PLL2 78
+#define CAM_CC_PLL2_OUT_EVEN 79
+#define CAM_CC_PLL3 80
+#define CAM_CC_PLL3_OUT_EVEN 81
+#define CAM_CC_SLOW_AHB_CLK_SRC 82
+#define CAM_CC_SOC_AHB_CLK 83
+#define CAM_CC_SYS_TMR_CLK 84
+
+/* CAM_CC Resets */
+#define TITAN_CAM_CC_CCI_BCR 0
+#define TITAN_CAM_CC_CPAS_BCR 1
+#define TITAN_CAM_CC_CSI0PHY_BCR 2
+#define TITAN_CAM_CC_CSI1PHY_BCR 3
+#define TITAN_CAM_CC_CSI2PHY_BCR 4
+#define TITAN_CAM_CC_MCLK0_BCR 5
+#define TITAN_CAM_CC_MCLK1_BCR 6
+#define TITAN_CAM_CC_MCLK2_BCR 7
+#define TITAN_CAM_CC_MCLK3_BCR 8
+#define TITAN_CAM_CC_TITAN_TOP_BCR 9
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define IPE_1_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
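
The SDM845 camera clock controller header carries three index spaces: clock IDs, BCR reset indices and GDSC power-domain indices. A hedged sketch of a consumer touching all three, where the &camcc label, the node and the chosen indices are illustrative assumptions only:

    #include <dt-bindings/clock/qcom,camcc-sdm845.h>

    /* Illustrative only: provider label and consumer node are assumed. */
    camera-example {
            clocks = <&camcc CAM_CC_CCI_CLK>,
                     <&camcc CAM_CC_CPAS_AHB_CLK>;
            resets = <&camcc TITAN_CAM_CC_CCI_BCR>;
            power-domains = <&camcc TITAN_TOP_GDSC>;
    };
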
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h
index 7d20eedfee98..e02742fc81cc 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h
@@ -319,5 +319,7 @@
#define CE3_SRC 303
#define CE3_CORE_CLK 304
#define CE3_H_CLK 305
+#define PLL16 306
+#define PLL17 307
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h
index 75b07cf5eed0..db80f2ee571b 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h
@@ -235,6 +235,15 @@
#define GCC_RX1_USB2_CLKREF_CLK 218
#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK 219
#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 220
+#define GCC_EDP_CLKREF_CLK 221
+#define GCC_MSS_CFG_AHB_CLK 222
+#define GCC_MSS_Q6_BIMC_AXI_CLK 223
+#define GCC_MSS_SNOC_AXI_CLK 224
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 225
+#define GCC_DCC_AHB_CLK 226
+#define GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK 227
+#define GCC_MMSS_GPLL0_DIV_CLK 228
+#define GCC_MSS_GPLL0_DIV_CLK 229
#define GCC_SYSTEM_NOC_BCR 0
#define GCC_CONFIG_NOC_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
new file mode 100644
index 000000000000..6ceb55ed72c6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H
+
+#define GCC_APSS_AHB_CLK_SRC 0
+#define GCC_BLSP1_QUP0_I2C_APPS_CLK_SRC 1
+#define GCC_BLSP1_QUP0_SPI_APPS_CLK_SRC 2
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define GCC_BLSP1_UART0_APPS_CLK_SRC 11
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 12
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 13
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 14
+#define GCC_BLSP2_QUP0_I2C_APPS_CLK_SRC 15
+#define GCC_BLSP2_QUP0_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP2_UART0_APPS_CLK_SRC 17
+#define GCC_BYTE0_CLK_SRC 18
+#define GCC_EMAC_CLK_SRC 19
+#define GCC_EMAC_PTP_CLK_SRC 20
+#define GCC_ESC0_CLK_SRC 21
+#define GCC_APSS_AHB_CLK 22
+#define GCC_APSS_AXI_CLK 23
+#define GCC_BIMC_APSS_AXI_CLK 24
+#define GCC_BIMC_GFX_CLK 25
+#define GCC_BIMC_MDSS_CLK 26
+#define GCC_BLSP1_AHB_CLK 27
+#define GCC_BLSP1_QUP0_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP0_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 33
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 34
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 35
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 37
+#define GCC_BLSP1_UART0_APPS_CLK 38
+#define GCC_BLSP1_UART1_APPS_CLK 39
+#define GCC_BLSP1_UART2_APPS_CLK 40
+#define GCC_BLSP1_UART3_APPS_CLK 41
+#define GCC_BLSP2_AHB_CLK 42
+#define GCC_BLSP2_QUP0_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP0_SPI_APPS_CLK 44
+#define GCC_BLSP2_UART0_APPS_CLK 45
+#define GCC_BOOT_ROM_AHB_CLK 46
+#define GCC_DCC_CLK 47
+#define GCC_GENI_IR_H_CLK 48
+#define GCC_ETH_AXI_CLK 49
+#define GCC_ETH_PTP_CLK 50
+#define GCC_ETH_RGMII_CLK 51
+#define GCC_ETH_SLAVE_AHB_CLK 52
+#define GCC_GENI_IR_S_CLK 53
+#define GCC_GP1_CLK 54
+#define GCC_GP2_CLK 55
+#define GCC_GP3_CLK 56
+#define GCC_MDSS_AHB_CLK 57
+#define GCC_MDSS_AXI_CLK 58
+#define GCC_MDSS_BYTE0_CLK 59
+#define GCC_MDSS_ESC0_CLK 60
+#define GCC_MDSS_HDMI_APP_CLK 61
+#define GCC_MDSS_HDMI_PCLK_CLK 62
+#define GCC_MDSS_MDP_CLK 63
+#define GCC_MDSS_PCLK0_CLK 64
+#define GCC_MDSS_VSYNC_CLK 65
+#define GCC_OXILI_AHB_CLK 66
+#define GCC_OXILI_GFX3D_CLK 67
+#define GCC_PCIE_0_AUX_CLK 68
+#define GCC_PCIE_0_CFG_AHB_CLK 69
+#define GCC_PCIE_0_MSTR_AXI_CLK 70
+#define GCC_PCIE_0_PIPE_CLK 71
+#define GCC_PCIE_0_SLV_AXI_CLK 72
+#define GCC_PCNOC_USB2_CLK 73
+#define GCC_PCNOC_USB3_CLK 74
+#define GCC_PDM2_CLK 75
+#define GCC_PDM_AHB_CLK 76
+#define GCC_VSYNC_CLK_SRC 77
+#define GCC_PRNG_AHB_CLK 78
+#define GCC_PWM0_XO512_CLK 79
+#define GCC_PWM1_XO512_CLK 80
+#define GCC_PWM2_XO512_CLK 81
+#define GCC_SDCC1_AHB_CLK 82
+#define GCC_SDCC1_APPS_CLK 83
+#define GCC_SDCC1_ICE_CORE_CLK 84
+#define GCC_SDCC2_AHB_CLK 85
+#define GCC_SDCC2_APPS_CLK 86
+#define GCC_SYS_NOC_USB3_CLK 87
+#define GCC_USB20_MOCK_UTMI_CLK 88
+#define GCC_USB2A_PHY_SLEEP_CLK 89
+#define GCC_USB30_MASTER_CLK 90
+#define GCC_USB30_MOCK_UTMI_CLK 91
+#define GCC_USB30_SLEEP_CLK 92
+#define GCC_USB3_PHY_AUX_CLK 93
+#define GCC_USB3_PHY_PIPE_CLK 94
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 95
+#define GCC_USB_HS_SYSTEM_CLK 96
+#define GCC_GFX3D_CLK_SRC 97
+#define GCC_GP1_CLK_SRC 98
+#define GCC_GP2_CLK_SRC 99
+#define GCC_GP3_CLK_SRC 100
+#define GCC_GPLL0_OUT_MAIN 101
+#define GCC_GPLL1_OUT_MAIN 102
+#define GCC_GPLL3_OUT_MAIN 103
+#define GCC_GPLL4_OUT_MAIN 104
+#define GCC_HDMI_APP_CLK_SRC 105
+#define GCC_HDMI_PCLK_CLK_SRC 106
+#define GCC_MDP_CLK_SRC 107
+#define GCC_PCIE_0_AUX_CLK_SRC 108
+#define GCC_PCIE_0_PIPE_CLK_SRC 109
+#define GCC_PCLK0_CLK_SRC 110
+#define GCC_PDM2_CLK_SRC 111
+#define GCC_SDCC1_APPS_CLK_SRC 112
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 113
+#define GCC_SDCC2_APPS_CLK_SRC 114
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 115
+#define GCC_USB30_MASTER_CLK_SRC 116
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 117
+#define GCC_USB3_PHY_AUX_CLK_SRC 118
+#define GCC_USB_HS_SYSTEM_CLK_SRC 119
+#define GCC_GPLL0_AO_CLK_SRC 120
+#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 122
+#define GCC_GPLL0_AO_OUT_MAIN 123
+#define GCC_GPLL0_SLEEP_CLK_SRC 124
+#define GCC_GPLL6 125
+#define GCC_GPLL6_OUT_AUX 126
+#define GCC_MDSS_MDP_VOTE_CLK 127
+#define GCC_MDSS_ROTATOR_VOTE_CLK 128
+#define GCC_BIMC_GPU_CLK 129
+#define GCC_GTCU_AHB_CLK 130
+#define GCC_GFX_TCU_CLK 131
+#define GCC_GFX_TBU_CLK 132
+#define GCC_SMMU_CFG_CLK 133
+#define GCC_APSS_TCU_CLK 134
+#define GCC_CRYPTO_AHB_CLK 135
+#define GCC_CRYPTO_AXI_CLK 136
+#define GCC_CRYPTO_CLK 137
+#define GCC_MDP_TBU_CLK 138
+#define GCC_QDSS_DAP_CLK 139
+#define GCC_DCC_XO_CLK 140
+
+#define GCC_GENI_IR_BCR 0
+#define GCC_USB_HS_BCR 1
+#define GCC_USB2_HS_PHY_ONLY_BCR 2
+#define GCC_QUSB2_PHY_BCR 3
+#define GCC_USB_HS_PHY_CFG_AHB_BCR 4
+#define GCC_USB2A_PHY_BCR 5
+#define GCC_USB3_PHY_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB3PHY_PHY_BCR 8
+#define GCC_PCIE_0_BCR 9
+#define GCC_PCIE_0_PHY_BCR 10
+#define GCC_PCIE_0_LINK_DOWN_BCR 11
+#define GCC_PCIEPHY_0_PHY_BCR 12
+#define GCC_EMAC_BCR 13
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
new file mode 100644
index 000000000000..468302282913
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Craig Tatlor.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_660_H
+#define _DT_BINDINGS_CLK_MSM_GCC_660_H
+
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7
+#define BLSP1_UART1_APPS_CLK_SRC 8
+#define BLSP1_UART2_APPS_CLK_SRC 9
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 10
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 11
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 12
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 13
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 14
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 15
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 16
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 17
+#define BLSP2_UART1_APPS_CLK_SRC 18
+#define BLSP2_UART2_APPS_CLK_SRC 19
+#define GCC_AGGRE2_UFS_AXI_CLK 20
+#define GCC_AGGRE2_USB3_AXI_CLK 21
+#define GCC_BIMC_GFX_CLK 22
+#define GCC_BIMC_HMSS_AXI_CLK 23
+#define GCC_BIMC_MSS_Q6_AXI_CLK 24
+#define GCC_BLSP1_AHB_CLK 25
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 26
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 27
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 33
+#define GCC_BLSP1_UART1_APPS_CLK 34
+#define GCC_BLSP1_UART2_APPS_CLK 35
+#define GCC_BLSP2_AHB_CLK 36
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 37
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 38
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 39
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 40
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 41
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 42
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 44
+#define GCC_BLSP2_UART1_APPS_CLK 45
+#define GCC_BLSP2_UART2_APPS_CLK 46
+#define GCC_BOOT_ROM_AHB_CLK 47
+#define GCC_CFG_NOC_USB2_AXI_CLK 48
+#define GCC_CFG_NOC_USB3_AXI_CLK 49
+#define GCC_DCC_AHB_CLK 50
+#define GCC_GP1_CLK 51
+#define GCC_GP2_CLK 52
+#define GCC_GP3_CLK 53
+#define GCC_GPU_BIMC_GFX_CLK 54
+#define GCC_GPU_CFG_AHB_CLK 55
+#define GCC_GPU_GPLL0_CLK 56
+#define GCC_GPU_GPLL0_DIV_CLK 57
+#define GCC_HMSS_DVM_BUS_CLK 58
+#define GCC_HMSS_RBCPR_CLK 59
+#define GCC_MMSS_GPLL0_CLK 60
+#define GCC_MMSS_GPLL0_DIV_CLK 61
+#define GCC_MMSS_NOC_CFG_AHB_CLK 62
+#define GCC_MMSS_SYS_NOC_AXI_CLK 63
+#define GCC_MSS_CFG_AHB_CLK 64
+#define GCC_MSS_GPLL0_DIV_CLK 65
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 66
+#define GCC_MSS_Q6_BIMC_AXI_CLK 67
+#define GCC_MSS_SNOC_AXI_CLK 68
+#define GCC_PDM2_CLK 69
+#define GCC_PDM_AHB_CLK 70
+#define GCC_PRNG_AHB_CLK 71
+#define GCC_QSPI_AHB_CLK 72
+#define GCC_QSPI_SER_CLK 73
+#define GCC_SDCC1_AHB_CLK 74
+#define GCC_SDCC1_APPS_CLK 75
+#define GCC_SDCC1_ICE_CORE_CLK 76
+#define GCC_SDCC2_AHB_CLK 77
+#define GCC_SDCC2_APPS_CLK 78
+#define GCC_UFS_AHB_CLK 79
+#define GCC_UFS_AXI_CLK 80
+#define GCC_UFS_CLKREF_CLK 81
+#define GCC_UFS_ICE_CORE_CLK 82
+#define GCC_UFS_PHY_AUX_CLK 83
+#define GCC_UFS_RX_SYMBOL_0_CLK 84
+#define GCC_UFS_RX_SYMBOL_1_CLK 85
+#define GCC_UFS_TX_SYMBOL_0_CLK 86
+#define GCC_UFS_UNIPRO_CORE_CLK 87
+#define GCC_USB20_MASTER_CLK 88
+#define GCC_USB20_MOCK_UTMI_CLK 89
+#define GCC_USB20_SLEEP_CLK 90
+#define GCC_USB30_MASTER_CLK 91
+#define GCC_USB30_MOCK_UTMI_CLK 92
+#define GCC_USB30_SLEEP_CLK 93
+#define GCC_USB3_CLKREF_CLK 94
+#define GCC_USB3_PHY_AUX_CLK 95
+#define GCC_USB3_PHY_PIPE_CLK 96
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 97
+#define GP1_CLK_SRC 98
+#define GP2_CLK_SRC 99
+#define GP3_CLK_SRC 100
+#define GPLL0 101
+#define GPLL0_EARLY 102
+#define GPLL1 103
+#define GPLL1_EARLY 104
+#define GPLL4 105
+#define GPLL4_EARLY 106
+#define HMSS_GPLL0_CLK_SRC 107
+#define HMSS_GPLL4_CLK_SRC 108
+#define HMSS_RBCPR_CLK_SRC 109
+#define PDM2_CLK_SRC 110
+#define QSPI_SER_CLK_SRC 111
+#define SDCC1_APPS_CLK_SRC 112
+#define SDCC1_ICE_CORE_CLK_SRC 113
+#define SDCC2_APPS_CLK_SRC 114
+#define UFS_AXI_CLK_SRC 115
+#define UFS_ICE_CORE_CLK_SRC 116
+#define UFS_PHY_AUX_CLK_SRC 117
+#define UFS_UNIPRO_CORE_CLK_SRC 118
+#define USB20_MASTER_CLK_SRC 119
+#define USB20_MOCK_UTMI_CLK_SRC 120
+#define USB30_MASTER_CLK_SRC 121
+#define USB30_MOCK_UTMI_CLK_SRC 122
+#define USB3_PHY_AUX_CLK_SRC 123
+#define GPLL0_OUT_MSSCC 124
+#define GCC_UFS_AXI_HW_CTL_CLK 125
+#define GCC_UFS_ICE_CORE_HW_CTL_CLK 126
+#define GCC_UFS_PHY_AUX_HW_CTL_CLK 127
+#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 128
+#define GCC_RX0_USB2_CLKREF_CLK 129
+#define GCC_RX1_USB2_CLKREF_CLK 130
+
+#define PCIE_0_GDSC 0
+#define UFS_GDSC 1
+#define USB_30_GDSC 2
+
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_BCR 2
+#define GCC_USB3_DP_PHY_BCR 3
+#define GCC_USB3_PHY_BCR 4
+#define GCC_USB3PHY_PHY_BCR 5
+#define GCC_USB_20_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index f96fc2dbf60e..b8eae5a76503 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -194,6 +194,9 @@
#define GPLL4 184
#define GCC_CPUSS_DVM_BUS_CLK 185
#define GCC_CPUSS_GNOC_CLK 186
+#define GCC_QSPI_CORE_CLK_SRC 187
+#define GCC_QSPI_CORE_CLK 188
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
/* GCC Resets */
#define GCC_MMSS_BCR 0
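
The three new SDM845 GCC identifiers cover the QSPI controller's clock source (RCG) and its core and CNOC AHB branches. A hedged consumer sketch; the &gcc label, the node and the clock-names strings are assumptions about the consumer binding, not something this hunk defines:

    #include <dt-bindings/clock/qcom,gcc-sdm845.h>

    /* Illustrative only: labels, node and clock-names are assumed. */
    qspi-example {
            clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
                     <&gcc GCC_QSPI_CORE_CLK>;
            clock-names = "iface", "core";
    };
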
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index 0dcb3e87d44c..a267ac250143 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -1,10 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __DT_BINDINGS_CLOCK_R7S72100_H__
diff --git a/include/dt-bindings/clock/r7s9210-cpg-mssr.h b/include/dt-bindings/clock/r7s9210-cpg-mssr.h
new file mode 100644
index 000000000000..b6f85ca149aa
--- /dev/null
+++ b/include/dt-bindings/clock/r7s9210-cpg-mssr.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R7S9210 CPG Core Clocks */
+#define R7S9210_CLK_I 0
+#define R7S9210_CLK_G 1
+#define R7S9210_CLK_B 2
+#define R7S9210_CLK_P1 3
+#define R7S9210_CLK_P1C 4
+#define R7S9210_CLK_P0 5
+
+#endif /* __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ */
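
This header, like the R8A7744/R8A774A1/R8A774C0 ones added below, only enumerates CPG core clocks; module clocks are addressed through the CPG_MOD selector from the shared renesas-cpg-mssr.h include. A hedged core-clock consumer sketch, with the &cpg label and the node being assumptions:

    #include <dt-bindings/clock/r7s9210-cpg-mssr.h>

    /* Illustrative only: provider label and consumer node are assumed.
     * CPG_CORE comes from the shared renesas-cpg-mssr.h header; the CPG/MSSR
     * binding uses #clock-cells = <2> (selector, clock number). */
    peripheral-example {
            clocks = <&cpg CPG_CORE R7S9210_CLK_P1C>;
    };
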
diff --git a/include/dt-bindings/clock/r8a7743-cpg-mssr.h b/include/dt-bindings/clock/r8a7743-cpg-mssr.h
index e1d1f3c6a99e..3ba936029d9f 100644
--- a/include/dt-bindings/clock/r8a7743-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7743-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2016 Cogent Embedded Inc.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2016 Cogent Embedded Inc.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7744-cpg-mssr.h b/include/dt-bindings/clock/r8a7744-cpg-mssr.h
new file mode 100644
index 000000000000..2690be0c3e22
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7744-cpg-mssr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7744 CPG Core Clocks */
+#define R8A7744_CLK_Z 0
+#define R8A7744_CLK_ZG 1
+#define R8A7744_CLK_ZTR 2
+#define R8A7744_CLK_ZTRD2 3
+#define R8A7744_CLK_ZT 4
+#define R8A7744_CLK_ZX 5
+#define R8A7744_CLK_ZS 6
+#define R8A7744_CLK_HP 7
+#define R8A7744_CLK_B 9
+#define R8A7744_CLK_LB 10
+#define R8A7744_CLK_P 11
+#define R8A7744_CLK_CL 12
+#define R8A7744_CLK_M2 13
+#define R8A7744_CLK_ZB3 15
+#define R8A7744_CLK_ZB3D2 16
+#define R8A7744_CLK_DDR 17
+#define R8A7744_CLK_SDH 18
+#define R8A7744_CLK_SD0 19
+#define R8A7744_CLK_SD2 20
+#define R8A7744_CLK_SD3 21
+#define R8A7744_CLK_MMC0 22
+#define R8A7744_CLK_MP 23
+#define R8A7744_CLK_QSPI 26
+#define R8A7744_CLK_CP 27
+#define R8A7744_CLK_RCAN 28
+#define R8A7744_CLK_R 29
+#define R8A7744_CLK_OSC 30
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7745-cpg-mssr.h b/include/dt-bindings/clock/r8a7745-cpg-mssr.h
index 56ad6f0c6760..f81066c9d192 100644
--- a/include/dt-bindings/clock/r8a7745-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7745-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2016 Cogent Embedded Inc.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2016 Cogent Embedded Inc.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
new file mode 100644
index 000000000000..9bc5d45ff4b5
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a774a1 CPG Core Clocks */
+#define R8A774A1_CLK_Z 0
+#define R8A774A1_CLK_Z2 1
+#define R8A774A1_CLK_ZG 2
+#define R8A774A1_CLK_ZTR 3
+#define R8A774A1_CLK_ZTRD2 4
+#define R8A774A1_CLK_ZT 5
+#define R8A774A1_CLK_ZX 6
+#define R8A774A1_CLK_S0D1 7
+#define R8A774A1_CLK_S0D2 8
+#define R8A774A1_CLK_S0D3 9
+#define R8A774A1_CLK_S0D4 10
+#define R8A774A1_CLK_S0D6 11
+#define R8A774A1_CLK_S0D8 12
+#define R8A774A1_CLK_S0D12 13
+#define R8A774A1_CLK_S1D2 14
+#define R8A774A1_CLK_S1D4 15
+#define R8A774A1_CLK_S2D1 16
+#define R8A774A1_CLK_S2D2 17
+#define R8A774A1_CLK_S2D4 18
+#define R8A774A1_CLK_S3D1 19
+#define R8A774A1_CLK_S3D2 20
+#define R8A774A1_CLK_S3D4 21
+#define R8A774A1_CLK_LB 22
+#define R8A774A1_CLK_CL 23
+#define R8A774A1_CLK_ZB3 24
+#define R8A774A1_CLK_ZB3D2 25
+#define R8A774A1_CLK_ZB3D4 26
+#define R8A774A1_CLK_CR 27
+#define R8A774A1_CLK_CRD2 28
+#define R8A774A1_CLK_SD0H 29
+#define R8A774A1_CLK_SD0 30
+#define R8A774A1_CLK_SD1H 31
+#define R8A774A1_CLK_SD1 32
+#define R8A774A1_CLK_SD2H 33
+#define R8A774A1_CLK_SD2 34
+#define R8A774A1_CLK_SD3H 35
+#define R8A774A1_CLK_SD3 36
+#define R8A774A1_CLK_RPC 37
+#define R8A774A1_CLK_RPCD2 38
+#define R8A774A1_CLK_MSO 39
+#define R8A774A1_CLK_HDMI 40
+#define R8A774A1_CLK_CSI0 41
+#define R8A774A1_CLK_CP 42
+#define R8A774A1_CLK_CPEX 43
+#define R8A774A1_CLK_R 44
+#define R8A774A1_CLK_OSC 45
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
new file mode 100644
index 000000000000..8fe51b6aca28
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a774c0 CPG Core Clocks */
+#define R8A774C0_CLK_Z2 0
+#define R8A774C0_CLK_ZG 1
+#define R8A774C0_CLK_ZTR 2
+#define R8A774C0_CLK_ZT 3
+#define R8A774C0_CLK_ZX 4
+#define R8A774C0_CLK_S0D1 5
+#define R8A774C0_CLK_S0D3 6
+#define R8A774C0_CLK_S0D6 7
+#define R8A774C0_CLK_S0D12 8
+#define R8A774C0_CLK_S0D24 9
+#define R8A774C0_CLK_S1D1 10
+#define R8A774C0_CLK_S1D2 11
+#define R8A774C0_CLK_S1D4 12
+#define R8A774C0_CLK_S2D1 13
+#define R8A774C0_CLK_S2D2 14
+#define R8A774C0_CLK_S2D4 15
+#define R8A774C0_CLK_S3D1 16
+#define R8A774C0_CLK_S3D2 17
+#define R8A774C0_CLK_S3D4 18
+#define R8A774C0_CLK_S0D6C 19
+#define R8A774C0_CLK_S3D1C 20
+#define R8A774C0_CLK_S3D2C 21
+#define R8A774C0_CLK_S3D4C 22
+#define R8A774C0_CLK_LB 23
+#define R8A774C0_CLK_CL 24
+#define R8A774C0_CLK_ZB3 25
+#define R8A774C0_CLK_ZB3D2 26
+#define R8A774C0_CLK_CR 27
+#define R8A774C0_CLK_CRD2 28
+#define R8A774C0_CLK_SD0H 29
+#define R8A774C0_CLK_SD0 30
+#define R8A774C0_CLK_SD1H 31
+#define R8A774C0_CLK_SD1 32
+#define R8A774C0_CLK_SD3H 33
+#define R8A774C0_CLK_SD3 34
+#define R8A774C0_CLK_RPC 35
+#define R8A774C0_CLK_RPCD2 36
+#define R8A774C0_CLK_ZA2 37
+#define R8A774C0_CLK_ZA8 38
+#define R8A774C0_CLK_Z2D 39
+#define R8A774C0_CLK_MSO 40
+#define R8A774C0_CLK_R 41
+#define R8A774C0_CLK_OSC 42
+#define R8A774C0_CLK_LV0 43
+#define R8A774C0_CLK_LV1 44
+#define R8A774C0_CLK_CSI0 45
+#define R8A774C0_CLK_CP 46
+#define R8A774C0_CLK_CPEX 47
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-cpg-mssr.h b/include/dt-bindings/clock/r8a7790-cpg-mssr.h
index 1625b8bf3482..c5955b56b36d 100644
--- a/include/dt-bindings/clock/r8a7790-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7790-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7791-cpg-mssr.h b/include/dt-bindings/clock/r8a7791-cpg-mssr.h
index e8823410c01c..aadd06c566c0 100644
--- a/include/dt-bindings/clock/r8a7791-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7791-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7792-cpg-mssr.h b/include/dt-bindings/clock/r8a7792-cpg-mssr.h
index 72ce85cb2f94..829c44db0271 100644
--- a/include/dt-bindings/clock/r8a7792-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7792-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
index 7318d45d4e7e..49c66d8ed178 100644
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -1,16 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* r8a7793 clock definition
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
diff --git a/include/dt-bindings/clock/r8a7793-cpg-mssr.h b/include/dt-bindings/clock/r8a7793-cpg-mssr.h
index 8809b0f62d61..d1ff646c31f2 100644
--- a/include/dt-bindings/clock/r8a7793-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7793-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 93e99c3ffc8d..649f005782d0 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -1,11 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright 2013 Ideas On Board SPRL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__
diff --git a/include/dt-bindings/clock/r8a7794-cpg-mssr.h b/include/dt-bindings/clock/r8a7794-cpg-mssr.h
index 9d720311ae3a..6314e23b51af 100644
--- a/include/dt-bindings/clock/r8a7794-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7794-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
index f047eaf261f3..948389641565 100644
--- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
index 1e5942695f0d..e6087f2f7e3a 100644
--- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2016 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2016 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a77970-cpg-mssr.h b/include/dt-bindings/clock/r8a77970-cpg-mssr.h
index 4146395595b1..6145ebe66361 100644
--- a/include/dt-bindings/clock/r8a77970-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a77970-cpg-mssr.h
@@ -1,11 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2017 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
index 4e8ae3dee590..1eb11acfa563 100644
--- a/include/dt-bindings/clock/r8a77995-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2017 Glider bvba
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2017 Glider bvba
*/
#ifndef __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h
index 569a3cc33ffb..8169ad063f0a 100644
--- a/include/dt-bindings/clock/renesas-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index b9462b7d3dfe..dc2101a634be 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -139,8 +139,9 @@
#define HCLK_CIF1 470
#define HCLK_VEPU 471
#define HCLK_VDPU 472
+#define HCLK_HDMI 473
-#define CLK_NR_CLKS (HCLK_VDPU + 1)
+#define CLK_NR_CLKS (HCLK_HDMI + 1)
/* soft-reset indices */
#define SRST_MCORE 2
diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h
index b903d7de27c9..5ece35d429ff 100644
--- a/include/dt-bindings/clock/samsung,s2mps11.h
+++ b/include/dt-bindings/clock/samsung,s2mps11.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Markus Reichl
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for the Samsung S2MPS11 PMIC clocks.
*/
diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h
index ad95c7f50090..19d233f37e2f 100644
--- a/include/dt-bindings/clock/samsung,s3c64xx-clock.h
+++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Samsung S3C64xx clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H
#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index d66432c6e675..a8ac4cfcdcbc 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -43,6 +43,7 @@
#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
#define _DT_BINDINGS_CLK_SUN50I_A64_H_
+#define CLK_PLL_VIDEO0 7
#define CLK_PLL_PERIPH0 11
#define CLK_BUS_MIPI_DSI 28
diff --git a/include/dt-bindings/reset/actions,s700-reset.h b/include/dt-bindings/reset/actions,s700-reset.h
new file mode 100644
index 000000000000..5e3b16b8ef53
--- /dev/null
+++ b/include/dt-bindings/reset/actions,s700-reset.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+//
+// Device Tree binding constants for Actions Semi S700 Reset Management Unit
+//
+// Copyright (c) 2018 Linaro Ltd.
+
+#ifndef __DT_BINDINGS_ACTIONS_S700_RESET_H
+#define __DT_BINDINGS_ACTIONS_S700_RESET_H
+
+#define RESET_AUDIO 0
+#define RESET_CSI 1
+#define RESET_DE 2
+#define RESET_DSI 3
+#define RESET_GPIO 4
+#define RESET_I2C0 5
+#define RESET_I2C1 6
+#define RESET_I2C2 7
+#define RESET_I2C3 8
+#define RESET_KEY 9
+#define RESET_LCD0 10
+#define RESET_SI 11
+#define RESET_SPI0 12
+#define RESET_SPI1 13
+#define RESET_SPI2 14
+#define RESET_SPI3 15
+#define RESET_UART0 16
+#define RESET_UART1 17
+#define RESET_UART2 18
+#define RESET_UART3 19
+#define RESET_UART4 20
+#define RESET_UART5 21
+#define RESET_UART6 22
+
+#endif /* __DT_BINDINGS_ACTIONS_S700_RESET_H */
diff --git a/include/dt-bindings/reset/actions,s900-reset.h b/include/dt-bindings/reset/actions,s900-reset.h
new file mode 100644
index 000000000000..42c19d02e43b
--- /dev/null
+++ b/include/dt-bindings/reset/actions,s900-reset.h
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+//
+// Device Tree binding constants for Actions Semi S900 Reset Management Unit
+//
+// Copyright (c) 2018 Linaro Ltd.
+
+#ifndef __DT_BINDINGS_ACTIONS_S900_RESET_H
+#define __DT_BINDINGS_ACTIONS_S900_RESET_H
+
+#define RESET_CHIPID 0
+#define RESET_CPU_SCNT 1
+#define RESET_SRAMI 2
+#define RESET_DDR_CTL_PHY 3
+#define RESET_DMAC 4
+#define RESET_GPIO 5
+#define RESET_BISP_AXI 6
+#define RESET_CSI0 7
+#define RESET_CSI1 8
+#define RESET_DE 9
+#define RESET_DSI 10
+#define RESET_GPU3D_PA 11
+#define RESET_GPU3D_PB 12
+#define RESET_HDE 13
+#define RESET_I2C0 14
+#define RESET_I2C1 15
+#define RESET_I2C2 16
+#define RESET_I2C3 17
+#define RESET_I2C4 18
+#define RESET_I2C5 19
+#define RESET_IMX 20
+#define RESET_NANDC0 21
+#define RESET_NANDC1 22
+#define RESET_SD0 23
+#define RESET_SD1 24
+#define RESET_SD2 25
+#define RESET_SD3 26
+#define RESET_SPI0 27
+#define RESET_SPI1 28
+#define RESET_SPI2 29
+#define RESET_SPI3 30
+#define RESET_UART0 31
+#define RESET_UART1 32
+#define RESET_UART2 33
+#define RESET_UART3 34
+#define RESET_UART4 35
+#define RESET_UART5 36
+#define RESET_UART6 37
+#define RESET_HDMI 38
+#define RESET_LVDS 39
+#define RESET_EDP 40
+#define RESET_USB2HUB 41
+#define RESET_USB2HSIC 42
+#define RESET_USB3 43
+#define RESET_PCM1 44
+#define RESET_AUDIO 45
+#define RESET_PCM0 46
+#define RESET_SE 47
+#define RESET_GIC 48
+#define RESET_DDR_CTL_PHY_AXI 49
+#define RESET_CMU_DDR 50
+#define RESET_DMM 51
+#define RESET_HDCP2TX 52
+#define RESET_ETHERNET 53
+
+#endif /* __DT_BINDINGS_ACTIONS_S900_RESET_H */
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index e0a9c2368872..9ce2f0fae57e 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -17,6 +17,8 @@
#include <linux/seq_file.h>
#include <keys/asymmetric-type.h>
+struct kernel_pkey_query;
+struct kernel_pkey_params;
struct public_key_signature;
/*
@@ -34,6 +36,13 @@ struct asymmetric_key_subtype {
/* Destroy a key of this subtype */
void (*destroy)(void *payload_crypto, void *payload_auth);
+ int (*query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+
+ /* Encrypt/decrypt/sign data */
+ int (*eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+
/* Verify the signature on a key of this subtype (optional) */
int (*verify_signature)(const struct key *key,
const struct public_key_signature *sig);
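
A minimal subtype sketch for the new hooks above; the example_* names are placeholders, and a real subtype also fills in .name, .describe and the existing callbacks:

#include <linux/errno.h>
#include <keys/asymmetric-subtype.h>

static int example_query(const struct kernel_pkey_params *params,
                         struct kernel_pkey_query *info)
{
        /* report key size, max data sizes and supported operations in *info */
        return 0;
}

static int example_eds_op(struct kernel_pkey_params *params,
                          const void *in, void *out)
{
        /* encrypt, decrypt or sign @in into @out as requested by @params */
        return -EOPNOTSUPP;
}

static struct asymmetric_key_subtype example_subtype = {
        .query   = example_query,
        .eds_op  = example_eds_op,
        /* .destroy, .verify_signature, ... as before */
};
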
diff --git a/security/keys/trusted.h b/include/keys/trusted.h
index 8d5fe9eafb22..adbcb6817826 100644
--- a/security/keys/trusted.h
+++ b/include/keys/trusted.h
@@ -3,7 +3,7 @@
#define __TRUSTED_KEY_H
/* implementation specific TPM constants */
-#define MAX_BUF_SIZE 512
+#define MAX_BUF_SIZE 1024
#define TPM_GETRANDOM_SIZE 14
#define TPM_OSAP_SIZE 36
#define TPM_OIAP_SIZE 10
@@ -36,6 +36,18 @@ enum {
SRK_keytype = 4
};
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, unsigned char *h1,
+ unsigned char *h2, unsigned char h3, ...);
+int TSS_checkhmac1(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key,
+ unsigned int keylen, ...);
+
+int trusted_tpm_send(unsigned char *cmd, size_t buflen);
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
+
#define TPM_DEBUG 0
#if TPM_DEBUG
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
index 2a629acb4c3f..2d29f55923e3 100644
--- a/include/linux/adxl.h
+++ b/include/linux/adxl.h
@@ -7,7 +7,12 @@
#ifndef _LINUX_ADXL_H
#define _LINUX_ADXL_H
+#ifdef CONFIG_ACPI_ADXL
const char * const *adxl_get_component_names(void);
int adxl_decode(u64 addr, u64 component_values[]);
+#else
+static inline const char * const *adxl_get_component_names(void) { return NULL; }
+static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; }
+#endif
#endif /* _LINUX_ADXL_H */
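
A minimal caller sketch for the interface above, assuming the name array returned by adxl_get_component_names() is NULL-terminated and that eight component values suffice for the example:

#include <linux/adxl.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int example_adxl_decode(u64 addr)
{
        const char * const *names = adxl_get_component_names();
        u64 values[8];          /* example size; real callers count the names first */
        int i, ret;

        if (!names)
                return -EOPNOTSUPP;     /* CONFIG_ACPI_ADXL disabled */

        ret = adxl_decode(addr, values);
        if (ret)
                return ret;

        for (i = 0; i < 8 && names[i]; i++)
                pr_info("%s: 0x%llx\n", names[i], values[i]);

        return 0;
}
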
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 2c9756bd9c4c..b2488055fd1d 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -62,13 +62,19 @@
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
- VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
- VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
@@ -831,7 +837,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
- return VIRTCHNL_ERR_PARAM;
+ return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b47c7f716731..056fb627edb3 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -503,31 +503,23 @@ do { \
disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkg_from_page(struct bio *bio, struct page *page);
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
#else
-static inline int bio_associate_blkg_from_page(struct bio *bio,
- struct page *page) { return 0; }
+static inline int bio_associate_blkcg_from_page(struct bio *bio,
+ struct page *page) { return 0; }
#endif
#ifdef CONFIG_BLK_CGROUP
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-int bio_associate_blkg_from_css(struct bio *bio,
- struct cgroup_subsys_state *css);
-int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
-int bio_reassociate_blkg(struct request_queue *q, struct bio *bio);
void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkg_association(struct bio *dst, struct bio *src);
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkg_from_css(struct bio *bio,
- struct cgroup_subsys_state *css)
-{ return 0; }
-static inline int bio_associate_create_blkg(struct request_queue *q,
- struct bio *bio) { return 0; }
-static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
-{ return 0; }
+static inline int bio_associate_blkcg(struct bio *bio,
+ struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkg_association(struct bio *dst,
- struct bio *src) { }
+static inline void bio_clone_blkcg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acf5e8df3504..f58e97446abc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -28,8 +28,8 @@
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
*
* ::
*
@@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = 0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = ~0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = *src;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memcpy(dst, src, len);
}
/*
@@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
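
A minimal usage sketch for the helpers above; caller-visible behaviour is unchanged, bitmap_zero()/bitmap_fill()/bitmap_copy() simply always take the memset()/memcpy() path now, and MY_NBITS is an arbitrary example size:

#include <linux/bitmap.h>

#define MY_NBITS 128

static void example_bitmap(void)
{
        DECLARE_BITMAP(src, MY_NBITS);
        DECLARE_BITMAP(dst, MY_NBITS);

        bitmap_zero(src, MY_NBITS);
        bitmap_fill(src, MY_NBITS);
        bitmap_copy(dst, src, MY_NBITS);
}
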
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 7ddb1349394d..705f7c442691 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -236,33 +236,33 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
#ifdef __KERNEL__
#ifndef set_mask_bits
-#define set_mask_bits(ptr, _mask, _bits) \
+#define set_mask_bits(ptr, mask, bits) \
({ \
- const typeof(*ptr) mask = (_mask), bits = (_bits); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
+ typeof(*(ptr)) old__, new__; \
\
do { \
- old = READ_ONCE(*ptr); \
- new = (old & ~mask) | bits; \
- } while (cmpxchg(ptr, old, new) != old); \
+ old__ = READ_ONCE(*(ptr)); \
+ new__ = (old__ & ~mask__) | bits__; \
+ } while (cmpxchg(ptr, old__, new__) != old__); \
\
- new; \
+ new__; \
})
#endif
#ifndef bit_clear_unless
-#define bit_clear_unless(ptr, _clear, _test) \
+#define bit_clear_unless(ptr, clear, test) \
({ \
- const typeof(*ptr) clear = (_clear), test = (_test); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
+ typeof(*(ptr)) old__, new__; \
\
do { \
- old = READ_ONCE(*ptr); \
- new = old & ~clear; \
- } while (!(old & test) && \
- cmpxchg(ptr, old, new) != old); \
+ old__ = READ_ONCE(*(ptr)); \
+ new__ = old__ & ~clear__; \
+ } while (!(old__ & test__) && \
+ cmpxchg(ptr, old__, new__) != old__); \
\
- !(old & test); \
+ !(old__ & test__); \
})
#endif
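
A minimal usage sketch; callers are unchanged, the macro-internal identifiers simply gained the "__" suffix so that caller variables named mask, bits, clear or test are no longer shadowed:

#include <linux/bitops.h>
#include <linux/printk.h>

static unsigned long example_flags;

static void example_mask_ops(void)
{
        /* atomically replace bits 0-3 of example_flags with the value 0x5 */
        set_mask_bits(&example_flags, 0xfUL, 0x5UL);

        /* clear bit 7 unless bit 0 is set; true if bit 0 was not set */
        if (bit_clear_unless(&example_flags, BIT(7), BIT(0)))
                pr_debug("bit 7 cleared\n");
}
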
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1e76ceebeb5d..6d766a19f2bb 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -126,7 +126,7 @@ struct blkcg_gq {
struct request_list rl;
/* reference count */
- struct percpu_ref refcnt;
+ atomic_t refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -184,8 +184,6 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -232,59 +230,22 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-/**
- * blkcg_css - find the current css
- *
- * Find the css associated with either the kthread or the current task.
- * This may return a dying css, so it is up to the caller to use tryget logic
- * to confirm it is alive and well.
- */
-static inline struct cgroup_subsys_state *blkcg_css(void)
-{
- struct cgroup_subsys_state *css;
-
- css = kthread_blkcg();
- if (css)
- return css;
- return task_css(current, io_cgrp_id);
-}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
-/**
- * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
- *
- * DO NOT USE.
- * There is a flaw using this version of the function. In particular, this was
- * used in a broken paradigm where association was called on the given css. It
- * is possible though that the returned css from task_css() is in the process
- * of dying due to migration of the current task. So it is improper to assume
- * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will
- * take additional work to handle more gracefully.
- */
-static inline struct blkcg *__bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return css_to_blkcg(blkcg_css());
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, NULL if not associated.
- * Callers are expected to either handle NULL or know association has been
- * done prior to calling this.
- */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return NULL;
+ struct cgroup_subsys_state *css;
+
+ if (bio && bio->bi_css)
+ return css_to_blkcg(bio->bi_css);
+ css = kthread_blkcg();
+ if (css)
+ return css_to_blkcg(css);
+ return css_to_blkcg(task_css(current, io_cgrp_id));
}
static inline bool blk_cgroup_congested(void)
@@ -490,35 +451,26 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- percpu_ref_get(&blkg->refcnt);
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ atomic_inc(&blkg->refcnt);
}
/**
- * blkg_tryget - try and get a blkg reference
+ * blkg_try_get - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
-static inline bool blkg_tryget(struct blkcg_gq *blkg)
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
- return percpu_ref_tryget(&blkg->refcnt);
+ if (atomic_inc_not_zero(&blkg->refcnt))
+ return blkg;
+ return NULL;
}
-/**
- * blkg_tryget_closest - try and get a blkg ref on the closet blkg
- * @blkg: blkg to get
- *
- * This walks up the blkg tree to find the closest non-dying blkg and returns
- * the blkg that it did association with as it may not be the passed in blkg.
- */
-static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
-{
- while (!percpu_ref_tryget(&blkg->refcnt))
- blkg = blkg->parent;
- return blkg;
-}
+void __blkg_release_rcu(struct rcu_head *rcu);
/**
* blkg_put - put a blkg reference
@@ -526,7 +478,9 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- percpu_ref_put(&blkg->refcnt);
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ if (atomic_dec_and_test(&blkg->refcnt))
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
/**
@@ -579,36 +533,25 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
rcu_read_lock();
- if (bio && bio->bi_blkg) {
- blkcg = bio->bi_blkg->blkcg;
- if (blkcg == &blkcg_root)
- goto rl_use_root;
-
- blkg_get(bio->bi_blkg);
- rcu_read_unlock();
- return &bio->bi_blkg->rl;
- }
+ blkcg = bio_blkcg(bio);
- blkcg = css_to_blkcg(blkcg_css());
+ /* bypass blkg lookup and use @q->root_rl directly for root */
if (blkcg == &blkcg_root)
- goto rl_use_root;
+ goto root_rl;
+ /*
+ * Try to use blkg->rl. blkg lookup may fail under memory pressure
+ * or if either the blkcg or queue is going away. Fall back to
+ * root_rl in such cases.
+ */
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg))
- blkg = __blkg_lookup_create(blkcg, q);
-
- if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
- goto rl_use_root;
+ goto root_rl;
+ blkg_get(blkg);
rcu_read_unlock();
return &blkg->rl;
-
- /*
- * Each blkg has its own request_list, however, the root blkcg
- * uses the request_queue's root_rl. This is to avoid most
- * overhead for the root blkcg.
- */
-rl_use_root:
+root_rl:
rcu_read_unlock();
return &q->root_rl;
}
@@ -854,26 +797,32 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
-
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
+ struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
+ blkcg = bio_blkcg(bio);
+
+ /* associate blkcg if bio hasn't attached one */
+ bio_associate_blkcg(bio, &blkcg->css);
- bio_associate_create_blkg(q, bio);
- blkg = bio->bi_blkg;
+ blkg = blkg_lookup(blkcg, q);
+ if (unlikely(!blkg)) {
+ spin_lock_irq(q->queue_lock);
+ blkg = blkg_lookup_create(blkcg, q);
+ if (IS_ERR(blkg))
+ blkg = NULL;
+ spin_unlock_irq(q->queue_lock);
+ }
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
+ blkg = blkg ?: q->root_blkg;
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
@@ -885,8 +834,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
- blkcg_bio_issue_init(bio);
-
rcu_read_unlock();
return !throtl;
}
@@ -983,7 +930,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
-static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -999,7 +945,6 @@ static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
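
A minimal lookup sketch for the reverted reference counting, assuming the caller already has the blkcg and queue; blkg_try_get() returns NULL once the atomic refcount has reached zero:

#include <linux/blk-cgroup.h>

static struct blkcg_gq *example_blkg_get(struct blkcg *blkcg,
                                         struct request_queue *q)
{
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkg = blkg_lookup(blkcg, q);
        if (blkg)
                blkg = blkg_try_get(blkg);
        rcu_read_unlock();

        return blkg;    /* caller drops the reference with blkg_put() */
}
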
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 093a818c5b68..1dcf652ba0aa 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,6 +178,7 @@ struct bio {
* release. Read comment on top of bio_associate_current().
*/
struct io_context *bi_ioc;
+ struct cgroup_subsys_state *bi_css;
struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index 42515195d7d8..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/**
- * struct bootmem_data - per-node information used by the bootmem allocator
- * @node_min_pfn: the starting physical address of the node's memory
- * @node_low_pfn: the end physical address of the directly addressable memory
- * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
- * memory pages (including holes) on the node.
- * @last_end_off: the offset within the page of the end of the last allocation;
- * if 0, the page used is full
- * @hint_idx: the PFN of the page used with the last allocation;
- * together with using this with the @last_end_offset field,
- * a test can be made to see if allocations can be merged
- * with the page used for the last allocation rather than
- * using up a full new page.
- * @list: list entry in the linked list ordered by the memory addresses
- */
-typedef struct bootmem_data {
- unsigned long node_min_pfn;
- unsigned long node_low_pfn;
- void *node_bootmem_map;
- unsigned long last_end_off;
- unsigned long hint_idx;
- struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
- unsigned long freepfn,
- unsigned long startpfn,
- unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
- unsigned long addr,
- unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */
-#define BOOTMEM_DEFAULT 0
-#define BOOTMEM_EXCLUSIVE (1<<0)
-
-extern int reserve_bootmem(unsigned long addr,
- unsigned long size,
- int flags);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-
-extern void *__alloc_bootmem(unsigned long size,
- unsigned long align,
- unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_node_high(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit) __malloc;
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-
-#ifdef CONFIG_NO_BOOTMEM
-/* We are using top down, so it is safe to use 0 here */
-#define BOOTMEM_LOW_LIMIT 0
-#else
-#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
-#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
-#define alloc_bootmem(x) \
- __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_align(x, align) \
- __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_nopanic(x) \
- __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_nopanic(x) \
- __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_low_pages_nopanic(x) \
- __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-
-
-#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
-
-/* FIXME: use MEMBLOCK_ALLOC_* variants here */
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
-
-/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
-void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- nid);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
-#else
-
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-
-
-/* Fall back to all the existing bootmem APIs */
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low_nopanic(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return __alloc_bootmem_nopanic(size, align, min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
- SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
- min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_raw(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_nopanic(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- free_bootmem_node(NODE_DATA(nid), base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem_late(base, size);
-}
-#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-
-extern void *alloc_large_system_hash(const char *tablename,
- unsigned long bucketsize,
- unsigned long numentries,
- int scale,
- int flags,
- unsigned int *_hash_shift,
- unsigned int *_hash_mask,
- unsigned long low_limit,
- unsigned long high_limit);
-
-#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
-
-/* Only NUMA needs hash distribution. 64bit NUMA architectures have
- * sufficient vmalloc space.
- */
-#ifdef CONFIG_NUMA
-#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
-#else
-#define hashdist (0)
-#endif
-
-
-#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 9e8056ec20fa..d93e89761a8b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -51,6 +51,9 @@ struct bpf_reg_state {
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
+
+ /* Max size from any of the above. */
+ unsigned long raw;
};
/* Fixed part of pointer offset, pointer types only */
s32 off;
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 49c93b9308d7..68bb09c29ce8 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -81,7 +81,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
+
+/*
+ * Handle the largest possible rbd object in one message.
+ * There is no limit on the size of cephfs objects, but it has to obey
+ * rsize and wsize mount options anyway.
+ */
+#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index fc2b4491ee0a..800a2128d411 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -82,22 +82,6 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
};
-static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
-{
- switch (type) {
- case CEPH_MSG_DATA_NONE:
- case CEPH_MSG_DATA_PAGES:
- case CEPH_MSG_DATA_PAGELIST:
-#ifdef CONFIG_BLOCK
- case CEPH_MSG_DATA_BIO:
-#endif /* CONFIG_BLOCK */
- case CEPH_MSG_DATA_BVECS:
- return true;
- default:
- return false;
- }
-}
-
#ifdef CONFIG_BLOCK
struct ceph_bio_iter {
@@ -181,7 +165,6 @@ struct ceph_bvec_iter {
} while (0)
struct ceph_msg_data {
- struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
@@ -202,7 +185,6 @@ struct ceph_msg_data {
struct ceph_msg_data_cursor {
size_t total_resid; /* across all data items */
- struct list_head *data_head; /* = &ceph_msg->data */
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
@@ -240,7 +222,9 @@ struct ceph_msg {
struct ceph_buffer *middle;
size_t data_length;
- struct list_head data;
+ struct ceph_msg_data *data;
+ int num_data_items;
+ int max_data_items;
struct ceph_msg_data_cursor cursor;
struct ceph_connection *con;
@@ -381,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+ gfp_t flags, bool can_fail);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);
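
A minimal allocation sketch for the new entry point; the message type, front length and data-item count here are illustrative:

#include <linux/ceph/messenger.h>

static struct ceph_msg *example_msg_alloc(int front_len, int num_data_items)
{
        /* size the preallocated data-item array up front */
        return ceph_msg_new2(CEPH_MSG_OSD_OP, front_len, num_data_items,
                             GFP_NOFS, false);
}
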
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index 76c98a512758..729cdf700eae 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -13,14 +13,15 @@ struct ceph_msgpool {
mempool_t *pool;
int type; /* preallocated message type */
int front_len; /* preallocated payload size */
+ int max_data_items;
};
-extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
- int front_len, int size, bool blocking,
- const char *name);
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
+ int front_len, int max_data_items, int size,
+ const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
- int front_len);
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+ int max_data_items);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 02096da01845..7a2af5034278 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -136,6 +136,13 @@ struct ceph_osd_req_op {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
+ struct {
+ u64 snapid;
+ u64 src_version;
+ u8 flags;
+ u32 src_fadvise_flags;
+ struct ceph_osd_data osd_data;
+ } copy_from;
};
};
@@ -444,9 +451,8 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- const char *class, const char *method);
+int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
+ const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
@@ -511,6 +517,16 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct timespec64 *mtime,
struct page **pages, int nr_pages);
+int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ struct ceph_object_id *dst_oid,
+ struct ceph_object_locator *dst_oloc,
+ u32 dst_fadvise_flags,
+ u8 copy_from_flags);
+
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
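
A minimal call sketch for the new copy-from helper, assuming the caller has already set up the object ids and locators; the fadvise and copy-from flag values come from the rados.h additions further below:

#include <linux/ceph/osd_client.h>

static int example_copy_from(struct ceph_osd_client *osdc,
                             u64 src_snapid, u64 src_version,
                             struct ceph_object_id *src_oid,
                             struct ceph_object_locator *src_oloc,
                             struct ceph_object_id *dst_oid,
                             struct ceph_object_locator *dst_oloc)
{
        return ceph_osdc_copy_from(osdc, src_snapid, src_version,
                                   src_oid, src_oloc,
                                   CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL,
                                   dst_oid, dst_oloc,
                                   CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
                                   CEPH_OSD_COPY_FROM_FLAG_FLUSH);
}
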
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index d0223364349f..5dead8486fd8 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -23,16 +23,7 @@ struct ceph_pagelist_cursor {
size_t room; /* room remaining to reset to */
};
-static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
-{
- INIT_LIST_HEAD(&pl->head);
- pl->mapped_tail = NULL;
- pl->length = 0;
- pl->room = 0;
- INIT_LIST_HEAD(&pl->free_list);
- pl->num_pages_free = 0;
- refcount_set(&pl->refcnt, 1);
-}
+struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
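
A minimal sketch of the new allocation path; the GFP flag depends on the calling context, and ceph_pagelist_append() is the existing helper for adding payload:

#include <linux/ceph/pagelist.h>

static int example_pagelist(void)
{
        struct ceph_pagelist *pl;
        int ret;

        pl = ceph_pagelist_alloc(GFP_NOFS);
        if (!pl)
                return -ENOMEM;

        ret = ceph_pagelist_append(pl, "example", 7);

        ceph_pagelist_release(pl);      /* drops the initial reference */
        return ret;
}
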
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index f1988387c5ad..3eb0e55665b4 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -410,6 +410,14 @@ enum {
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
+ CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in
+ the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed
+ in the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only
+ once by this client */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
@@ -432,6 +440,15 @@ enum {
};
enum {
+ CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */
+ CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
+ * cloneid */
+ CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
+};
+
+enum {
CEPH_OSD_WATCH_OP_UNWATCH = 0,
CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
/* note: use only ODD ids to prevent pre-giant code from
@@ -497,6 +514,17 @@ struct ceph_osd_op {
__le64 expected_object_size;
__le64 expected_write_size;
} __attribute__ ((packed)) alloc_hint;
+ struct {
+ __le64 snapid;
+ __le64 src_version;
+ __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */
+ /*
+ * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags
+ * for src object, flags for dest object are in
+ * ceph_osd_op::flags.
+ */
+ __le32 src_fadvise_flags;
+ } __attribute__ ((packed)) copy_from;
};
__le32 payload_len;
} __attribute__ ((packed));
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 9968332cceed..9d12757a65b0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,8 +93,6 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
-struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
- struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 08b1aa70a38d..60c51871b04b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -119,6 +119,11 @@ struct clk_duty {
* Called with enable_lock held. This function must not
* sleep.
*
+ * @save_context: Save the context of the clock in preparation for poweroff.
+ *
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
* @recalc_rate Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call.
@@ -223,6 +228,8 @@ struct clk_ops {
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
void (*disable_unused)(struct clk_hw *hw);
+ int (*save_context)(struct clk_hw *hw);
+ void (*restore_context)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
@@ -1011,5 +1018,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
+void clk_gate_restore_context(struct clk_hw *hw);
+
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
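
A minimal provider-side sketch of the new callbacks; the foo_* names and the stashed register state are illustrative, and clk_gate_restore_context() can serve as the restore hook for simple gate clocks:

#include <linux/clk-provider.h>

static int foo_clk_save_context(struct clk_hw *hw)
{
        /* read and stash the enable/divider registers here */
        return 0;
}

static void foo_clk_restore_context(struct clk_hw *hw)
{
        /* re-program the hardware from the stashed state */
}

static const struct clk_ops foo_clk_ops = {
        .save_context    = foo_clk_save_context,
        .restore_context = foo_clk_restore_context,
        /* plus the usual enable/disable/recalc_rate callbacks */
};
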
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 4f750c481b82..a7773b5c0b9f 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -312,7 +312,26 @@ struct clk *clk_get(struct device *dev, const char *id);
*/
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
-
+/**
+ * clk_bulk_get_all - lookup and obtain all available references to clock
+ * producers.
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * This helper function allows drivers to get all clock consumers in one
+ * operation. If any of the clocks cannot be acquired, any clocks that
+ * were already obtained are freed before returning to the caller.
+ *
+ * Returns a positive value for the number of clocks obtained, with the
+ * clock references stored in the clk_bulk_data table pointed to by @clks.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get_all should not be called from within interrupt context.
+ */
+int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
@@ -327,6 +346,22 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_all - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained while the
+ * clock references are stored in the clk_bulk_data table pointed to by @clks.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get several clock
+ * consumers in one operation with management; the clocks will
+ * automatically be freed when the device is unbound.
+ */
+
+int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
@@ -488,6 +523,19 @@ void clk_put(struct clk *clk);
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
/**
+ * clk_bulk_put_all - "free" all the clock sources
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put_all should not be called from within interrupt context.
+ */
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
+
+/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
* @clk: clock source acquired with devm_clk_get()
@@ -629,6 +677,23 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+/**
+ * clk_save_context - save clock context for poweroff
+ *
+ * Saves the context of the clock registers for power states in which their
+ * contents will be lost. Occurs deep within the suspend
+ * code so locking is not necessary.
+ */
+int clk_save_context(void);
+
+/**
+ * clk_restore_context - restore clock context after poweroff
+ *
+ * This occurs with all clocks enabled. Occurs deep within the resume code
+ * so locking is not necessary.
+ */
+void clk_restore_context(void);
+
#else /* !CONFIG_HAVE_CLK */
static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -642,6 +707,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
return 0;
}
+static inline int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
return NULL;
@@ -653,6 +724,13 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
return 0;
}
+static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+
+ return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -663,6 +741,8 @@ static inline void clk_put(struct clk *clk) {}
static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
+static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
@@ -728,6 +808,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
+
+static inline int clk_save_context(void)
+{
+ return 0;
+}
+
+static inline void clk_restore_context(void) {}
+
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
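A rough consumer-side sketch of the new bulk helpers (assumed driver code, not part of the patch): grab every clock the device lists, enable them, and drop them again with clk_bulk_put_all(). The existing clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() helpers are used for the enable step.

struct my_priv {
	struct clk_bulk_data *clks;
	int num_clks;
};

static int my_probe(struct device *dev, struct my_priv *priv)
{
	int ret;

	priv->num_clks = clk_bulk_get_all(dev, &priv->clks);
	if (priv->num_clks < 0)
		return priv->num_clks;	/* lookup failed; nothing to release */

	/* A return of 0 simply means the device lists no clocks. */
	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (ret)
		clk_bulk_put_all(priv->num_clks, priv->clks);
	return ret;
}

static void my_remove(struct my_priv *priv)
{
	clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
	clk_bulk_put_all(priv->num_clks, priv->clks);
}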
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 9ebf1f8243bb..0ebbe2f0b45e 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -1,14 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright 2013 Ideas On Board SPRL
* Copyright 2013, 2014 Horms Solutions Ltd.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Contact: Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_CLK_RENESAS_H_
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index a8faa38b1ed6..eacc5df57b99 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -159,6 +159,7 @@ struct clk_hw_omap {
const char *clkdm_name;
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
+ u32 context;
};
/*
@@ -290,9 +291,15 @@ struct ti_clk_features {
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
+#define TI_CLK_CLKCTRL_COMPAT BIT(4)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
+int omap3_noncore_dpll_save_context(struct clk_hw *hw);
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
+
+int omap3_core_dpll_save_context(struct clk_hw *hw);
+void omap3_core_dpll_restore_context(struct clk_hw *hw);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d30e4dbd4be2..06e77473f175 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -488,8 +488,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ /* fall through */
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ /* fall through */
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ /* fall through */
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;
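The comments added above mark each case as deliberately falling into the next one, which keeps -Wimplicit-fallthrough and static checkers quiet. A hypothetical helper using the same pattern:

static u32 sum_low_bytes(const u8 *buf, int width)
{
	u32 sum = 0;

	switch (width) {
	case 4: sum += buf[3];
		/* fall through */
	case 3: sum += buf[2];
		/* fall through */
	case 2: sum += buf[1];
		/* fall through */
	case 1: sum += buf[0];
	}
	return sum;
}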
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index b1ce500fe8b3..3e7dafb3ea80 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -21,8 +21,6 @@
#define __SANITIZE_ADDRESS__
#endif
-#define __no_sanitize_address __attribute__((no_sanitize("address")))
-
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
@@ -41,6 +39,3 @@
* compilers, like ICC.
*/
#define barrier() __asm__ __volatile__("" : : : "memory")
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#define __assume_aligned(a, ...) \
- __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 90ddfefb6c2b..c0f5db3a9621 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -68,31 +68,20 @@
*/
#define uninitialized_var(x) x = x
-#ifdef __CHECKER__
-#define __must_be_array(a) 0
-#else
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#endif
-
#ifdef RETPOLINE
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#define __optimize(level) __attribute__((__optimize__(level)))
-
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#ifndef __CHECKER__
-#define __compiletime_warning(message) __attribute__((warning(message)))
-#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_warning(message) __attribute__((__warning__(message)))
+#define __compiletime_error(message) __attribute__((__error__(message)))
-#ifdef LATENT_ENTROPY_PLUGIN
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
-#endif /* __CHECKER__ */
/*
* calling noreturn functions, __builtin_unreachable() and __builtin_trap()
@@ -107,10 +96,6 @@
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() \
do { \
@@ -119,9 +104,6 @@
__builtin_unreachable(); \
} while (0)
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-
#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
#define __randomize_layout __attribute__((randomize_layout))
#define __no_randomize_layout __attribute__((no_randomize_layout))
@@ -131,32 +113,6 @@
#endif
/*
- * When used with Link Time Optimization, gcc can optimize away C functions or
- * variables which are referenced only from assembly code. __visible tells the
- * optimizer that something else uses this function or variable, thus preventing
- * this.
- */
-#define __visible __attribute__((externally_visible))
-
-/* gcc version specific checks */
-
-#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
-/*
- * __assume_aligned(n, k): Tell the optimizer that the returned
- * pointer can be assumed to be k modulo n. The second argument is
- * optional (default 0), so we use a variadic macro to make the
- * shorthand.
- *
- * Beware: Do not apply this to functions which may return
- * ERR_PTRs. Also, it is probably unwise to apply it to functions
- * returning extra information in the low bits (but in that case the
- * compiler should see some alignment anyway, when the return value is
- * massaged by 'flags = ptr & 3; ptr &= ~3;').
- */
-#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
-#endif
-
-/*
* GCC 'asm goto' miscompiles certain code sequences:
*
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
@@ -187,39 +143,22 @@
#define KASAN_ABI_VERSION 3
#endif
-#if GCC_VERSION >= 40902
/*
- * Tell the compiler that address safety instrumentation (KASAN)
- * should not be applied to that function.
- * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * Because __no_sanitize_address conflicts with inlining:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * we do one or the other.
*/
-#define __no_sanitize_address __attribute__((no_sanitize_address))
#ifdef CONFIG_KASAN
#define __no_sanitize_address_or_inline \
__no_sanitize_address __maybe_unused notrace
#else
#define __no_sanitize_address_or_inline inline
#endif
-#endif
#if GCC_VERSION >= 50100
-/*
- * Mark structures as requiring designated initializers.
- * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
- */
-#define __designated_init __attribute__((designated_init))
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
-#if !defined(__noclone)
-#define __noclone /* not needed */
-#endif
-
-#if !defined(__no_sanitize_address)
-#define __no_sanitize_address
-#define __no_sanitize_address_or_inline inline
-#endif
-
/*
* Turn individual warnings and errors on and off locally, depending
* on version.
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 4c7f9befa9f6..517bd14e1222 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -29,17 +29,8 @@
*/
#define OPTIMIZER_HIDE_VAR(var) barrier()
-/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
-#define __must_be_array(a) 0
-
#endif
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
#define __builtin_bswap16 _bswap16
-
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like clang.
- */
-#define __visible __attribute__((externally_visible))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 4170fcee5adb..18c80cfa4fc4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -23,8 +23,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __branch_check__(x, expect, is_constant) ({ \
long ______r; \
static struct ftrace_likely_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_annotated_branch"))) \
+ __aligned(4) \
+ __section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -59,8 +59,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
({ \
int ______r; \
static struct ftrace_branch_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_branch"))) \
+ __aligned(4) \
+ __section("_ftrace_branch") \
______f = { \
.func = __func__, \
.file = __FILE__, \
@@ -115,7 +115,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
-# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
+# define unreachable() do { \
+ annotate_unreachable(); \
+ __builtin_unreachable(); \
+} while (0)
#endif
/*
@@ -137,7 +140,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
- __attribute__((section("___kentry" "+" #sym ), used)) \
+ __section("___kentry" "+" #sym ) \
= (unsigned long)&sym;
#endif
@@ -278,7 +281,7 @@ unsigned long read_word_at_a_time(const void *addr)
* visible to the compiler.
*/
#define __ADDRESSABLE(sym) \
- static void * __attribute__((section(".discard.addressable"), used)) \
+ static void * __section(".discard.addressable") __used \
__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
/**
@@ -331,10 +334,6 @@ static inline void *offset_to_ptr(const int *off)
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-#ifndef __optimize
-# define __optimize(level)
-#endif
-
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -376,4 +375,7 @@ static inline void *offset_to_ptr(const int *off)
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+
#endif /* __LINUX_COMPILER_H */
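__must_be_array() now lives in compiler.h so every compiler gets the same definition; it is the piece that lets ARRAY_SIZE()-style macros reject plain pointers at compile time. A small illustration (my_array_size and demo are made-up names):

#include <linux/build_bug.h>
#include <linux/compiler.h>

#define my_array_size(arr) \
	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

static void __maybe_unused demo(void)
{
	int table[8];
	int *ptr = table;

	BUILD_BUG_ON(my_array_size(table) != 8);	/* fine: a real array */

	/* my_array_size(ptr) would not compile: &ptr[0] has ptr's own pointer type */
	(void)ptr;
}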
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
new file mode 100644
index 000000000000..6b28c1b7310c
--- /dev/null
+++ b/include/linux/compiler_attributes.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_ATTRIBUTES_H
+#define __LINUX_COMPILER_ATTRIBUTES_H
+
+/*
+ * The attributes in this file are unconditionally defined and they directly
+ * map to compiler attribute(s) -- except those that are optional.
+ *
+ * Any other "attributes" (i.e. those that depend on a configuration option,
+ * on a compiler, on an architecture, on plugins, on other attributes...)
+ * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ *
+ * This file is meant to be sorted (by actual attribute name,
+ * not by #define identifier). Use the __attribute__((__name__)) syntax
+ * (i.e. with underscores) to avoid future collisions with other macros.
+ * If an attribute is optional, state the reason in the comment.
+ */
+
+/*
+ * To check for optional attributes, we use __has_attribute, which is supported
+ * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
+ * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ *
+ * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
+ * depending on the compiler used to build it; however, these attributes have
+ * no semantic effects for sparse, so it does not matter. Also note that,
+ * in order to avoid sparse's warnings, even the unsupported ones must be
+ * defined to 0.
+ */
+#ifndef __has_attribute
+# define __has_attribute(x) __GCC4_has_attribute_##x
+# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___designated_init__ 0
+# define __GCC4_has_attribute___externally_visible__ 1
+# define __GCC4_has_attribute___noclone__ 1
+# define __GCC4_has_attribute___optimize__ 1
+# define __GCC4_has_attribute___nonstring__ 0
+# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
+ */
+#define __alias(symbol) __attribute__((__alias__(#symbol)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
+ */
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __aligned_largest __attribute__((__aligned__))
+
+/*
+ * Note: users of __always_inline currently do not write "inline" themselves,
+ * which seems to be required by gcc to apply the attribute according
+ * to its docs (and also "warning: always_inline function might not be
+ * inlinable [-Wattributes]" is emitted).
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
+ * clang: mentioned
+ */
+#define __always_inline inline __attribute__((__always_inline__))
+
+/*
+ * The second argument is optional (default 0), so we use a variadic macro
+ * to make the shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ *
+ * Optional: only supported since gcc >= 4.9
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
+ */
+#if __has_attribute(__assume_aligned__)
+# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#else
+# define __assume_aligned(a, ...)
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ */
+#define __cold __attribute__((__cold__))
+
+/*
+ * Note the long name.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+ */
+#define __attribute_const__ __attribute__((__const__))
+
+/*
+ * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
+ * attribute warnings entirely and for good") for more information.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
+ */
+#define __deprecated
+
+/*
+ * Optional: only supported since gcc >= 5.1
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
+ */
+#if __has_attribute(__designated_init__)
+# define __designated_init __attribute__((__designated_init__))
+#else
+# define __designated_init
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
+ */
+#if __has_attribute(__externally_visible__)
+# define __visible __attribute__((__externally_visible__))
+#else
+# define __visible
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#format
+ */
+#define __printf(a, b) __attribute__((__format__(printf, a, b)))
+#define __scanf(a, b) __attribute__((__format__(scanf, a, b)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
+ */
+#define __gnu_inline __attribute__((__gnu_inline__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ */
+#define __malloc __attribute__((__malloc__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
+ */
+#define __mode(x) __attribute__((__mode__(x)))
+
+/*
+ * Optional: not supported by clang
+ * Note: icc does not recognize gcc's no-tracer
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute
+ */
+#if __has_attribute(__noclone__)
+# if __has_attribute(__optimize__)
+# define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
+# else
+# define __noclone __attribute__((__noclone__))
+# endif
+#else
+# define __noclone
+#endif
+
+/*
+ * Note the missing underscores.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
+ * clang: mentioned
+ */
+#define noinline __attribute__((__noinline__))
+
+/*
+ * Optional: only supported since gcc >= 8
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
+ */
+#if __has_attribute(__nonstring__)
+# define __nonstring __attribute__((__nonstring__))
+#else
+# define __nonstring
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id1
+ */
+#define __noreturn __attribute__((__noreturn__))
+
+/*
+ * Optional: only supported since gcc >= 4.8
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fsanitize_005faddress-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-sanitize-address-no-address-safety-analysis
+ */
+#if __has_attribute(__no_sanitize_address__)
+# define __no_sanitize_address __attribute__((__no_sanitize_address__))
+#else
+# define __no_sanitize_address
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
+ */
+#define __packed __attribute__((__packed__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
+ */
+#define __pure __attribute__((__pure__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
+ */
+#define __section(S) __attribute__((__section__(#S)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused
+ */
+#define __always_unused __attribute__((__unused__))
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute
+ */
+#define __used __attribute__((__used__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
+ */
+#define __weak __attribute__((__weak__))
+
+#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
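The new file's convention for optional attributes is easy to extend. A hypothetical future attribute would follow the same two-step shape shown below; the attribute name is invented purely for illustration.

/* 1) Teach the gcc < 5 fallback about it, inside the #ifndef __has_attribute block: */
# define __GCC4_has_attribute___some_future_attr__ 0

/* 2) Then define the optional shorthand, empty on compilers that lack it: */
#if __has_attribute(__some_future_attr__)
# define __some_future_attr __attribute__((__some_future_attr__))
#else
# define __some_future_attr
#endif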
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 97cfe29b3f0a..3439d7d0249a 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
@@ -54,6 +55,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#ifdef __KERNEL__
+/* Attributes */
+#include <linux/compiler_attributes.h>
+
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
@@ -78,12 +82,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#include <asm/compiler.h>
#endif
-/*
- * Generic compiler-independent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
- */
-
struct ftrace_branch_data {
const char *func;
const char *file;
@@ -106,10 +104,6 @@ struct ftrace_likely_data {
unsigned long constant;
};
-/* Don't. Just don't. */
-#define __deprecated
-#define __deprecated_for_modules
-
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -119,10 +113,6 @@ struct ftrace_likely_data {
* compilers. We don't consider that to be an error, so set them to nothing.
* For example, some of them are for compiler specific plugins.
*/
-#ifndef __designated_init
-# define __designated_init
-#endif
-
#ifndef __latent_entropy
# define __latent_entropy
#endif
@@ -140,17 +130,6 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
-#ifndef __visible
-#define __visible
-#endif
-
-/*
- * Assume alignment of return value.
- */
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
-#endif
-
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -159,14 +138,6 @@ struct ftrace_likely_data {
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#ifndef __attribute_const__
-#define __attribute_const__ __attribute__((__const__))
-#endif
-
-#ifndef __noclone
-#define __noclone
-#endif
-
/* Helpers for emitting diagnostics in pragmas. */
#ifndef __diag
#define __diag(string)
@@ -186,43 +157,16 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
-/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
- */
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-#define __mode(x) __attribute__((mode(x)))
-#define __malloc __attribute__((__malloc__))
-#define __used __attribute__((__used__))
-#define __noreturn __attribute__((noreturn))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
-#define __alias(symbol) __attribute__((alias(#symbol)))
-#define __cold __attribute__((cold))
-#define __section(S) __attribute__((__section__(#S)))
-
-
#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check __attribute__((warn_unused_result))
+#define __must_check __attribute__((__warn_unused_result__))
#else
#define __must_check
#endif
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#if defined(CC_USING_HOTPATCH)
#define notrace __attribute__((hotpatch(0, 0)))
#else
-#define notrace __attribute__((no_instrument_function))
+#define notrace __attribute__((__no_instrument_function__))
#endif
/*
@@ -231,23 +175,11 @@ struct ftrace_likely_data {
* stack and frame pointer being set up and there is no chance to
* restore the lr register to the value before mcount was called.
*/
-#define __naked __attribute__((naked)) notrace
+#define __naked __attribute__((__naked__)) notrace
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
/*
- * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
- * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
- * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
- * defined so the gnu89 semantics are the default.
- */
-#ifdef __GNUC_STDC_INLINE__
-# define __gnu_inline __attribute__((gnu_inline))
-#else
-# define __gnu_inline
-#endif
-
-/*
* Force always-inline if the user requests it so via the .config.
* GCC does not warn about unused static inline functions for
* -Wunused-function. This turns out to avoid the need for complex #ifdef
@@ -258,22 +190,20 @@ struct ftrace_likely_data {
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
+ * Do not use __always_inline here, since currently it expands to inline again
+ * (which would break users of __always_inline).
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING)
-#define inline \
- inline __attribute__((always_inline, unused)) notrace __gnu_inline
+#define inline inline __attribute__((__always_inline__)) __gnu_inline \
+ __maybe_unused notrace
#else
-#define inline inline __attribute__((unused)) notrace __gnu_inline
+#define inline inline __gnu_inline \
+ __maybe_unused notrace
#endif
#define __inline__ inline
-#define __inline inline
-#define noinline __attribute__((noinline))
-
-#ifndef __always_inline
-#define __always_inline inline __attribute__((always_inline))
-#endif
+#define __inline inline
/*
* Rather then using noinline to prevent stack consumption, use
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8252df30b9a1..c95c0807471f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1752,6 +1752,25 @@ struct block_device_operations;
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+/*
+ * These flags control the behavior of the remap_file_range function pointer.
+ * If it is called with len == 0 that means "remap to end of source file".
+ * See Documentation/filesystems/vfs.txt for more details about this call.
+ *
+ * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
+ * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
+ */
+#define REMAP_FILE_DEDUP (1 << 0)
+#define REMAP_FILE_CAN_SHORTEN (1 << 1)
+
+/*
+ * These flags signal that the caller is ok with altering various aspects of
+ * the behavior of the remap operation. The changes must be made by the
+ * implementation; the vfs remap helper functions can take advantage of them.
+ * Flags in this category exist to preserve the quirky behavior of the hoisted
+ * btrfs clone/dedupe ioctls.
+ */
+#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
struct iov_iter;
@@ -1790,10 +1809,9 @@ struct file_operations {
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
- int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
- u64);
- int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
- u64);
+ loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
} __randomize_layout;
@@ -1856,21 +1874,21 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
- struct inode *inode_out, loff_t pos_out,
- u64 *len, bool is_dedupe);
-extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
- struct inode *dest, loff_t destoff,
- loff_t len, bool *is_same);
+extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count,
+ unsigned int remap_flags);
+extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
+extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
-extern int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
- struct file *dst_file, loff_t dst_pos,
- u64 len);
+extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos,
+ loff_t len, unsigned int remap_flags);
struct super_operations {
@@ -2998,6 +3016,9 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+extern int generic_remap_checks(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count, unsigned int remap_flags);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
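A rough sketch of how a filesystem's ->remap_file_range() might consume the new flags and the generic prep helper added above; myfs_* is hypothetical and the actual block-sharing step is elided.

static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    loff_t len, unsigned int remap_flags)
{
	int ret;

	/* Reject flags this hypothetical filesystem does not understand. */
	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	/* May shorten len when REMAP_FILE_CAN_SHORTEN was passed. */
	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	/*
	 * ... share or deduplicate the blocks here; for REMAP_FILE_DEDUP the
	 * prep helper has already compared the source and destination data ...
	 */

	return len;	/* number of bytes actually remapped */
}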
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 24bcc5eec6b4..76f8db0b0e71 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+ int node);
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dde947083d4e..c6fb869a81c0 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * Authors: Jérôme Glisse <jglisse@redhat.com>
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Heterogeneous Memory Management (HMM)
@@ -274,14 +274,29 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
- * enum hmm_update_type - type of update
+ * enum hmm_update_event - type of update
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
*/
-enum hmm_update_type {
+enum hmm_update_event {
HMM_UPDATE_INVALIDATE,
};
/*
+ * struct hmm_update - HMM update information for callback
+ *
+ * @start: virtual start address of the range to update
+ * @end: virtual end address of the range to update
+ * @event: event triggering the update (what is happening)
+ * @blockable: can the callback block/sleep?
+ */
+struct hmm_update {
+ unsigned long start;
+ unsigned long end;
+ enum hmm_update_event event;
+ bool blockable;
+};
+
+/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
* @update: callback to update range on a device
@@ -300,9 +315,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
- * @update_type: type of update that occurred to the CPU page table
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
+ * @update: update information (see struct hmm_update)
+ * Returns: -EAGAIN if update.blockable is false and the callback needs
+ * to block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@@ -313,10 +328,8 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
- void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
- enum hmm_update_type update_type,
- unsigned long start,
- unsigned long end);
+ int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
+ const struct hmm_update *update);
};
/*
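Sketch of a mirror callback honouring the new blockable flag from struct hmm_update; struct my_mirror and the invalidation step are hypothetical driver pieces, not part of the patch.

#include <linux/hmm.h>
#include <linux/mutex.h>

struct my_mirror {
	struct hmm_mirror mirror;
	struct mutex lock;	/* protects the device page table */
};

static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct my_mirror *m = container_of(mirror, struct my_mirror, mirror);

	if (update->blockable)
		mutex_lock(&m->lock);
	else if (!mutex_trylock(&m->lock))
		return -EAGAIN;		/* caller cannot sleep; ask it to retry */

	/* ... invalidate device page-table entries for [start, end) ... */

	mutex_unlock(&m->lock);
	return 0;
}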
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index c759d1cbcedd..a64f21a97369 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -37,7 +37,9 @@ struct in_device {
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
unsigned long mr_maxdelay;
- unsigned char mr_qrv;
+ unsigned long mr_qi; /* Query Interval */
+ unsigned long mr_qri; /* Query Response Interval */
+ unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
unsigned char mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 05d8fb5a06c4..bc9af551fc83 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -17,6 +17,9 @@
#ifdef CONFIG_KEYS
+struct kernel_pkey_query;
+struct kernel_pkey_params;
+
/*
* key under-construction record
* - passed to the request_key actor if supplied
@@ -155,6 +158,14 @@ struct key_type {
*/
struct key_restriction *(*lookup_restriction)(const char *params);
+ /* Asymmetric key accessor functions. */
+ int (*asym_query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+ int (*asym_eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+ int (*asym_verify_signature)(struct kernel_pkey_params *params,
+ const void *in, const void *in2);
+
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644
index 000000000000..c7c48c79ce0e
--- /dev/null
+++ b/include/linux/keyctl.h
@@ -0,0 +1,46 @@
+/* keyctl kernel bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef __LINUX_KEYCTL_H
+#define __LINUX_KEYCTL_H
+
+#include <uapi/linux/keyctl.h>
+
+struct kernel_pkey_query {
+ __u32 supported_ops; /* Which ops are supported */
+ __u32 key_size; /* Size of the key in bits */
+ __u16 max_data_size; /* Maximum size of raw data to sign in bytes */
+ __u16 max_sig_size; /* Maximum size of signature in bytes */
+ __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */
+ __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */
+};
+
+enum kernel_pkey_operation {
+ kernel_pkey_encrypt,
+ kernel_pkey_decrypt,
+ kernel_pkey_sign,
+ kernel_pkey_verify,
+};
+
+struct kernel_pkey_params {
+ struct key *key;
+ const char *encoding; /* Encoding (eg. "oaep" or "raw" for none) */
+ const char *hash_algo; /* Digest algorithm used (eg. "sha1") or NULL if N/A */
+ char *info; /* Modified info string to be released later */
+ __u32 in_len; /* Input data size */
+ union {
+ __u32 out_len; /* Output buffer size (enc/dec/sign) */
+ __u32 in2_len; /* 2nd input data size (verify) */
+ };
+ enum kernel_pkey_operation op : 8;
+};
+
+#endif /* __LINUX_KEYCTL_H */
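A sketch of a key type filling in the new asym_query hook with the structures above; the key type, its name and the 2048-bit sizes are invented for illustration, and the KEYCTL_SUPPORTS_* flags are assumed to come from the matching uapi keyctl header.

#include <linux/key-type.h>
#include <linux/keyctl.h>

static int my_asym_query(const struct kernel_pkey_params *params,
			 struct kernel_pkey_query *info)
{
	/* Pretend this key type wraps a 2048-bit RSA key. */
	info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT |
			      KEYCTL_SUPPORTS_DECRYPT |
			      KEYCTL_SUPPORTS_SIGN |
			      KEYCTL_SUPPORTS_VERIFY;
	info->key_size	    = 2048;		/* bits */
	info->max_data_size = 2048 / 8;
	info->max_sig_size  = 2048 / 8;
	info->max_enc_size  = 2048 / 8;
	info->max_dec_size  = 2048 / 8;
	return 0;
}

/* Wired into the key type alongside the other new hooks: */
static struct key_type my_key_type = {
	.name	    = "my_asym_key",
	.asym_query = my_asym_query,
};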
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 2acdd046df2d..aee299a6aa76 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,7 +2,6 @@
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
*
@@ -16,6 +15,19 @@
#include <linux/init.h>
#include <linux/mm.h>
+#include <asm/dma.h>
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
@@ -120,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);
+unsigned long memblock_free_all(void);
+void reset_node_managed_pages(pg_data_t *pgdat);
+void reset_all_zones_managed_pages(void);
+
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
@@ -301,10 +317,116 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);
+
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+
+static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_raw(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_low(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
+static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_node(phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
+ int nid)
+{
+ return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
+ MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void __init memblock_free_early(phys_addr_t base,
+ phys_addr_t size)
+{
+ __memblock_free_early(base, size);
+}
+
+static inline void __init memblock_free_early_nid(phys_addr_t base,
+ phys_addr_t size, int nid)
+{
+ __memblock_free_early(base, size);
+}
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+ __memblock_free_late(base, size);
+}
/*
* Set the allocation direction to bottom-up or top-down.
@@ -324,10 +446,6 @@ static inline bool memblock_bottom_up(void)
return memblock.bottom_up;
}
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
-#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
enum memblock_flags flags);
@@ -433,6 +551,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
i < memblock_type->cnt; \
i++, rgn = &memblock_type->regions[i])
+extern void *alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long low_limit,
+ unsigned long high_limit);
+
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
+ * shift passed via *_hash_shift */
+#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
+
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
@@ -440,12 +583,6 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
-#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
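The new inline wrappers above all funnel into memblock_alloc_try_nid() and friends. An early-boot caller might now look like the sketch below; the buffer and its lifetime are invented, and SZ_4K is assumed from <linux/sizes.h>.

#include <linux/memblock.h>
#include <linux/sizes.h>

void __init my_early_setup(void)
{
	void *buf;

	/* Zeroed, cache-aligned, anywhere accessible; the non-_nopanic variants panic on failure. */
	buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);

	/* ... use buf while the page allocator is not up yet ... */

	/* Hand temporary memory back before memblock_free_all() runs. */
	memblock_free_early(__pa(buf), SZ_4K);
}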
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 34a28227068d..ffd9cd10fcf3 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -301,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
+extern void __remove_memory(int nid, u64 start, u64 size);
#else
static inline bool is_mem_section_removable(unsigned long pfn,
@@ -317,11 +318,13 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
}
static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
@@ -330,7 +333,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct pglist_data *pgdat,
unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..bac395f1d00a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 20949dde35cd..e44e3ec8a9c7 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -36,7 +36,7 @@
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
* SPI requires up to 32 additional bytes for responses.
- * */
+ */
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
#define EC_MAX_RESPONSE_OVERHEAD 32
@@ -58,13 +58,14 @@ enum {
EC_MAX_MSG_BYTES = 64 * 1024,
};
-/*
- * @version: Command version number (often 0)
- * @command: Command to send (EC_CMD_...)
- * @outsize: Outgoing length in bytes
- * @insize: Max number of bytes to accept from EC
- * @result: EC's response to the command (separate from communication failure)
- * @data: Where to put the incoming data from EC and outgoing data to EC
+/**
+ * struct cros_ec_command - Information about a ChromeOS EC command.
+ * @version: Command version number (often 0).
+ * @command: Command to send (EC_CMD_...).
+ * @outsize: Outgoing length in bytes.
+ * @insize: Max number of bytes to accept from the EC.
+ * @result: EC's response to the command (separate from communication failure).
+ * @data: Where to put the incoming data from EC and outgoing data to EC.
*/
struct cros_ec_command {
uint32_t version;
@@ -76,48 +77,55 @@ struct cros_ec_command {
};
/**
- * struct cros_ec_device - Information about a ChromeOS EC device
- *
- * @phys_name: name of physical comms layer (e.g. 'i2c-4')
+ * struct cros_ec_device - Information about a ChromeOS EC device.
+ * @phys_name: Name of physical comms layer (e.g. 'i2c-4').
* @dev: Device pointer for physical comms device
- * @was_wake_device: true if this device was set to wake the system from
- * sleep at the last suspend
- * @cmd_readmem: direct read of the EC memory-mapped region, if supported
- * @offset is within EC_LPC_ADDR_MEMMAP region.
- * @bytes: number of bytes to read. zero means "read a string" (including
- * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
- * Caller must ensure that the buffer is large enough for the result when
- * reading a string.
- *
- * @priv: Private data
- * @irq: Interrupt to use
- * @id: Device id
- * @din: input buffer (for data from EC)
- * @dout: output buffer (for data to EC)
- * \note
- * These two buffers will always be dword-aligned and include enough
- * space for up to 7 word-alignment bytes also, so we can ensure that
- * the body of the message is always dword-aligned (64-bit).
- * We use this alignment to keep ARM and x86 happy. Probably word
- * alignment would be OK, there might be a small performance advantage
- * to using dword.
- * @din_size: size of din buffer to allocate (zero to use static din)
- * @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @wake_enabled: true if this device can wake the system from sleep
- * @suspended: true if this device had been suspended
- * @cmd_xfer: send command to EC and get response
- * Returns the number of bytes received if the communication succeeded, but
- * that doesn't mean the EC was happy with the command. The caller
- * should check msg.result for the EC's result code.
- * @pkt_xfer: send packet to EC and get response
- * @lock: one transaction at a time
- * @mkbp_event_supported: true if this EC supports the MKBP event protocol.
- * @event_notifier: interrupt event notifier for transport devices.
- * @event_data: raw payload transferred with the MKBP event.
- * @event_size: size in bytes of the event data.
+ * @was_wake_device: True if this device was set to wake the system from
+ * sleep at the last suspend.
+ * @cros_class: The class structure for this device.
+ * @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
+ * @offset: Is within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including
+ * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be
+ * read. Caller must ensure that the buffer is large enough for the
+ * result when reading a string.
+ * @max_request: Max size of message requested.
+ * @max_response: Max size of message response.
+ * @max_passthru: Max size of passthru message.
+ * @proto_version: The protocol version used for this device.
+ * @priv: Private data.
+ * @irq: Interrupt to use.
+ * @id: Device id.
+ * @din: Input buffer (for data from EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ * happy. Probably word alignment would be OK, there might be a small
+ * performance advantage to using dword.
+ * @dout: Output buffer (for data to EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ * happy. Probably word alignment would be OK, there might be a small
+ * performance advantage to using dword.
+ * @din_size: Size of din buffer to allocate (zero to use static din).
+ * @dout_size: Size of dout buffer to allocate (zero to use static dout).
+ * @wake_enabled: True if this device can wake the system from sleep.
+ * @suspended: True if this device had been suspended.
+ * @cmd_xfer: Send command to EC and get response.
+ * Returns the number of bytes received if the communication
+ * succeeded, but that doesn't mean the EC was happy with the
+ * command. The caller should check msg.result for the EC's result
+ * code.
+ * @pkt_xfer: Send packet to EC and get response.
+ * @lock: One transaction at a time.
+ * @mkbp_event_supported: True if this EC supports the MKBP event protocol.
+ * @event_notifier: Interrupt event notifier for transport devices.
+ * @event_data: Raw payload transferred with the MKBP event.
+ * @event_size: Size in bytes of the event data.
+ * @host_event_wake_mask: Mask of host events that cause wake from suspend.
*/
struct cros_ec_device {
-
/* These are used by other drivers that want to talk to the EC */
const char *phys_name;
struct device *dev;
@@ -153,20 +161,19 @@ struct cros_ec_device {
};
/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
- *
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
* @sensor_num: Id of the sensor, as reported by the EC.
*/
struct cros_ec_sensor_platform {
u8 sensor_num;
};
-/* struct cros_ec_platform - ChromeOS EC platform information
- *
- * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
- * used in /dev/ and sysfs.
- * @cmd_offset: offset to apply for each command. Set when
- * registering a devicde behind another one.
+/**
+ * struct cros_ec_platform - ChromeOS EC platform information.
+ * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: Offset to apply for each command. Set when
+ * registering a device behind another one.
*/
struct cros_ec_platform {
const char *ec_name;
@@ -175,16 +182,16 @@ struct cros_ec_platform {
struct cros_ec_debugfs;
-/*
- * struct cros_ec_dev - ChromeOS EC device entry point
- *
- * @class_dev: Device structure used in sysfs
- * @cdev: Character device structure in /dev
- * @ec_dev: cros_ec_device structure to talk to the physical device
- * @dev: pointer to the platform device
- * @debug_info: cros_ec_debugfs structure for debugging information
- * @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC.
- * @cmd_offset: offset to apply for each command.
+/**
+ * struct cros_ec_dev - ChromeOS EC device entry point.
+ * @class_dev: Device structure used in sysfs.
+ * @cdev: Character device structure in /dev.
+ * @ec_dev: cros_ec_device structure to talk to the physical device.
+ * @dev: Pointer to the platform device.
+ * @debug_info: cros_ec_debugfs structure for debugging information.
+ * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
+ * @cmd_offset: Offset to apply for each command.
+ * @features: Features supported by the EC.
*/
struct cros_ec_dev {
struct device class_dev;
@@ -200,124 +207,129 @@ struct cros_ec_dev {
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
/**
- * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
*
* This can be called by drivers to handle a suspend event.
*
- * ec_dev: Device to suspend
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_suspend(struct cros_ec_device *ec_dev);
/**
- * cros_ec_resume - Handle a resume operation for the ChromeOS EC device
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
*
* This can be called by drivers to handle a resume event.
*
- * @ec_dev: Device to resume
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_resume(struct cros_ec_device *ec_dev);
/**
- * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to register.
+ * @msg: Message to write.
*
* This is intended to be used by all ChromeOS EC drivers, but at present
* only SPI uses it. Once LPC uses the same protocol it can start using it.
* I2C could use it now, with a refactor of the existing code.
*
- * @ec_dev: Device to register
- * @msg: Message to write
+ * Return: 0 on success or negative error code.
*/
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_check_result - Check ec_msg->result
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
*
* This is used by ChromeOS EC drivers to check the ec_msg->result for
* errors and to warn about them.
*
- * @ec_dev: EC device
- * @msg: Message to check
+ * Return: 0 on success or negative error code.
*/
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
*
* Call this to send a command to the ChromeOS EC. This should be used
* instead of calling the EC's cmd_xfer() callback directly.
*
- * @ec_dev: EC device
- * @msg: Message to write
+ * Return: 0 on success or negative error code.
*/
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
*
* This function is identical to cros_ec_cmd_xfer, except it returns success
* status only if both the command was transmitted successfully and the EC
* replied with success status. It's not necessary to check msg->result when
* using this function.
*
- * @ec_dev: EC device
- * @msg: Message to write
- * @return: Num. of bytes transferred on success, <0 on failure
+ * Return: The number of bytes transferred on success or negative error code.
*/
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_remove - Remove a ChromeOS EC
+ * cros_ec_remove() - Remove a ChromeOS EC.
+ * @ec_dev: Device to register.
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
*
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_remove(struct cros_ec_device *ec_dev);
/**
- * cros_ec_register - Register a new ChromeOS EC, using the provided info
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
*
* Before calling this, allocate a pointer to a new device and then fill
* in all the fields up to the --private-- marker.
*
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
/**
- * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to register.
*
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_query_all(struct cros_ec_device *ec_dev);
/**
- * cros_ec_get_next_event - Fetch next event from the ChromeOS EC
- *
- * @ec_dev: Device to fetch event from
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
* @wake_event: Pointer to a bool set to true upon return if the event might be
* treated as a wake event. Ignored if null.
*
- * Returns: 0 on success, Linux error number on failure
+ * Return: 0 on success or negative error code.
*/
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
/**
- * cros_ec_get_host_event - Return a mask of event set by the EC.
+ * cros_ec_get_host_event() - Return a mask of events set by the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
*
- * When MKBP is supported, when the EC raises an interrupt,
- * We collect the events raised and call the functions in the ec notifier.
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * events raised and call the functions in the ec notifier. This function
+ * is a helper to know which events are raised.
*
- * This function is a helper to know which events are raised.
+ * Return: 0 on success or negative error code.
*/
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
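
For illustration only, a sketch (not part of this change) of how a consumer typically drives this interface: allocate a struct cros_ec_command, fill in the command and transfer sizes, then call cros_ec_cmd_xfer_status(). The cros_ec_command layout assumed here (version, command, outsize, insize, result, data[]) is the one from cros_ec.h; EC_CMD_HELLO and its parameter/response structs are documented further down in cros_ec_commands.h.

#include <linux/kernel.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/slab.h>

static int example_ec_hello(struct cros_ec_device *ec_dev, u32 in, u32 *out)
{
        struct ec_params_hello *params;
        struct ec_response_hello *resp;
        struct cros_ec_command *msg;
        int ret;

        msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
                      GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        msg->version = 0;
        msg->command = EC_CMD_HELLO;
        msg->outsize = sizeof(*params);
        msg->insize = sizeof(*resp);

        params = (struct ec_params_hello *)msg->data;
        params->in_data = in;

        /* Negative on transport error or when the EC reports a failure. */
        ret = cros_ec_cmd_xfer_status(ec_dev, msg);
        if (ret >= 0) {
                resp = (struct ec_response_hello *)msg->data;
                *out = resp->out_data;  /* Expected to be in + 0x01020304. */
                ret = 0;
        }

        kfree(msg);
        return ret;
}
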
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 5fd0e429f472..9a9631f0559e 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -306,15 +306,18 @@ enum host_event_code {
/* Host event mask */
#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1))
-/* Arguments at EC_LPC_ADDR_HOST_ARGS */
+/**
+ * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS
+ * @flags: The host argument flags.
+ * @command_version: Command version.
+ * @data_size: The length of data.
+ * @checksum: Checksum; sum of command + flags + command_version + data_size +
+ * all params/response data bytes.
+ */
struct ec_lpc_host_args {
uint8_t flags;
uint8_t command_version;
uint8_t data_size;
- /*
- * Checksum; sum of command + flags + command_version + data_size +
- * all params/response data bytes.
- */
uint8_t checksum;
} __packed;
@@ -468,54 +471,43 @@ struct ec_lpc_host_args {
#define EC_HOST_REQUEST_VERSION 3
-/* Version 3 request from host */
+/**
+ * struct ec_host_request - Version 3 request from host.
+ * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it
+ * receives a header with a version it doesn't know how to
+ * parse.
+ * @checksum: Checksum of request and data; sum of all bytes including checksum
+ * should total to 0.
+ * @command: Command to send (EC_CMD_...)
+ * @command_version: Command version.
+ * @reserved: Unused byte in current protocol version; set to 0.
+ * @data_len: Length of data which follows this header.
+ */
struct ec_host_request {
- /* Struct version (=3)
- *
- * EC will return EC_RES_INVALID_HEADER if it receives a header with a
- * version it doesn't know how to parse.
- */
uint8_t struct_version;
-
- /*
- * Checksum of request and data; sum of all bytes including checksum
- * should total to 0.
- */
uint8_t checksum;
-
- /* Command code */
uint16_t command;
-
- /* Command version */
uint8_t command_version;
-
- /* Unused byte in current protocol version; set to 0 */
uint8_t reserved;
-
- /* Length of data which follows this header */
uint16_t data_len;
} __packed;
#define EC_HOST_RESPONSE_VERSION 3
-/* Version 3 response from EC */
+/**
+ * struct ec_host_response - Version 3 response from EC.
+ * @struct_version: Struct version (=3).
+ * @checksum: Checksum of response and data; sum of all bytes including
+ * checksum should total to 0.
+ * @result: EC's response to the command (separate from communication failure)
+ * @data_len: Length of data which follows this header.
+ * @reserved: Unused bytes in current protocol version; set to 0.
+ */
struct ec_host_response {
- /* Struct version (=3) */
uint8_t struct_version;
-
- /*
- * Checksum of response and data; sum of all bytes including checksum
- * should total to 0.
- */
uint8_t checksum;
-
- /* Result code (EC_RES_*) */
uint16_t result;
-
- /* Length of data which follows this header */
uint16_t data_len;
-
- /* Unused bytes in current protocol version; set to 0 */
uint16_t reserved;
} __packed;
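
Both headers share the same checksum convention: every byte of the header plus the payload, including the checksum byte itself, must sum to zero. An illustrative helper (a sketch, not part of this change) makes the rule concrete for the request side:

#include <linux/mfd/cros_ec_commands.h>
#include <linux/types.h>

/*
 * Sketch: compute ec_host_request.checksum so that all bytes of the header
 * plus the following payload sum to 0 (mod 256). Call with req->checksum
 * already zeroed.
 */
static u8 example_host_request_checksum(const struct ec_host_request *req,
                                        const void *data, size_t data_len)
{
        const u8 *p = (const u8 *)req;
        u8 sum = 0;
        size_t i;

        for (i = 0; i < sizeof(*req); i++)
                sum += p[i];
        for (p = data, i = 0; i < data_len; i++)
                sum += p[i];

        return (u8)-sum;        /* Adding this byte back makes the total 0. */
}
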
@@ -540,6 +532,10 @@ struct ec_host_response {
*/
#define EC_CMD_PROTO_VERSION 0x00
+/**
+ * struct ec_response_proto_version - Response to the proto version command.
+ * @version: The protocol version.
+ */
struct ec_response_proto_version {
uint32_t version;
} __packed;
@@ -550,12 +546,20 @@ struct ec_response_proto_version {
*/
#define EC_CMD_HELLO 0x01
+/**
+ * struct ec_params_hello - Parameters to the hello command.
+ * @in_data: Pass anything here.
+ */
struct ec_params_hello {
- uint32_t in_data; /* Pass anything here */
+ uint32_t in_data;
} __packed;
+/**
+ * struct ec_response_hello - Response to the hello command.
+ * @out_data: Output will be in_data + 0x01020304.
+ */
struct ec_response_hello {
- uint32_t out_data; /* Output will be in_data + 0x01020304 */
+ uint32_t out_data;
} __packed;
/* Get version number */
@@ -567,22 +571,37 @@ enum ec_current_image {
EC_IMAGE_RW
};
+/**
+ * struct ec_response_get_version - Response to the get version command.
+ * @version_string_ro: Null-terminated RO firmware version string.
+ * @version_string_rw: Null-terminated RW firmware version string.
+ * @reserved: Unused bytes; was previously RW-B firmware version string.
+ * @current_image: One of ec_current_image.
+ */
struct ec_response_get_version {
- /* Null-terminated version strings for RO, RW */
char version_string_ro[32];
char version_string_rw[32];
- char reserved[32]; /* Was previously RW-B string */
- uint32_t current_image; /* One of ec_current_image */
+ char reserved[32];
+ uint32_t current_image;
} __packed;
/* Read test */
#define EC_CMD_READ_TEST 0x03
+/**
+ * struct ec_params_read_test - Parameters for the read test command.
+ * @offset: Starting value for read buffer.
+ * @size: Size to read in bytes.
+ */
struct ec_params_read_test {
- uint32_t offset; /* Starting value for read buffer */
- uint32_t size; /* Size to read in bytes */
+ uint32_t offset;
+ uint32_t size;
} __packed;
+/**
+ * struct ec_response_read_test - Response to the read test command.
+ * @data: Data returned by the read test command.
+ */
struct ec_response_read_test {
uint32_t data[32];
} __packed;
@@ -597,18 +616,27 @@ struct ec_response_read_test {
/* Get chip info */
#define EC_CMD_GET_CHIP_INFO 0x05
+/**
+ * struct ec_response_get_chip_info - Response to the get chip info command.
+ * @vendor: Null-terminated string for chip vendor.
+ * @name: Null-terminated string for chip name.
+ * @revision: Null-terminated string for chip mask version.
+ */
struct ec_response_get_chip_info {
- /* Null-terminated strings */
char vendor[32];
char name[32];
- char revision[32]; /* Mask version */
+ char revision[32];
} __packed;
/* Get board HW version */
#define EC_CMD_GET_BOARD_VERSION 0x06
+/**
+ * struct ec_response_board_version - Response to the board version command.
+ * @board_version: A monotonically incrementing number.
+ */
struct ec_response_board_version {
- uint16_t board_version; /* A monotonously incrementing number. */
+ uint16_t board_version;
} __packed;
/*
@@ -621,27 +649,42 @@ struct ec_response_board_version {
*/
#define EC_CMD_READ_MEMMAP 0x07
+/**
+ * struct ec_params_read_memmap - Parameters for the read memory map command.
+ * @offset: Offset in memmap (EC_MEMMAP_*).
+ * @size: Size to read in bytes.
+ */
struct ec_params_read_memmap {
- uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
- uint8_t size; /* Size to read in bytes */
+ uint8_t offset;
+ uint8_t size;
} __packed;
/* Read versions supported for a command */
#define EC_CMD_GET_CMD_VERSIONS 0x08
+/**
+ * struct ec_params_get_cmd_versions - Parameters for the get command versions.
+ * @cmd: Command to check.
+ */
struct ec_params_get_cmd_versions {
- uint8_t cmd; /* Command to check */
+ uint8_t cmd;
} __packed;
+/**
+ * struct ec_params_get_cmd_versions_v1 - Parameters for the get command
+ * versions (v1).
+ * @cmd: Command to check.
+ */
struct ec_params_get_cmd_versions_v1 {
- uint16_t cmd; /* Command to check */
+ uint16_t cmd;
} __packed;
+/**
+ * struct ec_response_get_cmd_versions - Response to the get command versions.
+ * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
+ * a desired version.
+ */
struct ec_response_get_cmd_versions {
- /*
- * Mask of supported versions; use EC_VER_MASK() to compare with a
- * desired version.
- */
uint32_t version_mask;
} __packed;
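
The version mask is meant to be tested with EC_VER_MASK(), the bit helper defined elsewhere in this header as (1UL << (n)). A short sketch, not part of this change; the transfer that fills in version_mask is assumed to have succeeded:

#include <linux/errno.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/types.h>

/* Sketch: pick the newest command version advertised in version_mask. */
static int example_pick_cmd_version(u32 version_mask)
{
        int v;

        for (v = 31; v >= 0; v--)
                if (version_mask & EC_VER_MASK(v))
                        return v;

        return -ENOENT; /* No version advertised at all. */
}
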
@@ -659,6 +702,11 @@ enum ec_comms_status {
EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
};
+/**
+ * struct ec_response_get_comms_status - Response to the get comms status
+ * command.
+ * @flags: Mask of enum ec_comms_status.
+ */
struct ec_response_get_comms_status {
uint32_t flags; /* Mask of enum ec_comms_status */
} __packed;
@@ -685,19 +733,19 @@ struct ec_response_test_protocol {
/* EC_RES_IN_PROGRESS may be returned if a command is slow */
#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
+/**
+ * struct ec_response_get_protocol_info - Response to the get protocol info.
+ * @protocol_versions: Bitmask of protocol versions supported (1 << n means
+ * version n).
+ * @max_request_packet_size: Maximum request packet size in bytes.
+ * @max_response_packet_size: Maximum response packet size in bytes.
+ * @flags: see EC_PROTOCOL_INFO_*
+ */
struct ec_response_get_protocol_info {
/* Fields which exist if at least protocol version 3 supported */
-
- /* Bitmask of protocol versions supported (1 << n means version n)*/
uint32_t protocol_versions;
-
- /* Maximum request packet size, in bytes */
uint16_t max_request_packet_size;
-
- /* Maximum response packet size, in bytes */
uint16_t max_response_packet_size;
-
- /* Flags; see EC_PROTOCOL_INFO_* */
uint32_t flags;
} __packed;
@@ -708,8 +756,10 @@ struct ec_response_get_protocol_info {
/* The upper byte of .flags tells what to do (nothing means "get") */
#define EC_GSV_SET 0x80000000
-/* The lower three bytes of .flags identifies the parameter, if that has
- meaning for an individual command. */
+/*
+ * The lower three bytes of .flags identify the parameter, if that has
+ * meaning for an individual command.
+ */
#define EC_GSV_PARAM_MASK 0x00ffffff
struct ec_params_get_set_value {
@@ -810,6 +860,7 @@ enum ec_feature_code {
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
+
struct ec_response_get_features {
uint32_t flags[2];
} __packed;
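
Feature codes below 32 land in flags[0] and the rest in flags[1], which is what the EC_FEATURE_MASK_0/1 helpers above encode. A minimal sketch (not part of this change) of testing a feature bit:

#include <linux/mfd/cros_ec_commands.h>
#include <linux/types.h>

/* Sketch: report whether the EC advertised a given feature. */
static bool example_ec_has_feature(const struct ec_response_get_features *r,
                                   enum ec_feature_code code)
{
        if (code < 32)
                return r->flags[0] & EC_FEATURE_MASK_0(code);
        return r->flags[1] & EC_FEATURE_MASK_1(code);
}
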
@@ -820,24 +871,22 @@ struct ec_response_get_features {
/* Get flash info */
#define EC_CMD_FLASH_INFO 0x10
-/* Version 0 returns these fields */
+/**
+ * struct ec_response_flash_info - Response to the flash info command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ *
+ * Version 0 returns these fields.
+ */
struct ec_response_flash_info {
- /* Usable flash size, in bytes */
uint32_t flash_size;
- /*
- * Write block size. Write offset and size must be a multiple
- * of this.
- */
uint32_t write_block_size;
- /*
- * Erase block size. Erase offset and size must be a multiple
- * of this.
- */
uint32_t erase_block_size;
- /*
- * Protection block size. Protection offset and size must be a
- * multiple of this.
- */
uint32_t protect_block_size;
} __packed;
@@ -845,7 +894,22 @@ struct ec_response_flash_info {
/* EC flash erases bits to 0 instead of 1 */
#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
-/*
+/**
+ * struct ec_response_flash_info_1 - Response to the flash info v1 command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if
+ * size is exactly this and offset is a multiple of this.
+ * For example, an EC may have a write buffer which can do
+ * half-page operations if data is aligned, and a slower
+ * word-at-a-time write mode.
+ * @flags: Flags; see EC_FLASH_INFO_*
+ *
* Version 1 returns the same initial fields as version 0, with additional
* fields following.
*
@@ -860,15 +924,7 @@ struct ec_response_flash_info_1 {
uint32_t protect_block_size;
/* Version 1 adds these fields: */
- /*
- * Ideal write size in bytes. Writes will be fastest if size is
- * exactly this and offset is a multiple of this. For example, an EC
- * may have a write buffer which can do half-page operations if data is
- * aligned, and a slower word-at-a-time write mode.
- */
uint32_t write_ideal_size;
-
- /* Flags; see EC_FLASH_INFO_* */
uint32_t flags;
} __packed;
@@ -879,9 +935,14 @@ struct ec_response_flash_info_1 {
*/
#define EC_CMD_FLASH_READ 0x11
+/**
+ * struct ec_params_flash_read - Parameters for the flash read command.
+ * @offset: Byte offset to read.
+ * @size: Size to read in bytes.
+ */
struct ec_params_flash_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
+ uint32_t offset;
+ uint32_t size;
} __packed;
/* Write flash */
@@ -891,18 +952,28 @@ struct ec_params_flash_read {
/* Version 0 of the flash command supported only 64 bytes of data */
#define EC_FLASH_WRITE_VER0_SIZE 64
+/**
+ * struct ec_params_flash_write - Parameters for the flash write command.
+ * @offset: Byte offset to write.
+ * @size: Size to write in bytes.
+ */
struct ec_params_flash_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
+ uint32_t offset;
+ uint32_t size;
/* Followed by data to write */
} __packed;
/* Erase flash */
#define EC_CMD_FLASH_ERASE 0x13
+/**
+ * struct ec_params_flash_erase - Parameters for the flash erase command.
+ * @offset: Byte offset to erase.
+ * @size: Size to erase in bytes.
+ */
struct ec_params_flash_erase {
- uint32_t offset; /* Byte offset to erase */
- uint32_t size; /* Size to erase in bytes */
+ uint32_t offset;
+ uint32_t size;
} __packed;
/*
@@ -941,21 +1012,28 @@ struct ec_params_flash_erase {
/* Entile flash code protected when the EC boots */
#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6)
+/**
+ * struct ec_params_flash_protect - Parameters for the flash protect command.
+ * @mask: Bits in flags to apply.
+ * @flags: New flags to apply.
+ */
struct ec_params_flash_protect {
- uint32_t mask; /* Bits in flags to apply */
- uint32_t flags; /* New flags to apply */
+ uint32_t mask;
+ uint32_t flags;
} __packed;
+/**
+ * struct ec_response_flash_protect - Response to the flash protect command.
+ * @flags: Current value of flash protect flags.
+ * @valid_flags: Flags which are valid on this platform. This allows the
+ * caller to distinguish between flags which aren't set vs. flags
+ * which can't be set on this platform.
+ * @writable_flags: Flags which can be changed given the current protection
+ * state.
+ */
struct ec_response_flash_protect {
- /* Current value of flash protect flags */
uint32_t flags;
- /*
- * Flags which are valid on this platform. This allows the caller
- * to distinguish between flags which aren't set vs. flags which can't
- * be set on this platform.
- */
uint32_t valid_flags;
- /* Flags which can be changed given the current protection state */
uint32_t writable_flags;
} __packed;
@@ -982,8 +1060,13 @@ enum ec_flash_region {
EC_FLASH_REGION_COUNT,
};
+/**
+ * struct ec_params_flash_region_info - Parameters for the flash region info
+ * command.
+ * @region: Flash region; see EC_FLASH_REGION_*
+ */
struct ec_params_flash_region_info {
- uint32_t region; /* enum ec_flash_region */
+ uint32_t region;
} __packed;
struct ec_response_flash_region_info {
@@ -1094,7 +1177,9 @@ struct rgb_s {
};
#define LB_BATTERY_LEVELS 4
-/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
+
+/*
+ * List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
struct lightbar_params_v0 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1e52b8fd1685..fcf9cc9d535f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2163,7 +2163,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f0caccd5833..847705a6d0ec 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -633,9 +633,6 @@ typedef struct pglist_data {
struct page_ext *node_page_ext;
#endif
#endif
-#ifndef CONFIG_NO_BOOTMEM
- struct bootmem_data *bdata;
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn, node_present_pages
@@ -869,7 +866,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index f35c7bf76143..0096a05395e3 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -122,8 +122,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#ifdef CONFIG_TREE_SRCU
#define _SRCU_NOTIFIER_HEAD(name, mod) \
- static DEFINE_PER_CPU(struct srcu_data, \
- name##_head_srcu_data); \
+ static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \
mod struct srcu_notifier_head name = \
SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 2d2096ba1cfe..1ce8e264a269 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -91,8 +91,7 @@
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
extern __PCPU_ATTRS(sec) __typeof__(type) name; \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
@@ -101,8 +100,7 @@
extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __typeof__(type) name
#endif
/*
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
new file mode 100644
index 000000000000..53dfc2541960
--- /dev/null
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_X86_ASUS_WMI_H
+#define __PLATFORM_DATA_X86_ASUS_WMI_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* WMI Methods */
+#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
+#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
+#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */
+#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */
+#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */
+#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */
+#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */
+#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */
+#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */
+#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */
+#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */
+#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */
+#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */
+#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2 */
+#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */
+#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */
+#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */
+#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
+#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
+#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
+
+#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
+
+/* Wireless */
+#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
+#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
+#define ASUS_WMI_DEVID_CWAP 0x00010003
+#define ASUS_WMI_DEVID_WLAN 0x00010011
+#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
+#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
+#define ASUS_WMI_DEVID_GPS 0x00010015
+#define ASUS_WMI_DEVID_WIMAX 0x00010017
+#define ASUS_WMI_DEVID_WWAN3G 0x00010019
+#define ASUS_WMI_DEVID_UWB 0x00010021
+
+/* Leds */
+/* 0x000200XX and 0x000400XX */
+#define ASUS_WMI_DEVID_LED1 0x00020011
+#define ASUS_WMI_DEVID_LED2 0x00020012
+#define ASUS_WMI_DEVID_LED3 0x00020013
+#define ASUS_WMI_DEVID_LED4 0x00020014
+#define ASUS_WMI_DEVID_LED5 0x00020015
+#define ASUS_WMI_DEVID_LED6 0x00020016
+
+/* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
+#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
+#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
+#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
+#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
+#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+
+/* Misc */
+#define ASUS_WMI_DEVID_CAMERA 0x00060013
+
+/* Storage */
+#define ASUS_WMI_DEVID_CARDREADER 0x00080013
+
+/* Input */
+#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
+#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
+
+/* Fan, Thermal */
+#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
+#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012
+
+/* Power */
+#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
+
+/* Deep S3 / Resume on LID open */
+#define ASUS_WMI_DEVID_LID_RESUME 0x00120031
+
+/* DSTS masks */
+#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
+#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
+#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000
+#define ASUS_WMI_DSTS_USER_BIT 0x00020000
+#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
+#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
+#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
+#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
+
+#if IS_REACHABLE(CONFIG_ASUS_WMI)
+int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
+#else
+static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
+ u32 *retval)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */
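
asus_wmi_evaluate_method() is the one entry point this header exports to other drivers, with a -ENODEV stub when CONFIG_ASUS_WMI is not reachable. A hedged usage sketch, not part of this change; whether DSTS or DSTS2 is the right status method depends on the platform, and the device ID below is only an example from the list above:

#include <linux/platform_data/x86/asus-wmi.h>
#include <linux/types.h>

/* Sketch: ask the firmware whether the ambient light sensor is present. */
static bool example_asus_als_present(void)
{
        u32 retval = 0;
        int err;

        err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
                                       ASUS_WMI_DEVID_ALS_ENABLE, 0, &retval);
        if (err)        /* -ENODEV when the asus-wmi driver is absent. */
                return false;

        return retval & ASUS_WMI_DSTS_PRESENCE_BIT;
}
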
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index af8a61be2d8d..9510c677ac70 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -51,8 +51,8 @@ extern void __rb_insert_augmented(struct rb_node *node,
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e3c5d856b6da..507a2b524208 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -305,14 +305,22 @@ struct fw_rsc_vdev {
struct fw_rsc_vdev_vring vring[0];
} __packed;
+struct rproc;
+
/**
* struct rproc_mem_entry - memory entry descriptor
* @va: virtual address
* @dma: dma address
* @len: length, in bytes
* @da: device address
+ * @release: release associated memory
* @priv: associated data
+ * @name: associated memory region name (optional)
* @node: list node
+ * @rsc_offset: offset in resource table
+ * @flags: iommu protection flags
+ * @of_resm_idx: reserved memory phandle index
+ * @alloc: specific memory allocator function
*/
struct rproc_mem_entry {
void *va;
@@ -320,10 +328,15 @@ struct rproc_mem_entry {
int len;
u32 da;
void *priv;
+ char name[32];
struct list_head node;
+ u32 rsc_offset;
+ u32 flags;
+ u32 of_resm_idx;
+ int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem);
+ int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem);
};
-struct rproc;
struct firmware;
/**
@@ -399,6 +412,9 @@ enum rproc_crash_type {
* @node: list node related to the rproc segment list
* @da: device address of the segment
* @size: size of the segment
+ * @priv: private data associated with the dump_segment
+ * @dump: custom dump function to fill device memory segment associated
+ * with coredump
*/
struct rproc_dump_segment {
struct list_head node;
@@ -406,6 +422,9 @@ struct rproc_dump_segment {
dma_addr_t da;
size_t size;
+ void *priv;
+ void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
+ void *dest);
loff_t offset;
};
@@ -439,7 +458,9 @@ struct rproc_dump_segment {
* @cached_table: copy of the resource table
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
+ * @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
+ * @nb_vdev: number of vdevs currently handled by rproc
*/
struct rproc {
struct list_head node;
@@ -472,6 +493,7 @@ struct rproc {
bool has_iommu;
bool auto_boot;
struct list_head dump_segments;
+ int nb_vdev;
};
/**
@@ -499,7 +521,6 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @dma: dma address
* @len: length, in bytes
* @da: device address
* @align: vring alignment
@@ -509,7 +530,6 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- dma_addr_t dma;
int len;
u32 da;
u32 align;
@@ -528,6 +548,7 @@ struct rproc_vring {
* @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
+ * @index: vdev position versus other vdevs declared in the resource table
*/
struct rproc_vdev {
struct kref refcount;
@@ -540,6 +561,7 @@ struct rproc_vdev {
struct virtio_device vdev;
struct rproc_vring vring[RVDEV_NUM_VRINGS];
u32 rsc_offset;
+ u32 index;
};
struct rproc *rproc_get_by_phandle(phandle phandle);
@@ -553,10 +575,29 @@ int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
+
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+ void *va, dma_addr_t dma, int len, u32 da,
+ int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+ int (*release)(struct rproc *, struct rproc_mem_entry *),
+ const char *name, ...);
+
+struct rproc_mem_entry *
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+ u32 da, const char *name, ...);
+
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest),
+ void *priv);
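
A remoteproc driver that needs its own formatting of a dumped region would pair this declaration with a dump callback along these lines. This is an illustrative sketch only, not part of this change: the iomem mapping stashed in rproc->priv, the base-plus-da addressing scheme, and the 0x20000000/SZ_64K values are all placeholders.

#include <linux/io.h>
#include <linux/remoteproc.h>
#include <linux/sizes.h>

/* Hypothetical: copy a device memory window into the coredump buffer. */
static void example_dump_segment(struct rproc *rproc,
                                 struct rproc_dump_segment *segment,
                                 void *dest)
{
        void __iomem *base = rproc->priv;       /* assumed driver mapping */

        memcpy_fromio(dest, base + segment->da, segment->size);
}

static int example_register_segment(struct rproc *rproc)
{
        return rproc_coredump_add_custom_segment(rproc, 0x20000000, SZ_64K,
                                                 example_dump_segment, NULL);
}
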
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8f8a5418b627..a51c13c2b1a0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1200,6 +1200,11 @@ struct task_struct {
void *security;
#endif
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ unsigned long lowest_stack;
+ unsigned long prev_lowest_stack;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 04f1321d14c4..f30954cc059d 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -20,7 +20,6 @@ extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
-extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
static inline int sched_info_on(void)
{
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 200ed96a05af..f428e86f4800 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -129,9 +129,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
+ /* fall through */ \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
+ /* fall through */ \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
@@ -161,7 +163,9 @@ static inline void name(sigset_t *set) \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
+ /* fall through */ \
case 2: set->sig[1] = op(set->sig[1]); \
+ /* fall through */ \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
@@ -182,6 +186,7 @@ static inline void sigemptyset(sigset_t *set)
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
+ /* fall through */
case 1: set->sig[0] = 0;
break;
}
@@ -194,6 +199,7 @@ static inline void sigfillset(sigset_t *set)
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
+ /* fall through */
case 1: set->sig[0] = -1;
break;
}
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
new file mode 100644
index 000000000000..3d5c3271a9a8
--- /dev/null
+++ b/include/linux/stackleak.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STACKLEAK_H
+#define _LINUX_STACKLEAK_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define STACKLEAK_POISON -0xBEEF
+#define STACKLEAK_SEARCH_DEPTH 128
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#include <asm/stacktrace.h>
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+ t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+# ifdef CONFIG_STACKLEAK_METRICS
+ t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
+#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
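
The poison value and search depth above are consumed by the erasing code in kernel/stackleak.c, which tracks how deep the stack was used and re-poisons the unused part on return to user space. The following is only an illustration of the search idea, not the in-tree implementation, and it assumes CONFIG_GCC_PLUGIN_STACKLEAK so that lowest_stack exists in task_struct:

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/stackleak.h>

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
/*
 * Sketch: walk down from the recorded low-water mark until a long enough
 * run of STACKLEAK_POISON words shows the stack was not used below here.
 */
static unsigned long example_find_lowest_used(struct task_struct *t)
{
        unsigned long ptr = t->lowest_stack;
        unsigned long boundary = (unsigned long)end_of_stack(t);
        unsigned int poison_run = 0;

        while (ptr > boundary &&
               poison_run <= STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long)) {
                if (*(unsigned long *)ptr == STACKLEAK_POISON)
                        poison_run++;
                else
                        poison_run = 0;
                ptr -= sizeof(unsigned long);
        }

        return ptr;
}
#endif
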
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 40d2822f0e2f..5a3e95017fc6 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -67,7 +67,7 @@ struct cache_detail {
struct module * owner;
int hash_size;
struct hlist_head * hash_table;
- rwlock_t hash_lock;
+ spinlock_t hash_lock;
char *name;
void (*cache_put)(struct kref *);
@@ -168,8 +168,8 @@ extern const struct file_operations content_file_operations_pipefs;
extern const struct file_operations cache_flush_operations_pipefs;
extern struct cache_head *
-sunrpc_cache_lookup(struct cache_detail *detail,
- struct cache_head *key, int hash);
+sunrpc_cache_lookup_rcu(struct cache_detail *detail,
+ struct cache_head *key, int hash);
extern struct cache_head *
sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash);
@@ -186,6 +186,12 @@ static inline struct cache_head *cache_get(struct cache_head *h)
return h;
}
+static inline struct cache_head *cache_get_rcu(struct cache_head *h)
+{
+ if (kref_get_unless_zero(&h->ref))
+ return h;
+ return NULL;
+}
static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
@@ -224,9 +230,9 @@ extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *);
/* Must store cache_detail in seq_file->private if using next three functions */
-extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
-extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
-extern void cache_seq_stop(struct seq_file *file, void *p);
+extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop_rcu(struct seq_file *file, void *p);
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index fd78f78df5c6..e6e26918504c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -113,13 +113,14 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
-#define RPCRDMA_LISTEN_BACKLOG 10
-#define RPCRDMA_MAX_REQUESTS 32
-
-/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
- * current NFSv4.1 implementation supports one backchannel slot.
+/*
+ * Default connection parameters
*/
-#define RPCRDMA_MAX_BC_REQUESTS 2
+enum {
+ RPCRDMA_LISTEN_BACKLOG = 10,
+ RPCRDMA_MAX_REQUESTS = 64,
+ RPCRDMA_MAX_BC_REQUESTS = 2,
+};
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 04e404a07882..3e53a6e2ada7 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -82,6 +82,7 @@ struct auth_domain {
struct hlist_node hash;
char *name;
struct auth_ops *flavour;
+ struct rcu_head rcu_head;
};
/*
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 78a010e19ed4..4130a5497d40 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -575,7 +575,8 @@ extern int bpf_get_kprobe_info(const struct perf_event *event,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
-extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern int perf_uprobe_init(struct perf_event *event,
+ unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 422b1c01ee0d..55ce99ddb912 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -21,15 +21,16 @@ struct kvec {
size_t iov_len;
};
-enum {
+enum iter_type {
ITER_IOVEC = 0,
ITER_KVEC = 2,
ITER_BVEC = 4,
ITER_PIPE = 8,
+ ITER_DISCARD = 16,
};
struct iov_iter {
- int type;
+ unsigned int type;
size_t iov_offset;
size_t count;
union {
@@ -47,6 +48,41 @@ struct iov_iter {
};
};
+static inline enum iter_type iov_iter_type(const struct iov_iter *i)
+{
+ return i->type & ~(READ | WRITE);
+}
+
+static inline bool iter_is_iovec(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_IOVEC;
+}
+
+static inline bool iov_iter_is_kvec(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_KVEC;
+}
+
+static inline bool iov_iter_is_bvec(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_BVEC;
+}
+
+static inline bool iov_iter_is_pipe(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_PIPE;
+}
+
+static inline bool iov_iter_is_discard(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_DISCARD;
+}
+
+static inline unsigned char iov_iter_rw(const struct iov_iter *i)
+{
+ return i->type & (READ | WRITE);
+}
+
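
With the direction bits now split out of the iterator type, callers are expected to use the accessors above instead of open-coding mask tests. A sketch, not part of this change, assuming the post-conversion calling convention where the direction argument is plain READ or WRITE:

#include <linux/kernel.h>
#include <linux/uio.h>

static size_t example_describe_iter(void *buf, size_t len)
{
        struct kvec kv = { .iov_base = buf, .iov_len = len };
        struct iov_iter iter;

        /* Data will be read *into* buf, hence direction READ. */
        iov_iter_kvec(&iter, READ, &kv, 1, len);

        if (iov_iter_is_kvec(&iter) && iov_iter_rw(&iter) == READ)
                return iov_iter_count(&iter);   /* == len here */

        return 0;
}
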
/*
* Total number of bytes covered by an iovec.
*
@@ -74,7 +110,8 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
}
#define iov_for_each(iov, iter, start) \
- if (!((start).type & (ITER_BVEC | ITER_PIPE))) \
+ if (iov_iter_type(start) == ITER_IOVEC || \
+ iov_iter_type(start) == ITER_KVEC) \
for (iter = (start); \
(iter).count && \
((iov = iov_iter_iovec(&(iter))), 1); \
@@ -181,14 +218,15 @@ size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
-void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
+void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
-void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
unsigned long nr_segs, size_t count);
-void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
-void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
size_t count);
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -202,19 +240,6 @@ static inline size_t iov_iter_count(const struct iov_iter *i)
return i->count;
}
-static inline bool iter_is_iovec(const struct iov_iter *i)
-{
- return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
-}
-
-/*
- * Get one of READ or WRITE out of iter->type without any other flags OR'd in
- * with it.
- *
- * The ?: is just for type safety.
- */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
-
/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index bb9d2084af03..103a48a48872 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -123,6 +123,7 @@ extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
@@ -160,6 +161,10 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
return -ENOSYS;
}
+static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+{
+ return -ENOSYS;
+}
static inline int
uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
{
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 738a0c24874f..fdfd04e348f6 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,8 +246,7 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
*
* @bio is a part of the writeback in progress controlled by @wbc. Perform
* writeback specific initialization. This is used to apply the cgroup
- * writeback context. Must be called after the bio has been associated with
- * a device.
+ * writeback context.
*/
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
@@ -258,7 +257,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
* regular writeback instead of writing things out itself.
*/
if (wbc->wb)
- bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
+ bio_associate_blkcg(bio, wbc->wb->blkcg_css);
}
#else /* CONFIG_CGROUP_WRITEBACK */
diff --git a/include/media/media-device.h b/include/media/media-device.h
index bcc6ec434f1f..c8ddbfe8b74c 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -27,6 +27,7 @@
struct ida;
struct device;
+struct media_device;
/**
* struct media_entity_notify - Media Entity Notify
@@ -50,10 +51,32 @@ struct media_entity_notify {
* struct media_device_ops - Media device operations
* @link_notify: Link state change notification callback. This callback is
* called with the graph_mutex held.
+ * @req_alloc: Allocate a request. Set this if you need to allocate a struct
+ * larger then struct media_request. @req_alloc and @req_free must
+ * either both be set or both be NULL.
+ * @req_free: Free a request. Set this if @req_alloc was set as well;
+ * leave it NULL otherwise.
+ * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex
+ * lock is held when this op is called.
+ * @req_queue: Queue a validated request, cannot fail. If something goes
+ * wrong when queueing this request then it should be marked
+ * as such internally in the driver and any related buffers
+ * must eventually return to vb2 with state VB2_BUF_STATE_ERROR.
+ * The req_queue_mutex lock is held when this op is called.
+ * It is important that vb2 buffer objects are queued last after
+ * all other object types are queued: queueing a buffer kickstarts
+ * the request processing, so all other objects related to the
+ * request (and thus the buffer) must be available to the driver.
+ * And once a buffer is queued, then the driver can complete
+ * or delete objects from the request before req_queue exits.
*/
struct media_device_ops {
int (*link_notify)(struct media_link *link, u32 flags,
unsigned int notification);
+ struct media_request *(*req_alloc)(struct media_device *mdev);
+ void (*req_free)(struct media_request *req);
+ int (*req_validate)(struct media_request *req);
+ void (*req_queue)(struct media_request *req);
};
/**
@@ -88,6 +111,9 @@ struct media_device_ops {
* @disable_source: Disable Source Handler function pointer
*
* @ops: Operation handler callbacks
+ * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t.
+ * other operations that stop or start streaming.
+ * @request_id: Used to generate unique request IDs
*
* This structure represents an abstract high-level media device. It allows easy
* access to entities and provides basic media device-level support. The
@@ -158,6 +184,9 @@ struct media_device {
void (*disable_source)(struct media_entity *entity);
const struct media_device_ops *ops;
+
+ struct mutex req_queue_mutex;
+ atomic_t request_id;
};
/* We don't need to include pci.h or usb.h here */
diff --git a/include/media/media-request.h b/include/media/media-request.h
new file mode 100644
index 000000000000..0ce75c35131f
--- /dev/null
+++ b/include/media/media-request.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Media device request objects
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ */
+
+#ifndef MEDIA_REQUEST_H
+#define MEDIA_REQUEST_H
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/refcount.h>
+
+#include <media/media-device.h>
+
+/**
+ * enum media_request_state - media request state
+ *
+ * @MEDIA_REQUEST_STATE_IDLE: Idle
+ * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes
+ * allowed
+ * @MEDIA_REQUEST_STATE_QUEUED: Queued
+ * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done
+ * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited
+ * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e.
+ * request objects are being added,
+ * modified or removed
+ * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used
+ * internally for sanity check purposes
+ */
+enum media_request_state {
+ MEDIA_REQUEST_STATE_IDLE,
+ MEDIA_REQUEST_STATE_VALIDATING,
+ MEDIA_REQUEST_STATE_QUEUED,
+ MEDIA_REQUEST_STATE_COMPLETE,
+ MEDIA_REQUEST_STATE_CLEANING,
+ MEDIA_REQUEST_STATE_UPDATING,
+ NR_OF_MEDIA_REQUEST_STATE,
+};
+
+struct media_request_object;
+
+/**
+ * struct media_request - Media device request
+ * @mdev: Media device this request belongs to
+ * @kref: Reference count
+ * @debug_str: Prefix for debug messages (process name:fd)
+ * @state: The state of the request
+ * @updating_count: count the number of request updates that are in progress
+ * @access_count: count the number of request accesses that are in progress
+ * @objects: List of @struct media_request_object request objects
+ * @num_incomplete_objects: The number of incomplete objects in the request
+ * @poll_wait: Wait queue for poll
+ * @lock: Serializes access to this struct
+ */
+struct media_request {
+ struct media_device *mdev;
+ struct kref kref;
+ char debug_str[TASK_COMM_LEN + 11];
+ enum media_request_state state;
+ unsigned int updating_count;
+ unsigned int access_count;
+ struct list_head objects;
+ unsigned int num_incomplete_objects;
+ struct wait_queue_head poll_wait;
+ spinlock_t lock;
+};
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+
+/**
+ * media_request_lock_for_access - Lock the request to access its objects
+ *
+ * @req: The media request
+ *
+ * Use before accessing a completed request. A reference to the request must
+ * be held during the access. This usually takes place automatically through
+ * a file handle. Use @media_request_unlock_for_access when done.
+ */
+static inline int __must_check
+media_request_lock_for_access(struct media_request *req)
+{
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
+ req->access_count++;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return ret;
+}
+
+/**
+ * media_request_unlock_for_access - Unlock a request previously locked for
+ * access
+ *
+ * @req: The media request
+ *
+ * Unlock a request that has previously been locked using
+ * @media_request_lock_for_access.
+ */
+static inline void media_request_unlock_for_access(struct media_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (!WARN_ON(!req->access_count))
+ req->access_count--;
+ spin_unlock_irqrestore(&req->lock, flags);
+}
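
Paired with media_request_lock_for_access(), the intended pattern is lock, read, unlock, all while holding a request reference (normally via the request file handle). A minimal sketch, not part of this change; the body is a placeholder:

#include <linux/printk.h>
#include <media/media-request.h>

static int example_inspect_request(struct media_request *req)
{
        int ret;

        ret = media_request_lock_for_access(req);
        if (ret)        /* -EBUSY unless the request has completed. */
                return ret;

        /* Placeholder for reading the now-stable request state. */
        pr_debug("%s: %u objects still incomplete\n",
                 req->debug_str, req->num_incomplete_objects);

        media_request_unlock_for_access(req);
        return 0;
}
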
+
+/**
+ * media_request_lock_for_update - Lock the request for updating its objects
+ *
+ * @req: The media request
+ *
+ * Use before updating a request, i.e. adding, modifying or removing a request
+ * object in it. A reference to the request must be held during the update. This
+ * usually takes place automatically through a file handle. Use
+ * @media_request_unlock_for_update when done.
+ */
+static inline int __must_check
+media_request_lock_for_update(struct media_request *req)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_IDLE ||
+ req->state == MEDIA_REQUEST_STATE_UPDATING) {
+ req->state = MEDIA_REQUEST_STATE_UPDATING;
+ req->updating_count++;
+ } else {
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return ret;
+}
+
+/**
+ * media_request_unlock_for_update - Unlock a request previously locked for
+ * update
+ *
+ * @req: The media request
+ *
+ * Unlock a request that has previously been locked using
+ * @media_request_lock_for_update.
+ */
+static inline void media_request_unlock_for_update(struct media_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ WARN_ON(req->updating_count <= 0);
+ if (!--req->updating_count)
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ spin_unlock_irqrestore(&req->lock, flags);
+}
+
+/**
+ * media_request_get - Get the media request
+ *
+ * @req: The media request
+ *
+ * Get the media request.
+ */
+static inline void media_request_get(struct media_request *req)
+{
+ kref_get(&req->kref);
+}
+
+/**
+ * media_request_put - Put the media request
+ *
+ * @req: The media request
+ *
+ * Put the media request. The media request will be released
+ * when the refcount reaches 0.
+ */
+void media_request_put(struct media_request *req);
+
+/**
+ * media_request_get_by_fd - Get a media request by fd
+ *
+ * @mdev: Media device this request belongs to
+ * @request_fd: The file descriptor of the request
+ *
+ * Get the request represented by @request_fd that is owned
+ * by the media device.
+ *
+ * Return a -EACCES error pointer if requests are not supported
+ * by this driver. Return -EINVAL if the request was not found.
+ * Return the pointer to the request if found: the caller will
+ * have to call @media_request_put when it has finished using the
+ * request.
+ */
+struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd);
+
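
Because the lookup returns an ERR_PTR on failure and a referenced request on success, every successful call must be balanced with media_request_put(). A minimal sketch, not part of this change; the surrounding ioctl plumbing is assumed:

#include <linux/err.h>
#include <media/media-request.h>

static int example_use_request_fd(struct media_device *mdev, int request_fd)
{
        struct media_request *req;

        req = media_request_get_by_fd(mdev, request_fd);
        if (IS_ERR(req))        /* -EACCES (no request support) or -EINVAL. */
                return PTR_ERR(req);

        /* ... validate or queue objects against req here ... */

        media_request_put(req);
        return 0;
}
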
+/**
+ * media_request_alloc - Allocate the media request
+ *
+ * @mdev: Media device this request belongs to
+ * @alloc_fd: Store the request's file descriptor in this int
+ *
+ * Allocate the media request and put the fd in @alloc_fd.
+ */
+int media_request_alloc(struct media_device *mdev,
+ int *alloc_fd);
+
+#else
+
+static inline void media_request_get(struct media_request *req)
+{
+}
+
+static inline void media_request_put(struct media_request *req)
+{
+}
+
+static inline struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd)
+{
+ return ERR_PTR(-EACCES);
+}
+
+#endif
+
+/**
+ * struct media_request_object_ops - Media request object operations
+ * @prepare: Validate and prepare the request object, optional.
+ * @unprepare: Unprepare the request object, optional.
+ * @queue: Queue the request object, optional.
+ * @unbind: Unbind the request object, optional.
+ * @release: Release the request object, required.
+ */
+struct media_request_object_ops {
+ int (*prepare)(struct media_request_object *object);
+ void (*unprepare)(struct media_request_object *object);
+ void (*queue)(struct media_request_object *object);
+ void (*unbind)(struct media_request_object *object);
+ void (*release)(struct media_request_object *object);
+};
+
+/**
+ * struct media_request_object - An opaque object that belongs to a media
+ * request
+ *
+ * @ops: object's operations
+ * @priv: object's priv pointer
+ * @req: the request this object belongs to (can be NULL)
+ * @list: List entry of the object for @struct media_request
+ * @kref: Reference count of the object, acquire before releasing req->lock
+ * @completed: If true, then this object was completed.
+ *
+ * An object related to the request. This struct is always embedded in
+ * another struct that contains the actual data for this request object.
+ */
+struct media_request_object {
+ const struct media_request_object_ops *ops;
+ void *priv;
+ struct media_request *req;
+ struct list_head list;
+ struct kref kref;
+ bool completed;
+};
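/*
 * Since struct media_request_object is always embedded, a typical driver
 * wrapper looks roughly like the sketch below; the container type, its
 * release op and the kfree()-based lifetime are illustrative assumptions.
 */
struct my_req_data {
	struct media_request_object obj;
	u32 value;
};

static void my_req_data_release(struct media_request_object *obj)
{
	struct my_req_data *data = container_of(obj, struct my_req_data, obj);

	kfree(data);	/* the object owns its container */
}

static const struct media_request_object_ops my_req_data_ops = {
	.release = my_req_data_release,	/* only .release is required */
};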
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+
+/**
+ * media_request_object_get - Get a media request object
+ *
+ * @obj: The object
+ *
+ * Get a media request object.
+ */
+static inline void media_request_object_get(struct media_request_object *obj)
+{
+ kref_get(&obj->kref);
+}
+
+/**
+ * media_request_object_put - Put a media request object
+ *
+ * @obj: The object
+ *
+ * Put a media request object. Once all references are gone, the
+ * object's memory is released.
+ */
+void media_request_object_put(struct media_request_object *obj);
+
+/**
+ * media_request_object_find - Find an object in a request
+ *
+ * @req: The media request
+ * @ops: Find an object with this ops value
+ * @priv: Find an object with this priv value
+ *
+ * Both @ops and @priv must be non-NULL.
+ *
+ * Returns the object pointer or NULL if not found. The caller must
+ * call media_request_object_put() once it has finished using the object.
+ *
+ * Since this function needs to walk the list of objects it takes
+ * the @req->lock spin lock to make this safe.
+ */
+struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv);
+
+/**
+ * media_request_object_init - Initialise a media request object
+ *
+ * @obj: The object
+ *
+ * Initialise a media request object. The object will be released using the
+ * release callback of the ops once it has no references (this function
+ * initialises the reference count to one).
+ */
+void media_request_object_init(struct media_request_object *obj);
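/*
 * A sketch of the find/put contract of media_request_object_find(),
 * reusing the hypothetical my_req_data wrapper from the sketch above;
 * "priv" is whatever pointer was passed at bind time.
 */
static struct my_req_data *my_find_req_data(struct media_request *req,
					    void *priv)
{
	struct media_request_object *obj;

	obj = media_request_object_find(req, &my_req_data_ops, priv);
	if (!obj)
		return NULL;

	/*
	 * The caller now holds a reference and must drop it with
	 * media_request_object_put() once it is done with the data.
	 */
	return container_of(obj, struct my_req_data, obj);
}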
+
+/**
+ * media_request_object_bind - Bind a media request object to a request
+ *
+ * @req: The media request
+ * @ops: The object ops for this object
+ * @priv: A driver-specific priv pointer associated with this object
+ * @is_buffer: Set to true if the object is a buffer object.
+ * @obj: The object
+ *
+ * Bind this object to the request and set the ops and priv values of
+ * the object so it can be found later with media_request_object_find().
+ *
+ * Every bound object must be unbound or completed by the kernel at some
+ * point in time, otherwise the request will never complete. When the
+ * request is released all completed objects will be unbound by the
+ * request core code.
+ *
+ * Buffer objects will be added to the end of the request's object
+ * list, non-buffer objects will be added to the front of the list.
+ * This ensures that all buffer objects are at the end of the list
+ * and that all non-buffer objects that they depend on are processed
+ * first.
+ */
+int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj);
+
+/**
+ * media_request_object_unbind - Unbind a media request object
+ *
+ * @obj: The object
+ *
+ * Unbind the media request object from the request.
+ */
+void media_request_object_unbind(struct media_request_object *obj);
+
+/**
+ * media_request_object_complete - Mark the media request object as complete
+ *
+ * @obj: The object
+ *
+ * Mark the media request object as complete. Only bound objects can
+ * be completed.
+ */
+void media_request_object_complete(struct media_request_object *obj);
+
+#else
+
+static inline int __must_check
+media_request_lock_for_access(struct media_request *req)
+{
+ return -EINVAL;
+}
+
+static inline void media_request_unlock_for_access(struct media_request *req)
+{
+}
+
+static inline int __must_check
+media_request_lock_for_update(struct media_request *req)
+{
+ return -EINVAL;
+}
+
+static inline void media_request_unlock_for_update(struct media_request *req)
+{
+}
+
+static inline void media_request_object_get(struct media_request_object *obj)
+{
+}
+
+static inline void media_request_object_put(struct media_request_object *obj)
+{
+}
+
+static inline struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv)
+{
+ return NULL;
+}
+
+static inline void media_request_object_init(struct media_request_object *obj)
+{
+ obj->ops = NULL;
+ obj->req = NULL;
+}
+
+static inline int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj)
+{
+ return 0;
+}
+
+static inline void media_request_object_unbind(struct media_request_object *obj)
+{
+}
+
+static inline void media_request_object_complete(struct media_request_object *obj)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index ff89df428f79..83ce0593b275 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
+#include <media/media-request.h>
/* forward references */
struct file;
@@ -34,13 +35,15 @@ struct poll_table_struct;
/**
* union v4l2_ctrl_ptr - A pointer to a control value.
- * @p_s32: Pointer to a 32-bit signed value.
- * @p_s64: Pointer to a 64-bit signed value.
- * @p_u8: Pointer to a 8-bit unsigned value.
- * @p_u16: Pointer to a 16-bit unsigned value.
- * @p_u32: Pointer to a 32-bit unsigned value.
- * @p_char: Pointer to a string.
- * @p: Pointer to a compound value.
+ * @p_s32: Pointer to a 32-bit signed value.
+ * @p_s64: Pointer to a 64-bit signed value.
+ * @p_u8: Pointer to a 8-bit unsigned value.
+ * @p_u16: Pointer to a 16-bit unsigned value.
+ * @p_u32: Pointer to a 32-bit unsigned value.
+ * @p_char: Pointer to a string.
+ * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure.
+ * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure.
+ * @p: Pointer to a compound value.
*/
union v4l2_ctrl_ptr {
s32 *p_s32;
@@ -49,6 +52,8 @@ union v4l2_ctrl_ptr {
u16 *p_u16;
u32 *p_u32;
char *p_char;
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
+ struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization;
void *p;
};
@@ -247,6 +252,19 @@ struct v4l2_ctrl {
* @ctrl: The actual control information.
* @helper: Pointer to helper struct. Used internally in
* ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``.
+ * @from_other_dev: If true, then @ctrl was defined in another
+ * device than the &struct v4l2_ctrl_handler.
+ * @req_done: Internal flag: if the control handler containing this control
+ * reference is bound to a media request, then this is set when
+ * the control has been applied. This prevents applying controls
+ * from a cluster with multiple controls twice (when the first
+ * control of a cluster is applied, they all are).
+ * @req: If set, this refers to another request that sets this control.
+ * @p_req: If the control handler containing this control reference
+ * is bound to a media request, then this points to the
+ * value of the control that should be applied when the request
+ * is executed, or to the value of the control at the time
+ * that the request was completed.
*
* Each control handler has a list of these refs. The list_head is used to
* keep a sorted-by-control-ID list of all controls, while the next pointer
@@ -257,6 +275,10 @@ struct v4l2_ctrl_ref {
struct v4l2_ctrl_ref *next;
struct v4l2_ctrl *ctrl;
struct v4l2_ctrl_helper *helper;
+ bool from_other_dev;
+ bool req_done;
+ struct v4l2_ctrl_ref *req;
+ union v4l2_ctrl_ptr p_req;
};
/**
@@ -280,6 +302,17 @@ struct v4l2_ctrl_ref {
* @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
* @nr_of_buckets: Total number of buckets in the array.
* @error: The error code of the first failed control addition.
+ * @request_is_queued: True if the request was queued.
+ * @requests: List to keep track of open control handler request objects.
+ * For the parent control handler (@req_obj.req == NULL) this
+ * is the list header. When the parent control handler is
+ * removed, it has to unbind and put all these requests since
+ * they refer to the parent.
+ * @requests_queued: List of the queued requests. This determines the order
+ * in which these controls are applied. Once the request is
+ * completed it is removed from this list.
+ * @req_obj: The &struct media_request_object, used to link into a
+ * &struct media_request. This request object has a refcount.
*/
struct v4l2_ctrl_handler {
struct mutex _lock;
@@ -292,6 +325,10 @@ struct v4l2_ctrl_handler {
void *notify_priv;
u16 nr_of_buckets;
int error;
+ bool request_is_queued;
+ struct list_head requests;
+ struct list_head requests_queued;
+ struct media_request_object req_obj;
};
/**
@@ -633,6 +670,8 @@ typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
* @add: The control handler whose controls you want to add to
* the @hdl control handler.
* @filter: This function will filter which controls should be added.
+ * @from_other_dev: If true, then the controls in @add were defined in another
+ * device than @hdl.
*
* Does nothing if either of the two handlers is a NULL pointer.
* If @filter is NULL, then all controls are added. Otherwise only those
@@ -642,7 +681,8 @@ typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
*/
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
- v4l2_ctrl_filter filter);
+ v4l2_ctrl_filter filter,
+ bool from_other_dev);
/**
* v4l2_ctrl_radio_filter() - Standard filter for radio controls.
@@ -1070,6 +1110,84 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
*/
__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
+/**
+ * v4l2_ctrl_request_setup - helper function to apply control values in a request
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This is a helper function to call the control handler's s_ctrl callback with
+ * the control values contained in the request. Do note that this approach of
+ * applying control values in a request is only applicable to memory-to-memory
+ * devices.
+ */
+void v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_complete - Complete a control handler request object
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This function is to be called on each control handler that may have had a
+ * request object associated with it, i.e. control handlers of a driver that
+ * supports requests.
+ *
+ * The function first obtains the values of any volatile controls in the
+ * control handler and attaches them to the request. Then it completes
+ * the request object.
+ */
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
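/*
 * A sketch of how a stateless mem-to-mem codec driver might pair these two
 * helpers; the ctx layout and the way the request pointer is taken from the
 * source buffer are assumptions for illustration only.
 */
static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct media_request *req = ctx->src_buf->vb2_buf.req_obj.req;

	v4l2_ctrl_request_setup(req, &ctx->hdl);	/* apply request controls */
	/* ... program the hardware and start the job ... */
}

static void my_job_finished(struct my_ctx *ctx)
{
	struct media_request *req = ctx->src_buf->vb2_buf.req_obj.req;

	v4l2_ctrl_request_complete(req, &ctx->hdl);	/* latch volatile controls */
}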
+/**
+ * v4l2_ctrl_request_hdl_find - Find the control handler in the request
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This function finds the control handler in the request. It may return
+ * NULL if not found. When done, you must call v4l2_ctrl_request_hdl_put()
+ * with the returned handler pointer.
+ *
+ * If the request is not in state VALIDATING or QUEUED, then this function
+ * will always return NULL.
+ *
+ * Note that in state VALIDATING the req_queue_mutex is held, so
+ * no objects can be added or deleted from the request.
+ *
+ * In state QUEUED it is the driver that will have to ensure this.
+ */
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_hdl_put - Put the control handler
+ *
+ * @hdl: Put this control handler
+ *
+ * This function releases the control handler previously obtained from
+ * v4l2_ctrl_request_hdl_find().
+ */
+static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl)
+{
+ if (hdl)
+ media_request_object_put(&hdl->req_obj);
+}
+
+/**
+ * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID.
+ *
+ * @hdl: The control handler from the request.
+ * @id: The ID of the control to find.
+ *
+ * This function returns a pointer to the control if this control is
+ * part of the request or NULL otherwise.
+ */
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
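/*
 * The three helpers above combine into a short find/inspect/put sequence;
 * a sketch that checks whether a given control was set in the request
 * (the control ID passed in is illustrative).
 */
static bool my_request_has_ctrl(struct media_request *req,
				struct v4l2_ctrl_handler *parent, u32 id)
{
	struct v4l2_ctrl_handler *hdl;
	bool found;

	hdl = v4l2_ctrl_request_hdl_find(req, parent);
	if (!hdl)
		return false;

	found = v4l2_ctrl_request_hdl_ctrl_find(hdl, id) != NULL;

	v4l2_ctrl_request_hdl_put(hdl);
	return found;
}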
+
/* Helpers for ioctl_ops */
/**
@@ -1136,11 +1254,12 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
* :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
*
* @hdl: pointer to &struct v4l2_ctrl_handler
+ * @mdev: pointer to &struct media_device
* @c: pointer to &struct v4l2_ext_controls
*
* If hdl == NULL then they will all return -EINVAL.
*/
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
struct v4l2_ext_controls *c);
/**
@@ -1148,11 +1267,13 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl,
* :ref:`VIDIOC_TRY_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
*
* @hdl: pointer to &struct v4l2_ctrl_handler
+ * @mdev: pointer to &struct media_device
* @c: pointer to &struct v4l2_ext_controls
*
* If hdl == NULL then they will all return -EINVAL.
*/
int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct media_device *mdev,
struct v4l2_ext_controls *c);
/**
@@ -1161,11 +1282,13 @@ int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
*
* @fh: pointer to &struct v4l2_fh
* @hdl: pointer to &struct v4l2_ctrl_handler
+ * @mdev: pointer to &struct media_device
* @c: pointer to &struct v4l2_ext_controls
*
* If hdl == NULL then they will all return -EINVAL.
*/
int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct media_device *mdev,
struct v4l2_ext_controls *c);
/**
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index b330e4a08a6b..ac7677a183ff 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -211,6 +211,17 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
sd->v4l2_dev->notify(sd, notification, arg);
}
+/**
+ * v4l2_device_supports_requests - Test if requests are supported.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+{
+ return v4l2_dev->mdev && v4l2_dev->mdev->ops &&
+ v4l2_dev->mdev->ops->req_queue;
+}
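/*
 * A sketch of the intended use in a driver's probe/queue-setup path, gating
 * the (hypothetical) vb2 queue's request support on this helper.
 */
static void my_setup_queue(struct my_dev *dev, struct vb2_queue *q)
{
	if (v4l2_device_supports_requests(&dev->v4l2_dev))
		q->supports_requests = 1;
}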
+
/* Helper macros to iterate over all subdevs. */
/**
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index d655720e16a1..58c1ecf3d648 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -622,6 +622,10 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}
+/* v4l2 request helper */
+
+void vb2_m2m_request_queue(struct media_request *req);
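/*
 * A sketch of how this helper is meant to be wired up together with
 * vb2_request_validate() (declared in videobuf2-v4l2.h further below);
 * the ops instance and the probe-time assignment are illustrative.
 */
static const struct media_device_ops my_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue    = vb2_m2m_request_queue,
};

/* during probe: dev->mdev.ops = &my_m2m_media_ops; */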
+
/* v4l2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index f6818f732f34..e86981d615ae 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/dma-buf.h>
#include <linux/bitops.h>
+#include <media/media-request.h>
#define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8)
@@ -203,8 +204,8 @@ enum vb2_io_modes {
/**
* enum vb2_buffer_state - current video buffer state.
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
+ * @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request.
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
- * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver.
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
* @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
@@ -217,8 +218,8 @@ enum vb2_io_modes {
*/
enum vb2_buffer_state {
VB2_BUF_STATE_DEQUEUED,
+ VB2_BUF_STATE_IN_REQUEST,
VB2_BUF_STATE_PREPARING,
- VB2_BUF_STATE_PREPARED,
VB2_BUF_STATE_QUEUED,
VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
@@ -238,6 +239,8 @@ struct vb2_queue;
* @num_planes: number of planes in the buffer
* on an internal driver queue.
* @timestamp: frame timestamp in ns.
+ * @req_obj: used to bind this buffer to a request. This
+ * request object has a refcount.
*/
struct vb2_buffer {
struct vb2_queue *vb2_queue;
@@ -246,10 +249,17 @@ struct vb2_buffer {
unsigned int memory;
unsigned int num_planes;
u64 timestamp;
+ struct media_request_object req_obj;
/* private: internal use only
*
* state: current buffer state; do not change
+ * synced: this buffer has been synced for DMA, i.e. the
+ * 'prepare' memop was called. It is cleared again
+ * after the 'finish' memop is called.
+ * prepared: this buffer has been prepared, i.e. the
+ * buf_prepare op was called. It is cleared again
+ * after the 'buf_finish' op is called.
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
@@ -257,6 +267,8 @@ struct vb2_buffer {
* vb2_plane: per-plane information; do not change
*/
enum vb2_buffer_state state;
+ bool synced;
+ bool prepared;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -287,6 +299,7 @@ struct vb2_buffer {
u32 cnt_buf_finish;
u32 cnt_buf_cleanup;
u32 cnt_buf_queue;
+ u32 cnt_buf_request_complete;
/* This counts the number of calls to vb2_buffer_done() */
u32 cnt_buf_done;
@@ -380,6 +393,11 @@ struct vb2_buffer {
* ioctl; might be called before @start_streaming callback
* if user pre-queued buffers before calling
* VIDIOC_STREAMON().
+ * @buf_request_complete: a buffer that is associated with a queued request
+ *			was canceled without ever being queued to the
+ *			driver. The driver must then mark the associated
+ *			objects in the request as completed; required if
+ *			requests are supported.
*/
struct vb2_ops {
int (*queue_setup)(struct vb2_queue *q,
@@ -398,6 +416,8 @@ struct vb2_ops {
void (*stop_streaming)(struct vb2_queue *q);
void (*buf_queue)(struct vb2_buffer *vb);
+
+ void (*buf_request_complete)(struct vb2_buffer *vb);
};
/**
@@ -406,6 +426,9 @@ struct vb2_ops {
* @verify_planes_array: Verify that a given user space structure contains
* enough planes for the buffer. This is called
* for each dequeued buffer.
+ * @init_buffer: given a &vb2_buffer initialize the extra data after
+ * struct vb2_buffer.
+ * For V4L2 this is a &struct vb2_v4l2_buffer.
* @fill_user_buffer: given a &vb2_buffer fill in the userspace structure.
* For V4L2 this is a &struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer.
@@ -416,9 +439,9 @@ struct vb2_ops {
*/
struct vb2_buf_ops {
int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
+ void (*init_buffer)(struct vb2_buffer *vb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
- int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
- struct vb2_plane *planes);
+ int (*fill_vb2_buffer)(struct vb2_buffer *vb, struct vb2_plane *planes);
void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb);
};
@@ -449,6 +472,13 @@ struct vb2_buf_ops {
* @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF
* has not been called. This is a vb1 idiom that has been adopted
* also by vb2.
+ * @supports_requests: this queue supports the Request API.
+ * @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first
+ * time this is called. Set to 0 when the queue is canceled.
+ * If this is 1, then you cannot queue buffers from a request.
+ * @uses_requests: requests are used for this queue. Set to 1 the first time
+ * a request is queued. Set to 0 when the queue is canceled.
+ * If this is 1, then you cannot queue buffers directly.
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -516,6 +546,9 @@ struct vb2_queue {
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
unsigned quirk_poll_must_check_waiting_for_buffers:1;
+ unsigned supports_requests:1;
+ unsigned uses_qbuf:1;
+ unsigned uses_requests:1;
struct mutex *lock;
void *owner;
@@ -752,12 +785,17 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
* @index: id number of the buffer
* @pb: buffer structure passed from userspace to
* v4l2_ioctl_ops->vidioc_qbuf handler in driver
+ * @req: pointer to &struct media_request, may be NULL.
*
* Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called
* internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
*
* This function:
*
+ * #) If @req is non-NULL, then the buffer will be bound to this
+ * media request and it returns. The buffer will be prepared and
+ * queued to the driver (i.e. the next two steps) when the request
+ * itself is queued.
* #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
* (if provided), in which driver-specific buffer initialization can
* be performed;
@@ -766,7 +804,8 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req);
/**
* vb2_core_dqbuf() - Dequeue a buffer to the userspace
@@ -1143,4 +1182,19 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
*/
int vb2_verify_memory_type(struct vb2_queue *q,
enum vb2_memory memory, unsigned int type);
+
+/**
+ * vb2_request_object_is_buffer() - return true if the object is a buffer
+ *
+ * @obj: the request object.
+ */
+bool vb2_request_object_is_buffer(struct media_request_object *obj);
+
+/**
+ * vb2_request_buffer_cnt() - return the number of buffers in the request
+ *
+ * @req: the request.
+ */
+unsigned int vb2_request_buffer_cnt(struct media_request *req);
+
#endif /* _MEDIA_VIDEOBUF2_CORE_H */
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 3d5e2d739f05..727855463838 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -32,6 +32,8 @@
* &enum v4l2_field.
* @timecode: frame timecode.
* @sequence: sequence count of this frame.
+ * @request_fd: the request_fd associated with this buffer
+ * @planes: plane information (userptr/fd, length, bytesused, data_offset).
*
* Should contain enough information to be able to cover all the fields
* of &struct v4l2_buffer at ``videodev2.h``.
@@ -43,6 +45,8 @@ struct vb2_v4l2_buffer {
__u32 field;
struct v4l2_timecode timecode;
__u32 sequence;
+ __s32 request_fd;
+ struct vb2_plane planes[VB2_MAX_PLANES];
};
/*
@@ -77,6 +81,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
* vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @mdev: pointer to &struct media_device, may be NULL.
* @b: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver
*
@@ -88,15 +93,19 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
* #) verifies the passed buffer,
* #) calls &vb2_ops->buf_prepare callback in the driver (if provided),
* in which driver-specific buffer initialization can be performed.
+ * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set,
+ * then bind the prepared buffer to the request.
*
* The return values from this function are intended to be directly returned
* from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
*/
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b);
/**
* vb2_qbuf() - Queue a buffer from userspace
* @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @mdev: pointer to &struct media_device, may be NULL.
* @b: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_qbuf handler in driver
*
@@ -105,6 +114,8 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
* This function:
*
* #) verifies the passed buffer;
+ * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set,
+ * then bind the buffer to the request.
* #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
* (if provided), in which driver-specific buffer initialization can
* be performed;
@@ -114,7 +125,8 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
* The return values from this function are intended to be directly returned
* from &v4l2_ioctl_ops->vidioc_qbuf handler in driver.
*/
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b);
/**
* vb2_expbuf() - Export a buffer as a file descriptor
@@ -291,4 +303,8 @@ void vb2_ops_wait_prepare(struct vb2_queue *vq);
*/
void vb2_ops_wait_finish(struct vb2_queue *vq);
+struct media_request;
+int vb2_request_validate(struct media_request *req);
+void vb2_request_queue(struct media_request *req);
+
#endif /* _MEDIA_VIDEOBUF2_V4L2_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index e2695c4bf358..ddbba838d048 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -13,7 +13,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp);
void unix_gc(void);
void wait_for_unix_gc(void);
struct sock *unix_get_socket(struct file *filp);
-struct sock *unix_peer_get(struct sock *);
+struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_SIZE 256
#define UNIX_HASH_BITS 8
@@ -40,7 +40,7 @@ struct unix_skb_parms {
u32 consumed;
} __randomize_layout;
-#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
+#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index d0a341bc4540..33d291888ba9 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -54,6 +54,35 @@ enum afs_fs_operation {
afs_FS_StoreData64 = 65538, /* AFS Store file data */
afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */
afs_FS_GetCapabilities = 65540, /* AFS Get FS server capabilities */
+
+ yfs_FS_FetchData = 130, /* YFS Fetch file data */
+ yfs_FS_FetchACL = 64131, /* YFS Fetch file ACL */
+ yfs_FS_FetchStatus = 64132, /* YFS Fetch file status */
+ yfs_FS_StoreACL = 64134, /* YFS Store file ACL */
+ yfs_FS_StoreStatus = 64135, /* YFS Store file status */
+ yfs_FS_RemoveFile = 64136, /* YFS Remove a file */
+ yfs_FS_CreateFile = 64137, /* YFS Create a file */
+ yfs_FS_Rename = 64138, /* YFS Rename or move a file or directory */
+ yfs_FS_Symlink = 64139, /* YFS Create a symbolic link */
+ yfs_FS_Link = 64140, /* YFS Create a hard link */
+ yfs_FS_MakeDir = 64141, /* YFS Create a directory */
+ yfs_FS_RemoveDir = 64142, /* YFS Remove a directory */
+ yfs_FS_GetVolumeStatus = 64149, /* YFS Get volume status information */
+ yfs_FS_SetVolumeStatus = 64150, /* YFS Set volume status information */
+ yfs_FS_SetLock = 64156, /* YFS Request a file lock */
+ yfs_FS_ExtendLock = 64157, /* YFS Extend a file lock */
+ yfs_FS_ReleaseLock = 64158, /* YFS Release a file lock */
+ yfs_FS_Lookup = 64161, /* YFS lookup file in directory */
+ yfs_FS_FlushCPS = 64165,
+ yfs_FS_FetchOpaqueACL = 64168,
+ yfs_FS_WhoAmI = 64170,
+ yfs_FS_RemoveACL = 64171,
+ yfs_FS_RemoveFile2 = 64173,
+ yfs_FS_StoreOpaqueACL2 = 64174,
+ yfs_FS_InlineBulkStatus = 64536, /* YFS Fetch multiple file statuses with errors */
+ yfs_FS_FetchData64 = 64537, /* YFS Fetch file data */
+ yfs_FS_StoreData64 = 64538, /* YFS Store file data */
+ yfs_FS_UpdateSymlink = 64540,
};
enum afs_vl_operation {
@@ -84,6 +113,44 @@ enum afs_edit_dir_reason {
afs_edit_dir_for_unlink,
};
+enum afs_eproto_cause {
+ afs_eproto_bad_status,
+ afs_eproto_cb_count,
+ afs_eproto_cb_fid_count,
+ afs_eproto_file_type,
+ afs_eproto_ibulkst_cb_count,
+ afs_eproto_ibulkst_count,
+ afs_eproto_motd_len,
+ afs_eproto_offline_msg_len,
+ afs_eproto_volname_len,
+ afs_eproto_yvl_fsendpt4_len,
+ afs_eproto_yvl_fsendpt6_len,
+ afs_eproto_yvl_fsendpt_num,
+ afs_eproto_yvl_fsendpt_type,
+ afs_eproto_yvl_vlendpt4_len,
+ afs_eproto_yvl_vlendpt6_len,
+ afs_eproto_yvl_vlendpt_type,
+};
+
+enum afs_io_error {
+ afs_io_error_cm_reply,
+ afs_io_error_extract,
+ afs_io_error_fs_probe_fail,
+ afs_io_error_vl_lookup_fail,
+ afs_io_error_vl_probe_fail,
+};
+
+enum afs_file_error {
+ afs_file_error_dir_bad_magic,
+ afs_file_error_dir_big,
+ afs_file_error_dir_missing_page,
+ afs_file_error_dir_over_end,
+ afs_file_error_dir_small,
+ afs_file_error_dir_unmarked_ext,
+ afs_file_error_mntpt,
+ afs_file_error_writeback_fail,
+};
+
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -119,7 +186,34 @@ enum afs_edit_dir_reason {
EM(afs_FS_FetchData64, "FS.FetchData64") \
EM(afs_FS_StoreData64, "FS.StoreData64") \
EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \
- E_(afs_FS_GetCapabilities, "FS.GetCapabilities")
+ EM(afs_FS_GetCapabilities, "FS.GetCapabilities") \
+ EM(yfs_FS_FetchACL, "YFS.FetchACL") \
+ EM(yfs_FS_FetchStatus, "YFS.FetchStatus") \
+ EM(yfs_FS_StoreACL, "YFS.StoreACL") \
+ EM(yfs_FS_StoreStatus, "YFS.StoreStatus") \
+ EM(yfs_FS_RemoveFile, "YFS.RemoveFile") \
+ EM(yfs_FS_CreateFile, "YFS.CreateFile") \
+ EM(yfs_FS_Rename, "YFS.Rename") \
+ EM(yfs_FS_Symlink, "YFS.Symlink") \
+ EM(yfs_FS_Link, "YFS.Link") \
+ EM(yfs_FS_MakeDir, "YFS.MakeDir") \
+ EM(yfs_FS_RemoveDir, "YFS.RemoveDir") \
+ EM(yfs_FS_GetVolumeStatus, "YFS.GetVolumeStatus") \
+ EM(yfs_FS_SetVolumeStatus, "YFS.SetVolumeStatus") \
+ EM(yfs_FS_SetLock, "YFS.SetLock") \
+ EM(yfs_FS_ExtendLock, "YFS.ExtendLock") \
+ EM(yfs_FS_ReleaseLock, "YFS.ReleaseLock") \
+ EM(yfs_FS_Lookup, "YFS.Lookup") \
+ EM(yfs_FS_FlushCPS, "YFS.FlushCPS") \
+ EM(yfs_FS_FetchOpaqueACL, "YFS.FetchOpaqueACL") \
+ EM(yfs_FS_WhoAmI, "YFS.WhoAmI") \
+ EM(yfs_FS_RemoveACL, "YFS.RemoveACL") \
+ EM(yfs_FS_RemoveFile2, "YFS.RemoveFile2") \
+ EM(yfs_FS_StoreOpaqueACL2, "YFS.StoreOpaqueACL2") \
+ EM(yfs_FS_InlineBulkStatus, "YFS.InlineBulkStatus") \
+ EM(yfs_FS_FetchData64, "YFS.FetchData64") \
+ EM(yfs_FS_StoreData64, "YFS.StoreData64") \
+ E_(yfs_FS_UpdateSymlink, "YFS.UpdateSymlink")
#define afs_vl_operations \
EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \
@@ -146,6 +240,40 @@ enum afs_edit_dir_reason {
EM(afs_edit_dir_for_symlink, "Symlnk") \
E_(afs_edit_dir_for_unlink, "Unlink")
+#define afs_eproto_causes \
+ EM(afs_eproto_bad_status, "BadStatus") \
+ EM(afs_eproto_cb_count, "CbCount") \
+ EM(afs_eproto_cb_fid_count, "CbFidCount") \
+ EM(afs_eproto_file_type, "FileTYpe") \
+ EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \
+ EM(afs_eproto_ibulkst_count, "IBS.FidCount") \
+ EM(afs_eproto_motd_len, "MotdLen") \
+ EM(afs_eproto_offline_msg_len, "OfflineMsgLen") \
+ EM(afs_eproto_volname_len, "VolNameLen") \
+ EM(afs_eproto_yvl_fsendpt4_len, "YVL.FsEnd4Len") \
+ EM(afs_eproto_yvl_fsendpt6_len, "YVL.FsEnd6Len") \
+ EM(afs_eproto_yvl_fsendpt_num, "YVL.FsEndCount") \
+ EM(afs_eproto_yvl_fsendpt_type, "YVL.FsEndType") \
+ EM(afs_eproto_yvl_vlendpt4_len, "YVL.VlEnd4Len") \
+ EM(afs_eproto_yvl_vlendpt6_len, "YVL.VlEnd6Len") \
+ E_(afs_eproto_yvl_vlendpt_type, "YVL.VlEndType")
+
+#define afs_io_errors \
+ EM(afs_io_error_cm_reply, "CM_REPLY") \
+ EM(afs_io_error_extract, "EXTRACT") \
+ EM(afs_io_error_fs_probe_fail, "FS_PROBE_FAIL") \
+ EM(afs_io_error_vl_lookup_fail, "VL_LOOKUP_FAIL") \
+ E_(afs_io_error_vl_probe_fail, "VL_PROBE_FAIL")
+
+#define afs_file_errors \
+ EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \
+ EM(afs_file_error_dir_big, "DIR_BIG") \
+ EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \
+ EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
+ EM(afs_file_error_dir_small, "DIR_SMALL") \
+ EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
+ EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
+ E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
/*
* Export enum symbols via userspace.
@@ -160,6 +288,9 @@ afs_fs_operations;
afs_vl_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
+afs_eproto_causes;
+afs_io_errors;
+afs_file_errors;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -170,17 +301,16 @@ afs_edit_dir_reasons;
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
-TRACE_EVENT(afs_recv_data,
- TP_PROTO(struct afs_call *call, unsigned count, unsigned offset,
+TRACE_EVENT(afs_receive_data,
+ TP_PROTO(struct afs_call *call, struct iov_iter *iter,
bool want_more, int ret),
- TP_ARGS(call, count, offset, want_more, ret),
+ TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
+ __field(loff_t, remain )
__field(unsigned int, call )
__field(enum afs_call_state, state )
- __field(unsigned int, count )
- __field(unsigned int, offset )
__field(unsigned short, unmarshall )
__field(bool, want_more )
__field(int, ret )
@@ -190,17 +320,18 @@ TRACE_EVENT(afs_recv_data,
__entry->call = call->debug_id;
__entry->state = call->state;
__entry->unmarshall = call->unmarshall;
- __entry->count = count;
- __entry->offset = offset;
+ __entry->remain = iov_iter_count(iter);
__entry->want_more = want_more;
__entry->ret = ret;
),
- TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d",
+ TP_printk("c=%08x r=%llu u=%u w=%u s=%u ret=%d",
__entry->call,
- __entry->state, __entry->unmarshall,
- __entry->offset, __entry->count,
- __entry->want_more, __entry->ret)
+ __entry->remain,
+ __entry->unmarshall,
+ __entry->want_more,
+ __entry->state,
+ __entry->ret)
);
TRACE_EVENT(afs_notify_call,
@@ -301,7 +432,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%08x %06x:%06x:%06x %s",
+ TP_printk("c=%08x %06llx:%06llx:%06x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -555,24 +686,70 @@ TRACE_EVENT(afs_edit_dir,
);
TRACE_EVENT(afs_protocol_error,
- TP_PROTO(struct afs_call *call, int error, const void *where),
+ TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause),
+
+ TP_ARGS(call, error, cause),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(int, error )
+ __field(enum afs_eproto_cause, cause )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call ? call->debug_id : 0;
+ __entry->error = error;
+ __entry->cause = cause;
+ ),
+
+ TP_printk("c=%08x r=%d %s",
+ __entry->call, __entry->error,
+ __print_symbolic(__entry->cause, afs_eproto_causes))
+ );
+
+TRACE_EVENT(afs_io_error,
+ TP_PROTO(unsigned int call, int error, enum afs_io_error where),
TP_ARGS(call, error, where),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(int, error )
- __field(const void *, where )
+ __field(enum afs_io_error, where )
),
TP_fast_assign(
- __entry->call = call ? call->debug_id : 0;
+ __entry->call = call;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("c=%08x r=%d %s",
+ __entry->call, __entry->error,
+ __print_symbolic(__entry->where, afs_io_errors))
+ );
+
+TRACE_EVENT(afs_file_error,
+ TP_PROTO(struct afs_vnode *vnode, int error, enum afs_file_error where),
+
+ TP_ARGS(vnode, error, where),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid )
+ __field(int, error )
+ __field(enum afs_file_error, where )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = vnode->fid;
__entry->error = error;
__entry->where = where;
),
- TP_printk("c=%08x r=%d sp=%pSR",
- __entry->call, __entry->error, __entry->where)
+ TP_printk("%llx:%llx:%x r=%d %s",
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->error,
+ __print_symbolic(__entry->where, afs_file_errors))
);
TRACE_EVENT(afs_cm_no_server,
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 31aa10178335..93722e60204c 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -41,6 +41,7 @@
#define EM_TILEPRO 188 /* Tilera TILEPro */
#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
#define EM_TILEGX 191 /* Tilera TILE-Gx */
+#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 92fa24c24c92..b4967d48bfda 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -116,6 +116,12 @@
*
* 7.27
* - add FUSE_ABORT_ERROR
+ *
+ * 7.28
+ * - add FUSE_COPY_FILE_RANGE
+ * - add FOPEN_CACHE_DIR
+ * - add FUSE_MAX_PAGES, add max_pages to init_out
+ * - add FUSE_CACHE_SYMLINKS
*/
#ifndef _LINUX_FUSE_H
@@ -151,7 +157,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 27
+#define FUSE_KERNEL_MINOR_VERSION 28
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -219,10 +225,12 @@ struct fuse_file_lock {
* FOPEN_DIRECT_IO: bypass page cache for this open file
* FOPEN_KEEP_CACHE: don't invalidate the data cache on open
* FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_CACHE_DIR: allow caching this directory
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
+#define FOPEN_CACHE_DIR (1 << 3)
/**
* INIT request/reply flags
@@ -249,6 +257,8 @@ struct fuse_file_lock {
* FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc
* FUSE_POSIX_ACL: filesystem supports posix acls
* FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED
+ * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
+ * FUSE_CACHE_SYMLINKS: cache READLINK responses
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -272,6 +282,8 @@ struct fuse_file_lock {
#define FUSE_HANDLE_KILLPRIV (1 << 19)
#define FUSE_POSIX_ACL (1 << 20)
#define FUSE_ABORT_ERROR (1 << 21)
+#define FUSE_MAX_PAGES (1 << 22)
+#define FUSE_CACHE_SYMLINKS (1 << 23)
/**
* CUSE INIT request/reply flags
@@ -337,53 +349,54 @@ struct fuse_file_lock {
#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0)
enum fuse_opcode {
- FUSE_LOOKUP = 1,
- FUSE_FORGET = 2, /* no reply */
- FUSE_GETATTR = 3,
- FUSE_SETATTR = 4,
- FUSE_READLINK = 5,
- FUSE_SYMLINK = 6,
- FUSE_MKNOD = 8,
- FUSE_MKDIR = 9,
- FUSE_UNLINK = 10,
- FUSE_RMDIR = 11,
- FUSE_RENAME = 12,
- FUSE_LINK = 13,
- FUSE_OPEN = 14,
- FUSE_READ = 15,
- FUSE_WRITE = 16,
- FUSE_STATFS = 17,
- FUSE_RELEASE = 18,
- FUSE_FSYNC = 20,
- FUSE_SETXATTR = 21,
- FUSE_GETXATTR = 22,
- FUSE_LISTXATTR = 23,
- FUSE_REMOVEXATTR = 24,
- FUSE_FLUSH = 25,
- FUSE_INIT = 26,
- FUSE_OPENDIR = 27,
- FUSE_READDIR = 28,
- FUSE_RELEASEDIR = 29,
- FUSE_FSYNCDIR = 30,
- FUSE_GETLK = 31,
- FUSE_SETLK = 32,
- FUSE_SETLKW = 33,
- FUSE_ACCESS = 34,
- FUSE_CREATE = 35,
- FUSE_INTERRUPT = 36,
- FUSE_BMAP = 37,
- FUSE_DESTROY = 38,
- FUSE_IOCTL = 39,
- FUSE_POLL = 40,
- FUSE_NOTIFY_REPLY = 41,
- FUSE_BATCH_FORGET = 42,
- FUSE_FALLOCATE = 43,
- FUSE_READDIRPLUS = 44,
- FUSE_RENAME2 = 45,
- FUSE_LSEEK = 46,
+ FUSE_LOOKUP = 1,
+ FUSE_FORGET = 2, /* no reply */
+ FUSE_GETATTR = 3,
+ FUSE_SETATTR = 4,
+ FUSE_READLINK = 5,
+ FUSE_SYMLINK = 6,
+ FUSE_MKNOD = 8,
+ FUSE_MKDIR = 9,
+ FUSE_UNLINK = 10,
+ FUSE_RMDIR = 11,
+ FUSE_RENAME = 12,
+ FUSE_LINK = 13,
+ FUSE_OPEN = 14,
+ FUSE_READ = 15,
+ FUSE_WRITE = 16,
+ FUSE_STATFS = 17,
+ FUSE_RELEASE = 18,
+ FUSE_FSYNC = 20,
+ FUSE_SETXATTR = 21,
+ FUSE_GETXATTR = 22,
+ FUSE_LISTXATTR = 23,
+ FUSE_REMOVEXATTR = 24,
+ FUSE_FLUSH = 25,
+ FUSE_INIT = 26,
+ FUSE_OPENDIR = 27,
+ FUSE_READDIR = 28,
+ FUSE_RELEASEDIR = 29,
+ FUSE_FSYNCDIR = 30,
+ FUSE_GETLK = 31,
+ FUSE_SETLK = 32,
+ FUSE_SETLKW = 33,
+ FUSE_ACCESS = 34,
+ FUSE_CREATE = 35,
+ FUSE_INTERRUPT = 36,
+ FUSE_BMAP = 37,
+ FUSE_DESTROY = 38,
+ FUSE_IOCTL = 39,
+ FUSE_POLL = 40,
+ FUSE_NOTIFY_REPLY = 41,
+ FUSE_BATCH_FORGET = 42,
+ FUSE_FALLOCATE = 43,
+ FUSE_READDIRPLUS = 44,
+ FUSE_RENAME2 = 45,
+ FUSE_LSEEK = 46,
+ FUSE_COPY_FILE_RANGE = 47,
/* CUSE specific operations */
- CUSE_INIT = 4096,
+ CUSE_INIT = 4096,
};
enum fuse_notify_code {
@@ -610,7 +623,9 @@ struct fuse_init_out {
uint16_t congestion_threshold;
uint32_t max_write;
uint32_t time_gran;
- uint32_t unused[9];
+ uint16_t max_pages;
+ uint16_t padding;
+ uint32_t unused[8];
};
#define CUSE_INIT_INFO_MAX 4096
@@ -792,4 +807,14 @@ struct fuse_lseek_out {
uint64_t offset;
};
+struct fuse_copy_file_range_in {
+ uint64_t fh_in;
+ uint64_t off_in;
+ uint64_t nodeid_out;
+ uint64_t fh_out;
+ uint64_t off_out;
+ uint64_t len;
+ uint64_t flags;
+};
+
#endif /* _LINUX_FUSE_H */
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 0f3cb13db8e9..f45ee0f69c0c 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -61,6 +61,11 @@
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
#define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */
+#define KEYCTL_PKEY_QUERY 24 /* Query public key parameters */
+#define KEYCTL_PKEY_ENCRYPT 25 /* Encrypt a blob using a public key */
+#define KEYCTL_PKEY_DECRYPT 26 /* Decrypt a blob using a public key */
+#define KEYCTL_PKEY_SIGN 27 /* Create a public key signature */
+#define KEYCTL_PKEY_VERIFY 28 /* Verify a public key signature */
#define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */
/* keyctl structures */
@@ -82,4 +87,29 @@ struct keyctl_kdf_params {
__u32 __spare[8];
};
+#define KEYCTL_SUPPORTS_ENCRYPT 0x01
+#define KEYCTL_SUPPORTS_DECRYPT 0x02
+#define KEYCTL_SUPPORTS_SIGN 0x04
+#define KEYCTL_SUPPORTS_VERIFY 0x08
+
+struct keyctl_pkey_query {
+ __u32 supported_ops; /* Which ops are supported */
+ __u32 key_size; /* Size of the key in bits */
+ __u16 max_data_size; /* Maximum size of raw data to sign in bytes */
+ __u16 max_sig_size; /* Maximum size of signature in bytes */
+ __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */
+ __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */
+ __u32 __spare[10];
+};
+
+struct keyctl_pkey_params {
+ __s32 key_id; /* Serial no. of public key to use */
+ __u32 in_len; /* Input data size */
+ union {
+ __u32 out_len; /* Output buffer size (encrypt/decrypt/sign) */
+ __u32 in2_len; /* 2nd input data size (verify) */
+ };
+ __u32 __spare[7];
+};
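/*
 * A userspace sketch of querying key capabilities with the new operation via
 * the raw syscall; the empty info string and calling syscall() directly
 * (rather than going through libkeyutils) are assumptions.
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/keyctl.h>

static long pkey_query(int32_t key, struct keyctl_pkey_query *res)
{
	/* the info string may carry enc=/hash= options; left empty here */
	return syscall(__NR_keyctl, KEYCTL_PKEY_QUERY, key, 0UL, "", res);
}

/* callers then test e.g. res->supported_ops & KEYCTL_SUPPORTS_SIGN */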
+
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 36f76e777ef9..e5d0c5c611b5 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -369,6 +369,14 @@ struct media_v2_topology {
#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum)
#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
+#define MEDIA_IOC_REQUEST_ALLOC _IOR ('|', 0x05, int)
+
+/*
+ * These ioctls are called on the request file descriptor as returned
+ * by MEDIA_IOC_REQUEST_ALLOC.
+ */
+#define MEDIA_REQUEST_IOC_QUEUE _IO('|', 0x80)
+#define MEDIA_REQUEST_IOC_REINIT _IO('|', 0x81)
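/*
 * A userspace sketch of the request lifecycle these ioctls implement;
 * media_fd is assumed to be an open /dev/mediaX descriptor and error
 * handling is trimmed.
 */
#include <sys/ioctl.h>
#include <linux/media.h>

static int alloc_and_queue_request(int media_fd)
{
	int request_fd;

	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd) < 0)
		return -1;

	/* ... queue buffers and set controls against request_fd here ... */

	if (ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
		return -1;

	/* poll(request_fd) for completion, then REINIT to reuse or close() */
	return request_fd;
}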
#ifndef __KERNEL__
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e4ee10ee917d..51b095898f4b 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -402,6 +402,9 @@ enum v4l2_mpeg_video_multi_slice_mode {
#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
+
#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
@@ -1092,4 +1095,66 @@ enum v4l2_detect_md_mode {
#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
+
+struct v4l2_mpeg2_sequence {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
+ __u8 profile_and_level_indication;
+ __u8 progressive_sequence;
+ __u8 chroma_format;
+};
+
+struct v4l2_mpeg2_picture {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
+ __u8 picture_coding_type;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
+ __u8 f_code[2][2];
+ __u8 intra_dc_precision;
+ __u8 picture_structure;
+ __u8 top_field_first;
+ __u8 frame_pred_frame_dct;
+ __u8 concealment_motion_vectors;
+ __u8 q_scale_type;
+ __u8 intra_vlc_format;
+ __u8 alternate_scan;
+ __u8 repeat_first_field;
+ __u8 progressive_frame;
+};
+
+struct v4l2_ctrl_mpeg2_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ struct v4l2_mpeg2_sequence sequence;
+ struct v4l2_mpeg2_picture picture;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
+ __u8 quantiser_scale_code;
+
+ __u8 backward_ref_index;
+ __u8 forward_ref_index;
+};
+
+struct v4l2_ctrl_mpeg2_quantization {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
+ __u8 load_intra_quantiser_matrix;
+ __u8 load_non_intra_quantiser_matrix;
+ __u8 load_chroma_intra_quantiser_matrix;
+ __u8 load_chroma_non_intra_quantiser_matrix;
+
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
+
#endif
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index f378b9802d8b..813102810f53 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -303,6 +303,56 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
+#define VFIO_REGION_TYPE_GFX (1)
+#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
+
+/**
+ * struct vfio_region_gfx_edid - EDID region layout.
+ *
+ * Set display link state and EDID blob.
+ *
+ * The EDID blob has monitor information such as brand, name, serial
+ * number, physical size, supported video modes and more.
+ *
+ * This special region allows userspace (typically qemu) to set a virtual
+ * EDID for the virtual monitor, which allows a flexible display
+ * configuration.
+ *
+ * For the edid blob spec look here:
+ * https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
+ *
+ * On linux systems you can find the EDID blob in sysfs:
+ * /sys/class/drm/${card}/${connector}/edid
+ *
+ * You can use the edid-decode utility (comes with xorg-x11-utils) to
+ * decode the EDID blob.
+ *
+ * @edid_offset: location of the edid blob, relative to the
+ * start of the region (readonly).
+ * @edid_max_size: max size of the edid blob (readonly).
+ * @edid_size: actual edid size (read/write).
+ * @link_state: display link state (read/write).
+ * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
+ * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
+ * @max_xres: max display width (0 == no limitation, readonly).
+ * @max_yres: max display height (0 == no limitation, readonly).
+ *
+ * EDID update protocol:
+ * (1) set link-state to down.
+ * (2) update edid blob and size.
+ * (3) set link-state to up.
+ */
+struct vfio_region_gfx_edid {
+ __u32 edid_offset;
+ __u32 edid_max_size;
+ __u32 edid_size;
+ __u32 max_xres;
+ __u32 max_yres;
+ __u32 link_state;
+#define VFIO_DEVICE_GFX_LINK_STATE_UP 1
+#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
+};
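/*
 * A userspace sketch of the three-step EDID update protocol above;
 * region_offset is assumed to come from VFIO_DEVICE_GET_REGION_INFO and the
 * header struct is assumed to sit at the start of the region.
 */
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/vfio.h>

static void set_link_state(int device_fd, off_t region_offset, __u32 state)
{
	pwrite(device_fd, &state, sizeof(state),
	       region_offset + offsetof(struct vfio_region_gfx_edid, link_state));
}

static void update_edid(int device_fd, off_t region_offset,
			const void *edid, __u32 size)
{
	struct vfio_region_gfx_edid hdr;

	pread(device_fd, &hdr, sizeof(hdr), region_offset);

	/* (1) link down */
	set_link_state(device_fd, region_offset, VFIO_DEVICE_GFX_LINK_STATE_DOWN);

	/* (2) update blob and size */
	pwrite(device_fd, edid, size, region_offset + hdr.edid_offset);
	pwrite(device_fd, &size, sizeof(size),
	       region_offset + offsetof(struct vfio_region_gfx_edid, edid_size));

	/* (3) link up */
	set_link_state(device_fd, region_offset, VFIO_DEVICE_GFX_LINK_STATE_UP);
}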
+
/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
* which allows direct access to non-MSIX registers which happened to be within
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 29729d580452..c8e8ff810190 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -646,6 +646,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */
#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */
#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */
+#define V4L2_PIX_FMT_MPEG2_SLICE v4l2_fourcc('M', 'G', '2', 'S') /* MPEG-2 parsed slice data */
#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */
#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
@@ -687,6 +688,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
+#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
@@ -868,9 +870,16 @@ struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
- __u32 reserved[2];
+ __u32 capabilities;
+ __u32 reserved[1];
};
+/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
+#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
+#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
+#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
+#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
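/*
 * A userspace sketch of checking the new capabilities field after
 * VIDIOC_REQBUFS; the queue type and buffer count are illustrative.
 */
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static bool queue_supports_requests(int video_fd)
{
	struct v4l2_requestbuffers reqbufs = {
		.count  = 4,
		.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
	};

	if (ioctl(video_fd, VIDIOC_REQBUFS, &reqbufs) < 0)
		return false;

	return reqbufs.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS;
}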
+
/**
* struct v4l2_plane - plane info for multi-planar buffers
* @bytesused: number of bytes occupied by data in the plane (payload)
@@ -929,6 +938,7 @@ struct v4l2_plane {
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
+ * @request_fd: fd of the request that this buffer should use
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
@@ -953,7 +963,10 @@ struct v4l2_buffer {
} m;
__u32 length;
__u32 reserved2;
- __u32 reserved;
+ union {
+ __s32 request_fd;
+ __u32 reserved;
+ };
};
/* Flags for 'flags' field */
@@ -971,6 +984,8 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_BFRAME 0x00000020
/* Buffer is ready, but the data contained within is corrupted. */
#define V4L2_BUF_FLAG_ERROR 0x00000040
+/* Buffer is added to an unqueued request */
+#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
/* Buffer is prepared for queuing */
@@ -989,6 +1004,8 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
/* mem2mem encoder/decoder */
#define V4L2_BUF_FLAG_LAST 0x00100000
+/* request_fd is valid */
+#define V4L2_BUF_FLAG_REQUEST_FD 0x00800000
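/*
 * A userspace sketch of queuing a buffer into a request instead of directly
 * to the driver, using the new flag and request_fd field; a single-planar
 * MMAP output queue is assumed.
 */
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int qbuf_to_request(int video_fd, int request_fd, unsigned int index)
{
	struct v4l2_buffer buf = {
		.type       = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory     = V4L2_MEMORY_MMAP,
		.index      = index,
		.flags      = V4L2_BUF_FLAG_REQUEST_FD,
		.request_fd = request_fd,
	};

	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}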
/**
* struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1605,6 +1622,8 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
+ struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
+ struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1618,7 +1637,8 @@ struct v4l2_ext_controls {
};
__u32 count;
__u32 error_idx;
- __u32 reserved[2];
+ __s32 request_fd;
+ __u32 reserved[1];
struct v4l2_ext_control *controls;
};
@@ -1631,6 +1651,7 @@ struct v4l2_ext_controls {
#define V4L2_CTRL_MAX_DIMS (4)
#define V4L2_CTRL_WHICH_CUR_VAL 0
#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000
+#define V4L2_CTRL_WHICH_REQUEST_VAL 0x0f010000
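/*
 * A userspace sketch of storing a control value in a request rather than
 * applying it immediately, using the new which value and request_fd field;
 * the control ID and value passed in are illustrative.
 */
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_ctrl_in_request(int video_fd, int request_fd,
			       __u32 id, __s32 value)
{
	struct v4l2_ext_control ctrl = { .id = id, .value = value };
	struct v4l2_ext_controls ctrls = {
		.which      = V4L2_CTRL_WHICH_REQUEST_VAL,
		.request_fd = request_fd,
		.count      = 1,
		.controls   = &ctrl,
	};

	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}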
enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_INTEGER = 1,
@@ -1648,6 +1669,8 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
+ V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103,
+ V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -2321,6 +2344,7 @@ struct v4l2_dbg_chip_info {
* return: number of created buffers
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2328,7 +2352,8 @@ struct v4l2_create_buffers {
__u32 count;
__u32 memory;
struct v4l2_format format;
- __u32 reserved[8];
+ __u32 capabilities;
+ __u32 reserved[7];
};
/*
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 13b8cb563892..a1966cd7b677 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -34,15 +34,23 @@
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
+#define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */
+#define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
+#define VIRTIO_BALLOON_CMD_ID_STOP 0
+#define VIRTIO_BALLOON_CMD_ID_DONE 1
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
__u32 num_pages;
/* Number of pages we've actually got in balloon. */
__u32 actual;
+ /* Free page report command id, readonly by guest */
+ __u32 free_page_report_cmd_id;
+ /* Stores PAGE_POISON if page poisoning is in use */
+ __u32 poison_val;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 3abd327bada6..7d09e54ae54e 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -36,12 +36,9 @@ struct dlfb_data {
struct usb_device *udev;
struct fb_info *info;
struct urb_list urbs;
- struct kref kref;
char *backing_buffer;
int fb_count;
bool virtualized; /* true when physical usb device not present */
- struct delayed_work init_framebuffer_work;
- struct delayed_work free_framebuffer_work;
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
char *edid; /* null until we read edid from hw or get from sysfs */
diff --git a/init/do_mounts.c b/init/do_mounts.c
index e1c9afa9d8c9..a754e3ba9831 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -167,6 +167,24 @@ done:
}
return res;
}
+
+/**
+ * match_dev_by_label - callback for finding a partition using its label
+ * @dev: device passed in by the caller
+ * @data: opaque pointer to the label to match
+ *
+ * Returns 1 if the device matches, and 0 otherwise.
+ */
+static int match_dev_by_label(struct device *dev, const void *data)
+{
+ const char *label = data;
+ struct hd_struct *part = dev_to_part(dev);
+
+ if (part->info && !strcmp(label, part->info->volname))
+ return 1;
+
+ return 0;
+}
#endif
/*
@@ -190,6 +208,8 @@ done:
* a partition with a known unique id.
* 8) <major>:<minor> major and minor number of the device separated by
* a colon.
+ * 9) PARTLABEL=<name> with name being the GPT partition label.
+ * MSDOS partitions do not support labels!
*
* If name doesn't fall into the categories above, we return (0,0).
* block_class is used to check if something is a disk name. If the disk
@@ -211,6 +231,17 @@ dev_t name_to_dev_t(const char *name)
if (!res)
goto fail;
goto done;
+ } else if (strncmp(name, "PARTLABEL=", 10) == 0) {
+ struct device *dev;
+
+ dev = class_find_device(&block_class, NULL, name + 10,
+ &match_dev_by_label);
+ if (!dev)
+ goto fail;
+
+ res = dev->devt;
+ put_device(dev);
+ goto done;
}
#endif
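As a hedged illustration (not part of the patch), the prefix handling above amounts to the following userspace-style parsing; the label "rootfs" is an arbitrary example, corresponding to a command line such as root=PARTLABEL=rootfs.

    #include <stdio.h>
    #include <string.h>

    /* Everything after "PARTLABEL=" is treated as the GPT partition label. */
    static const char *partlabel_of(const char *name)
    {
        if (strncmp(name, "PARTLABEL=", 10) == 0)
            return name + 10;
        return NULL;
    }

    int main(void)
    {
        const char *label = partlabel_of("PARTLABEL=rootfs");

        printf("%s\n", label ? label : "(not a PARTLABEL= spec)");
        return 0;
    }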
diff --git a/init/main.c b/init/main.c
index 1c3f90264280..ee147103ba1b 100644
--- a/init/main.c
+++ b/init/main.c
@@ -25,7 +25,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/nmi.h>
@@ -375,10 +375,11 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
static void __init setup_command_line(char *command_line)
{
saved_command_line =
- memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
+ memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES);
initcall_command_line =
- memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
- static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0);
+ memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES);
+ static_command_line = memblock_alloc(strlen(command_line) + 1,
+ SMP_CACHE_BYTES);
strcpy(saved_command_line, boot_command_line);
strcpy(static_command_line, command_line);
}
@@ -773,8 +774,10 @@ static int __init initcall_blacklist(char *str)
str_entry = strsep(&str, ",");
if (str_entry) {
pr_debug("blacklisting initcall %s\n", str_entry);
- entry = alloc_bootmem(sizeof(*entry));
- entry->buf = alloc_bootmem(strlen(str_entry) + 1);
+ entry = memblock_alloc(sizeof(*entry),
+ SMP_CACHE_BYTES);
+ entry->buf = memblock_alloc(strlen(str_entry) + 1,
+ SMP_CACHE_BYTES);
strcpy(entry->buf, str_entry);
list_add(&entry->next, &blacklisted_initcalls);
}
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 8ad93c29f511..49f9bf4ffc7f 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -88,17 +88,39 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
}
+static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, semmni;
+ struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+
+ semmni = ns->sem_ctls[3];
+ ret = proc_ipc_dointvec(table, write, buffer, lenp, ppos);
+
+ if (!ret)
+ ret = sem_check_semmni(current->nsproxy->ipc_ns);
+
+ /*
+ * Reset the semmni value if an error happens.
+ */
+ if (ret)
+ ns->sem_ctls[3] = semmni;
+ return ret;
+}
+
#else
#define proc_ipc_doulongvec_minmax NULL
#define proc_ipc_dointvec NULL
#define proc_ipc_dointvec_minmax NULL
#define proc_ipc_dointvec_minmax_orphans NULL
#define proc_ipc_auto_msgmni NULL
+#define proc_ipc_sem_dointvec NULL
#endif
static int zero;
static int one = 1;
static int int_max = INT_MAX;
+static int ipc_mni = IPCMNI;
static struct ctl_table ipc_kern_table[] = {
{
@@ -120,7 +142,9 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.shm_ctlmni,
.maxlen = sizeof(init_ipc_ns.shm_ctlmni),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec,
+ .proc_handler = proc_ipc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &ipc_mni,
},
{
.procname = "shm_rmid_forced",
@@ -147,7 +171,7 @@ static struct ctl_table ipc_kern_table[] = {
.mode = 0644,
.proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero,
- .extra2 = &int_max,
+ .extra2 = &ipc_mni,
},
{
.procname = "auto_msgmni",
@@ -172,7 +196,7 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.sem_ctls,
.maxlen = 4*sizeof(int),
.mode = 0644,
- .proc_handler = proc_ipc_dointvec,
+ .proc_handler = proc_ipc_sem_dointvec,
},
#ifdef CONFIG_CHECKPOINT_RESTORE
{
diff --git a/ipc/util.h b/ipc/util.h
index 1ee81bce25e9..d768fdbed515 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -217,6 +217,15 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
void (*free)(struct ipc_namespace *, struct kern_ipc_perm *));
+static inline int sem_check_semmni(struct ipc_namespace *ns) {
+ /*
+ * Check semmni range [0, IPCMNI]
+ * semmni is the last element of sem_ctls[4] array
+ */
+ return ((ns->sem_ctls[3] < 0) || (ns->sem_ctls[3] > IPCMNI))
+ ? -ERANGE : 0;
+}
+
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
struct compat_ipc_perm {
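To see the new range check from userspace, a hedged sketch (not part of the patch): writing a sem line whose fourth field (semmni) lies outside [0, IPCMNI] should now fail with ERANGE; the other values below are arbitrary.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* Fields are semmsl semmns semopm semmni; the last one is out of range. */
        const char *bad = "32000 1024000000 500 99999999";
        int fd = open("/proc/sys/kernel/sem", O_WRONLY);

        if (fd < 0)
            return 1;
        if (write(fd, bad, strlen(bad)) < 0)
            printf("write rejected: %s\n", strerror(errno));  /* expect ERANGE */
        close(fd);
        return 0;
    }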
diff --git a/kernel/Makefile b/kernel/Makefile
index 7a63d567fdb5..7343b3a9bff0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -117,6 +117,10 @@ obj-$(CONFIG_HAS_IOMEM) += iomem.o
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_RSEQ) += rseq.o
+obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
+KASAN_SANITIZE_stackleak.o := n
+KCOV_INSTRUMENT_stackleak.o := n
+
$(obj)/configs.o: $(obj)/config_data.h
targets += config_data.gz
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c373e887c066..9795d75b09b2 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -13,7 +13,7 @@
#include <linux/log2.h>
#include <linux/spinlock_types.h>
-void foo(void)
+int main(void)
{
/* The enum constants to put into include/generated/bounds.h */
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
@@ -23,4 +23,6 @@ void foo(void)
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
/* End of constants */
+
+ return 0;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 171a2c88e77d..1971ca325fb4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2852,10 +2852,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
fn->ret_type == RET_PTR_TO_MAP_VALUE) {
- if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
- else
- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
/* remember map_ptr, so that check_map_access()
@@ -2868,7 +2864,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
- regs[BPF_REG_0].id = ++env->id_gen;
+ if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
+ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+ } else {
+ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+ regs[BPF_REG_0].id = ++env->id_gen;
+ }
} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
int id = acquire_reference_state(env, insn_idx);
if (id < 0)
@@ -3046,7 +3047,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->off = ptr_reg->off + smin_val;
- dst_reg->range = ptr_reg->range;
+ dst_reg->raw = ptr_reg->raw;
break;
}
/* A new variable offset is created. Note that off_reg->off
@@ -3076,10 +3077,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
+ dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
- dst_reg->range = 0;
+ dst_reg->raw = 0;
}
break;
case BPF_SUB:
@@ -3108,7 +3110,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg->var_off = ptr_reg->var_off;
dst_reg->id = ptr_reg->id;
dst_reg->off = ptr_reg->off - smin_val;
- dst_reg->range = ptr_reg->range;
+ dst_reg->raw = ptr_reg->raw;
break;
}
/* A new variable offset is created. If the subtrahend is known
@@ -3134,11 +3136,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
+ dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
if (smin_val < 0)
- dst_reg->range = 0;
+ dst_reg->raw = 0;
}
break;
case BPF_AND:
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8b79318810ad..6aaf5dd5383b 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -493,7 +493,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
}
/**
- * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
@@ -502,8 +502,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
-static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
{
lockdep_assert_held(&cgroup_mutex);
@@ -524,35 +524,6 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
}
/**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
- * @cgrp: the cgroup of interest
- * @ss: the subsystem of interest
- *
- * Find and get the effective css of @cgrp for @ss. The effective css is
- * defined as the matching css of the nearest ancestor including self which
- * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
- * the root css is returned, so this function always returns a valid css.
- *
- * The returned css is not guaranteed to be online, and therefore it is the
- * callers responsiblity to tryget a reference for it.
- */
-struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
-{
- struct cgroup_subsys_state *css;
-
- do {
- css = cgroup_css(cgrp, ss);
-
- if (css)
- return css;
- cgrp = cgroup_parent(cgrp);
- } while (cgrp);
-
- return init_css_set.subsys[ss->id];
-}
-
-/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
@@ -634,11 +605,10 @@ EXPORT_SYMBOL_GPL(of_css);
*
* Should be called under cgroup_[tree_]mutex.
*/
-#define for_each_e_css(css, ssid, cgrp) \
- for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
- if (!((css) = cgroup_e_css_by_mask(cgrp, \
- cgroup_subsys[(ssid)]))) \
- ; \
+#define for_each_e_css(css, ssid, cgrp) \
+ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
+ if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
+ ; \
else
/**
@@ -1037,7 +1007,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
- template[i] = cgroup_e_css_by_mask(cgrp, ss);
+ template[i] = cgroup_e_css(cgrp, ss);
} else {
/*
* @ss is not in this hierarchy, so we don't want
@@ -3054,7 +3024,7 @@ static int cgroup_apply_control(struct cgroup *cgrp)
return ret;
/*
- * At this point, cgroup_e_css_by_mask() results reflect the new csses
+ * At this point, cgroup_e_css() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/
diff --git a/kernel/configs/kvm_guest.config b/kernel/configs/kvm_guest.config
index 108fecc20fc1..208481d91090 100644
--- a/kernel/configs/kvm_guest.config
+++ b/kernel/configs/kvm_guest.config
@@ -20,6 +20,7 @@ CONFIG_PARAVIRT=y
CONFIG_KVM_GUEST=y
CONFIG_S390_GUEST=y
CONFIG_VIRTIO=y
+CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_CONSOLE=y
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f14c376937e5..22a12ab5a5e9 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -4,7 +4,7 @@
*
* DMA operations that map physical memory directly without using an IOMMU.
*/
-#include <linux/bootmem.h> /* for max_pfn */
+#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index ebecaf255ea2..5731daa09a32 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -40,7 +40,7 @@
#include <asm/dma.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/iommu-helper.h>
#define CREATE_TRACE_POINTS
@@ -204,10 +204,10 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- io_tlb_list = memblock_virt_alloc(
+ io_tlb_list = memblock_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
- io_tlb_orig_addr = memblock_virt_alloc(
+ io_tlb_orig_addr = memblock_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
for (i = 0; i < io_tlb_nslabs; i++) {
@@ -242,7 +242,7 @@ swiotlb_init(int verbose)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */
- vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+ vstart = memblock_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5a97f34bc14c..8c490130c4fb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8376,30 +8376,39 @@ static struct pmu perf_tracepoint = {
*
* PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
* if not set, create kprobe/uprobe
+ *
+ * The following values specify a reference counter (or semaphore in the
+ * terminology of tools like dtrace, systemtap, etc.) for Userspace Statically
+ * Defined Tracepoints (USDT). Currently, the upper 32 bits of config carry the offset.
+ *
+ * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config used as the offset
+ * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left
*/
enum perf_probe_config {
PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */
+ PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
+ PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
};
PMU_FORMAT_ATTR(retprobe, "config:0");
+#endif
-static struct attribute *probe_attrs[] = {
+#ifdef CONFIG_KPROBE_EVENTS
+static struct attribute *kprobe_attrs[] = {
&format_attr_retprobe.attr,
NULL,
};
-static struct attribute_group probe_format_group = {
+static struct attribute_group kprobe_format_group = {
.name = "format",
- .attrs = probe_attrs,
+ .attrs = kprobe_attrs,
};
-static const struct attribute_group *probe_attr_groups[] = {
- &probe_format_group,
+static const struct attribute_group *kprobe_attr_groups[] = {
+ &kprobe_format_group,
NULL,
};
-#endif
-#ifdef CONFIG_KPROBE_EVENTS
static int perf_kprobe_event_init(struct perf_event *event);
static struct pmu perf_kprobe = {
.task_ctx_nr = perf_sw_context,
@@ -8409,7 +8418,7 @@ static struct pmu perf_kprobe = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
- .attr_groups = probe_attr_groups,
+ .attr_groups = kprobe_attr_groups,
};
static int perf_kprobe_event_init(struct perf_event *event)
@@ -8441,6 +8450,24 @@ static int perf_kprobe_event_init(struct perf_event *event)
#endif /* CONFIG_KPROBE_EVENTS */
#ifdef CONFIG_UPROBE_EVENTS
+PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
+
+static struct attribute *uprobe_attrs[] = {
+ &format_attr_retprobe.attr,
+ &format_attr_ref_ctr_offset.attr,
+ NULL,
+};
+
+static struct attribute_group uprobe_format_group = {
+ .name = "format",
+ .attrs = uprobe_attrs,
+};
+
+static const struct attribute_group *uprobe_attr_groups[] = {
+ &uprobe_format_group,
+ NULL,
+};
+
static int perf_uprobe_event_init(struct perf_event *event);
static struct pmu perf_uprobe = {
.task_ctx_nr = perf_sw_context,
@@ -8450,12 +8477,13 @@ static struct pmu perf_uprobe = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
- .attr_groups = probe_attr_groups,
+ .attr_groups = uprobe_attr_groups,
};
static int perf_uprobe_event_init(struct perf_event *event)
{
int err;
+ unsigned long ref_ctr_offset;
bool is_retprobe;
if (event->attr.type != perf_uprobe.type)
@@ -8471,7 +8499,8 @@ static int perf_uprobe_event_init(struct perf_event *event)
return -EOPNOTSUPP;
is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
- err = perf_uprobe_init(event, is_retprobe);
+ ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
+ err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
if (err)
return err;
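To make the encoding concrete, a hedged sketch of how userspace would pack a reference counter offset into the upper half of perf_event_attr.config, matching the config:32-63 format exposed above. The offsets are illustrative, and the dynamic PMU type would normally be read from sysfs; the probed instruction offset itself still travels separately, as the trace_event_perf.c hunk below (attr.probe_offset) shows.

    #include <stdint.h>
    #include <stdio.h>

    #define REF_CTR_OFFSET_SHIFT 32   /* mirrors PERF_UPROBE_REF_CTR_OFFSET_SHIFT above */

    int main(void)
    {
        uint64_t ref_ctr_offset = 0x5678;  /* illustrative semaphore offset in the binary */
        uint64_t is_retprobe = 0;          /* bit 0: PERF_PROBE_CONFIG_IS_RETPROBE */
        uint64_t config = (ref_ctr_offset << REF_CTR_OFFSET_SHIFT) | is_retprobe;

        printf("attr.config = 0x%llx\n", (unsigned long long)config);
        return 0;
    }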
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2bf792d22087..96d4bee83489 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -73,6 +73,7 @@ struct uprobe {
struct uprobe_consumer *consumers;
struct inode *inode; /* Also hold a ref to inode */
loff_t offset;
+ loff_t ref_ctr_offset;
unsigned long flags;
/*
@@ -88,6 +89,15 @@ struct uprobe {
struct arch_uprobe arch;
};
+struct delayed_uprobe {
+ struct list_head list;
+ struct uprobe *uprobe;
+ struct mm_struct *mm;
+};
+
+static DEFINE_MUTEX(delayed_uprobe_lock);
+static LIST_HEAD(delayed_uprobe_list);
+
/*
* Execute out of line area: anonymous executable mapping installed
* by the probed task to execute the copy of the original instruction
@@ -282,6 +292,166 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
return 1;
}
+static struct delayed_uprobe *
+delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
+{
+ struct delayed_uprobe *du;
+
+ list_for_each_entry(du, &delayed_uprobe_list, list)
+ if (du->uprobe == uprobe && du->mm == mm)
+ return du;
+ return NULL;
+}
+
+static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
+{
+ struct delayed_uprobe *du;
+
+ if (delayed_uprobe_check(uprobe, mm))
+ return 0;
+
+ du = kzalloc(sizeof(*du), GFP_KERNEL);
+ if (!du)
+ return -ENOMEM;
+
+ du->uprobe = uprobe;
+ du->mm = mm;
+ list_add(&du->list, &delayed_uprobe_list);
+ return 0;
+}
+
+static void delayed_uprobe_delete(struct delayed_uprobe *du)
+{
+ if (WARN_ON(!du))
+ return;
+ list_del(&du->list);
+ kfree(du);
+}
+
+static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
+{
+ struct list_head *pos, *q;
+ struct delayed_uprobe *du;
+
+ if (!uprobe && !mm)
+ return;
+
+ list_for_each_safe(pos, q, &delayed_uprobe_list) {
+ du = list_entry(pos, struct delayed_uprobe, list);
+
+ if (uprobe && du->uprobe != uprobe)
+ continue;
+ if (mm && du->mm != mm)
+ continue;
+
+ delayed_uprobe_delete(du);
+ }
+}
+
+static bool valid_ref_ctr_vma(struct uprobe *uprobe,
+ struct vm_area_struct *vma)
+{
+ unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
+
+ return uprobe->ref_ctr_offset &&
+ vma->vm_file &&
+ file_inode(vma->vm_file) == uprobe->inode &&
+ (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
+ vma->vm_start <= vaddr &&
+ vma->vm_end > vaddr;
+}
+
+static struct vm_area_struct *
+find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
+{
+ struct vm_area_struct *tmp;
+
+ for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
+ if (valid_ref_ctr_vma(uprobe, tmp))
+ return tmp;
+
+ return NULL;
+}
+
+static int
+__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
+{
+ void *kaddr;
+ struct page *page;
+ struct vm_area_struct *vma;
+ int ret;
+ short *ptr;
+
+ if (!vaddr || !d)
+ return -EINVAL;
+
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1,
+ FOLL_WRITE, &page, &vma, NULL);
+ if (unlikely(ret <= 0)) {
+ /*
+ * We are asking for 1 page. If get_user_pages_remote() fails,
+ * it may return 0; in that case we have to return an error.
+ */
+ return ret == 0 ? -EBUSY : ret;
+ }
+
+ kaddr = kmap_atomic(page);
+ ptr = kaddr + (vaddr & ~PAGE_MASK);
+
+ if (unlikely(*ptr + d < 0)) {
+ pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
+ "curr val: %d, delta: %d\n", vaddr, *ptr, d);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *ptr += d;
+ ret = 0;
+out:
+ kunmap_atomic(kaddr);
+ put_page(page);
+ return ret;
+}
+
+static void update_ref_ctr_warn(struct uprobe *uprobe,
+ struct mm_struct *mm, short d)
+{
+ pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
+ "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
+ d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
+ (unsigned long long) uprobe->offset,
+ (unsigned long long) uprobe->ref_ctr_offset, mm);
+}
+
+static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
+ short d)
+{
+ struct vm_area_struct *rc_vma;
+ unsigned long rc_vaddr;
+ int ret = 0;
+
+ rc_vma = find_ref_ctr_vma(uprobe, mm);
+
+ if (rc_vma) {
+ rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
+ ret = __update_ref_ctr(mm, rc_vaddr, d);
+ if (ret)
+ update_ref_ctr_warn(uprobe, mm, d);
+
+ if (d > 0)
+ return ret;
+ }
+
+ mutex_lock(&delayed_uprobe_lock);
+ if (d > 0)
+ ret = delayed_uprobe_add(uprobe, mm);
+ else
+ delayed_uprobe_remove(uprobe, mm);
+ mutex_unlock(&delayed_uprobe_lock);
+
+ return ret;
+}
+
/*
* NOTE:
* Expect the breakpoint instruction to be the smallest size instruction for
@@ -302,9 +472,13 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long vaddr, uprobe_opcode_t opcode)
{
+ struct uprobe *uprobe;
struct page *old_page, *new_page;
struct vm_area_struct *vma;
- int ret;
+ int ret, is_register, ref_ctr_updated = 0;
+
+ is_register = is_swbp_insn(&opcode);
+ uprobe = container_of(auprobe, struct uprobe, arch);
retry:
/* Read the page with vaddr into memory */
@@ -317,6 +491,15 @@ retry:
if (ret <= 0)
goto put_old;
+ /* We are going to replace instruction, update ref_ctr. */
+ if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
+ ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
+ if (ret)
+ goto put_old;
+
+ ref_ctr_updated = 1;
+ }
+
ret = anon_vma_prepare(vma);
if (ret)
goto put_old;
@@ -337,6 +520,11 @@ put_old:
if (unlikely(ret == -EAGAIN))
goto retry;
+
+ /* Revert back reference counter if instruction update failed. */
+ if (ret && is_register && ref_ctr_updated)
+ update_ref_ctr(uprobe, mm, -1);
+
return ret;
}
@@ -378,8 +566,15 @@ static struct uprobe *get_uprobe(struct uprobe *uprobe)
static void put_uprobe(struct uprobe *uprobe)
{
- if (atomic_dec_and_test(&uprobe->ref))
+ if (atomic_dec_and_test(&uprobe->ref)) {
+ /*
+ * If the application munmaps the exec vma before uprobe_unregister()
+ * is called, we don't get a chance to remove the uprobe from
+ * delayed_uprobe_list in remove_breakpoint(). Do it here.
+ */
+ delayed_uprobe_remove(uprobe, NULL);
kfree(uprobe);
+ }
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -484,7 +679,18 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
return u;
}
-static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
+static void
+ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
+{
+ pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
+ "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
+ uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
+ (unsigned long long) cur_uprobe->ref_ctr_offset,
+ (unsigned long long) uprobe->ref_ctr_offset);
+}
+
+static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
+ loff_t ref_ctr_offset)
{
struct uprobe *uprobe, *cur_uprobe;
@@ -494,6 +700,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
uprobe->inode = inode;
uprobe->offset = offset;
+ uprobe->ref_ctr_offset = ref_ctr_offset;
init_rwsem(&uprobe->register_rwsem);
init_rwsem(&uprobe->consumer_rwsem);
@@ -501,6 +708,12 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
cur_uprobe = insert_uprobe(uprobe);
/* a uprobe exists for this inode:offset combination */
if (cur_uprobe) {
+ if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
+ ref_ctr_mismatch_warn(cur_uprobe, uprobe);
+ put_uprobe(cur_uprobe);
+ kfree(uprobe);
+ return ERR_PTR(-EINVAL);
+ }
kfree(uprobe);
uprobe = cur_uprobe;
}
@@ -895,7 +1108,7 @@ EXPORT_SYMBOL_GPL(uprobe_unregister);
* else return 0 (success)
*/
static int __uprobe_register(struct inode *inode, loff_t offset,
- struct uprobe_consumer *uc)
+ loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
struct uprobe *uprobe;
int ret;
@@ -912,9 +1125,12 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
return -EINVAL;
retry:
- uprobe = alloc_uprobe(inode, offset);
+ uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
if (!uprobe)
return -ENOMEM;
+ if (IS_ERR(uprobe))
+ return PTR_ERR(uprobe);
+
/*
* We can race with uprobe_unregister()->delete_uprobe().
* Check uprobe_is_active() and retry if it is false.
@@ -938,10 +1154,17 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
int uprobe_register(struct inode *inode, loff_t offset,
struct uprobe_consumer *uc)
{
- return __uprobe_register(inode, offset, uc);
+ return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);
+int uprobe_register_refctr(struct inode *inode, loff_t offset,
+ loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+{
+ return __uprobe_register(inode, offset, ref_ctr_offset, uc);
+}
+EXPORT_SYMBOL_GPL(uprobe_register_refctr);
+
/*
* uprobe_apply - unregister an already registered probe.
* @inode: the file in which the probe has to be removed.
@@ -1060,6 +1283,35 @@ static void build_probe_list(struct inode *inode,
spin_unlock(&uprobes_treelock);
}
+/* @vma contains reference counter, not the probed instruction. */
+static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
+{
+ struct list_head *pos, *q;
+ struct delayed_uprobe *du;
+ unsigned long vaddr;
+ int ret = 0, err = 0;
+
+ mutex_lock(&delayed_uprobe_lock);
+ list_for_each_safe(pos, q, &delayed_uprobe_list) {
+ du = list_entry(pos, struct delayed_uprobe, list);
+
+ if (du->mm != vma->vm_mm ||
+ !valid_ref_ctr_vma(du->uprobe, vma))
+ continue;
+
+ vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
+ ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
+ if (ret) {
+ update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
+ if (!err)
+ err = ret;
+ }
+ delayed_uprobe_delete(du);
+ }
+ mutex_unlock(&delayed_uprobe_lock);
+ return err;
+}
+
/*
* Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
*
@@ -1072,7 +1324,15 @@ int uprobe_mmap(struct vm_area_struct *vma)
struct uprobe *uprobe, *u;
struct inode *inode;
- if (no_uprobe_events() || !valid_vma(vma, true))
+ if (no_uprobe_events())
+ return 0;
+
+ if (vma->vm_file &&
+ (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
+ test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
+ delayed_ref_ctr_inc(vma);
+
+ if (!valid_vma(vma, true))
return 0;
inode = file_inode(vma->vm_file);
@@ -1246,6 +1506,10 @@ void uprobe_clear_state(struct mm_struct *mm)
{
struct xol_area *area = mm->uprobes_state.xol_area;
+ mutex_lock(&delayed_uprobe_lock);
+ delayed_uprobe_remove(NULL, mm);
+ mutex_unlock(&delayed_uprobe_lock);
+
if (!area)
return;
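For background, a hedged sketch of what the reference counter ("semaphore") manipulated by __update_ref_ctr() above usually looks like on the application side: a 16-bit counter (note the short * in the kernel code) that the kernel increments while a tracer is attached. The section name and symbol below are assumptions in the style of SystemTap SDT semaphores, not something this patch defines.

    #include <stdio.h>

    /* Placed at a known offset in the binary; a tracer passes that offset to the
     * kernel as ref_ctr_offset, and the kernel bumps the counter per attached probe. */
    __attribute__((section(".probes")))
    static volatile unsigned short demo_probe_semaphore;

    int main(void)
    {
        if (demo_probe_semaphore)   /* non-zero only while a tracer is attached */
            printf("tracer attached: emit expensive payload\n");
        else
            printf("no tracer: skip\n");
        return 0;
    }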
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index bc80a4e268c0..17f75b545f66 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -173,8 +173,7 @@ static void fei_debugfs_remove_attr(struct fei_attr *attr)
struct dentry *dir;
dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
- if (dir)
- debugfs_remove_recursive(dir);
+ debugfs_remove_recursive(dir);
}
static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8f82a3bdcb8f..07cddff89c7b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,6 +91,7 @@
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
+#include <linux/stackleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -1926,6 +1927,8 @@ static __latent_entropy struct task_struct *copy_process(
if (retval)
goto bad_fork_cleanup_io;
+ stackleak_task_init(p);
+
if (pid != &init_struct_pid) {
pid = alloc_pid(p->nsproxy->pid_ns_for_children);
if (IS_ERR(pid)) {
diff --git a/kernel/futex.c b/kernel/futex.c
index 3e2de8fc1891..f423f9b6577e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -65,7 +65,7 @@
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <asm/futex.h>
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index b9132d1269ef..cb8e3e8ac7b9 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
+#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
@@ -242,6 +243,28 @@ void reset_hung_task_detector(void)
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
+static bool hung_detector_suspended;
+
+static int hungtask_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ hung_detector_suspended = true;
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ hung_detector_suspended = false;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
/*
* kthread which checks for tasks stuck in D state
*/
@@ -261,7 +284,8 @@ static int watchdog(void *dummy)
interval = min_t(unsigned long, interval, timeout);
t = hung_timeout_jiffies(hung_last_checked, interval);
if (t <= 0) {
- if (!atomic_xchg(&reset_hung_task, 0))
+ if (!atomic_xchg(&reset_hung_task, 0) &&
+ !hung_detector_suspended)
check_hung_uninterruptible_tasks(timeout);
hung_last_checked = jiffies;
continue;
@@ -275,6 +299,10 @@ static int watchdog(void *dummy)
static int __init hung_task_init(void)
{
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ /* Disable hung task detector on suspend */
+ pm_notifier(hungtask_pm_notify, 0);
+
watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
return 0;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index c6a3b6851372..35cf0ad29718 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -25,8 +25,6 @@
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
-#include <linux/kexec.h>
-#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 0130e488ebfe..8f36c27c1794 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,7 +4,7 @@
#endif
#include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/debug_locks.h>
/*
diff --git a/kernel/panic.c b/kernel/panic.c
index 8b2e002d52eb..f6d549a29a5c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -136,7 +136,7 @@ void panic(const char *fmt, ...)
{
static char buf[1024];
va_list args;
- long i, i_next = 0;
+ long i, i_next = 0, len;
int state = 0;
int old_cpu, this_cpu;
bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
@@ -173,8 +173,12 @@ void panic(const char *fmt, ...)
console_verbose();
bust_spinlocks(1);
va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
+
+ if (len && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
/*
@@ -631,7 +635,7 @@ device_initcall(register_warn_debugfs);
*/
__visible void __stack_chk_fail(void)
{
- panic("stack-protector: Kernel stack is corrupted in: %pB\n",
+ panic("stack-protector: Kernel stack is corrupted in: %pB",
__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);
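The vscnprintf() change above lets panic() strip a single trailing newline from the formatted message, so callers like __stack_chk_fail() no longer produce a doubled newline after "Kernel panic - not syncing:". A userspace model of that trim, for illustration only (the message text is made up):

    #include <stdio.h>
    #include <string.h>

    /* Drop one trailing '\n' before the buffer is printed with its own newline. */
    static void trim_trailing_newline(char *buf)
    {
        size_t len = strlen(buf);

        if (len && buf[len - 1] == '\n')
            buf[len - 1] = '\0';
    }

    int main(void)
    {
        char msg[64];

        snprintf(msg, sizeof(msg), "stack-protector: corrupted in: %s\n", "foo+0x10");
        trim_trailing_newline(msg);
        printf("Kernel panic - not syncing: %s\n", msg);
        return 0;
    }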
diff --git a/kernel/pid.c b/kernel/pid.c
index cdf63e53a014..b2f6c506035d 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -31,7 +31,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3d37c279c090..b0308a2c6000 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -23,7 +23,7 @@
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
@@ -963,7 +963,8 @@ void __init __register_nosave_region(unsigned long start_pfn,
BUG_ON(!region);
} else {
/* This allocation cannot fail */
- region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
+ region = memblock_alloc(sizeof(struct nosave_region),
+ SMP_CACHE_BYTES);
}
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b77150ad1965..1b2a029360b7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/crash_core.h>
@@ -1111,9 +1110,9 @@ void __init setup_log_buf(int early)
if (early) {
new_log_buf =
- memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
+ memblock_alloc(new_log_buf_len, LOG_ALIGN);
} else {
- new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
+ new_log_buf = memblock_alloc_nopanic(new_log_buf_len,
LOG_ALIGN);
}
diff --git a/kernel/profile.c b/kernel/profile.c
index 9aa2a4445b0d..9c08a2c7cb1d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -16,7 +16,7 @@
#include <linux/export.h>
#include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fd2fce8a001b..f12225f26b70 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2881,6 +2881,18 @@ unsigned long long nr_context_switches(void)
}
/*
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data. Preferring shallow idle state selection
+ * for a CPU that has IO-wait which might not even end up running the task when
+ * it does become runnable.
+ */
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+ return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
+/*
* IO-wait accounting, and how its mostly bollocks (on SMP).
*
* The idea behind IO-wait account is to account the idle time that we could
@@ -2915,31 +2927,11 @@ unsigned long nr_iowait(void)
unsigned long i, sum = 0;
for_each_possible_cpu(i)
- sum += atomic_read(&cpu_rq(i)->nr_iowait);
+ sum += nr_iowait_cpu(i);
return sum;
}
-/*
- * Consumers of these two interfaces, like for example the cpuidle menu
- * governor, are using nonsensical data. Preferring shallow idle state selection
- * for a CPU that has IO-wait which might not even end up running the task when
- * it does become runnable.
- */
-
-unsigned long nr_iowait_cpu(int cpu)
-{
- struct rq *this = cpu_rq(cpu);
- return atomic_read(&this->nr_iowait);
-}
-
-void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-{
- struct rq *rq = this_rq();
- *nr_waiters = atomic_read(&rq->nr_iowait);
- *load = rq->load.weight;
-}
-
#ifdef CONFIG_SMP
/*
diff --git a/kernel/signal.c b/kernel/signal.c
index 17565240b1c6..9a32bc2088c9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -892,7 +892,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
/*
* The first thread which returns from do_signal_stop()
* will take ->siglock, notice SIGNAL_CLD_MASK, and
- * notify its parent. See get_signal_to_deliver().
+ * notify its parent. See get_signal().
*/
signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
new file mode 100644
index 000000000000..e42892926244
--- /dev/null
+++ b/kernel/stackleak.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code fills the used part of the kernel stack with a poison value
+ * before returning to userspace. It's part of the STACKLEAK feature
+ * ported from grsecurity/PaX.
+ *
+ * Author: Alexander Popov <alex.popov@linux.com>
+ *
+ * STACKLEAK reduces the information which kernel stack leak bugs can
+ * reveal and blocks some uninitialized stack variable attacks.
+ */
+
+#include <linux/stackleak.h>
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+#include <linux/jump_label.h>
+#include <linux/sysctl.h>
+
+static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
+
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = 0;
+ int state = !static_branch_unlikely(&stack_erasing_bypass);
+ int prev_state = state;
+
+ table->data = &state;
+ table->maxlen = sizeof(int);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ state = !!state;
+ if (ret || !write || state == prev_state)
+ return ret;
+
+ if (state)
+ static_branch_disable(&stack_erasing_bypass);
+ else
+ static_branch_enable(&stack_erasing_bypass);
+
+ pr_warn("stackleak: kernel stack erasing is %s\n",
+ state ? "enabled" : "disabled");
+ return ret;
+}
+
+#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
+#else
+#define skip_erasing() false
+#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
+
+asmlinkage void stackleak_erase(void)
+{
+ /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
+ unsigned long kstack_ptr = current->lowest_stack;
+ unsigned long boundary = (unsigned long)end_of_stack(current);
+ unsigned int poison_count = 0;
+ const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+
+ if (skip_erasing())
+ return;
+
+ /* Check that 'lowest_stack' value is sane */
+ if (unlikely(kstack_ptr - boundary >= THREAD_SIZE))
+ kstack_ptr = boundary;
+
+ /* Search for the poison value in the kernel stack */
+ while (kstack_ptr > boundary && poison_count <= depth) {
+ if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
+ poison_count++;
+ else
+ poison_count = 0;
+
+ kstack_ptr -= sizeof(unsigned long);
+ }
+
+ /*
+ * One 'long int' at the bottom of the thread stack is reserved and
+ * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
+ */
+ if (kstack_ptr == boundary)
+ kstack_ptr += sizeof(unsigned long);
+
+#ifdef CONFIG_STACKLEAK_METRICS
+ current->prev_lowest_stack = kstack_ptr;
+#endif
+
+ /*
+ * Now write the poison value to the kernel stack. Start from
+ * 'kstack_ptr' and move up till the new 'boundary'. We assume that
+ * the stack pointer doesn't change when we write poison.
+ */
+ if (on_thread_stack())
+ boundary = current_stack_pointer;
+ else
+ boundary = current_top_of_stack();
+
+ while (kstack_ptr < boundary) {
+ *(unsigned long *)kstack_ptr = STACKLEAK_POISON;
+ kstack_ptr += sizeof(unsigned long);
+ }
+
+ /* Reset the 'lowest_stack' value for the next syscall */
+ current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
+}
+
+void __used stackleak_track_stack(void)
+{
+ /*
+ * N.B. stackleak_erase() fills the kernel stack with the poison value,
+ * which has the register width. That code assumes that the value
+ * of 'lowest_stack' is aligned on the register width boundary.
+ *
+ * That is true for x86 and x86_64 because of the kernel stack
+ * alignment on these platforms (for details, see 'cc_stack_align' in
+ * arch/x86/Makefile). Take care of that when you port STACKLEAK to
+ * new platforms.
+ */
+ unsigned long sp = (unsigned long)&sp;
+
+ /*
+ * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
+ * STACKLEAK_SEARCH_DEPTH makes the poison search in
+ * stackleak_erase() unreliable. Let's prevent that.
+ */
+ BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);
+
+ if (sp < current->lowest_stack &&
+ sp >= (unsigned long)task_stack_page(current) +
+ sizeof(unsigned long)) {
+ current->lowest_stack = sp;
+ }
+}
+EXPORT_SYMBOL(stackleak_track_stack);
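To clarify the search loop in stackleak_erase(), a hedged userspace model (illustrative constants and data, not kernel code): starting from the last recorded low-water mark, walk toward the stack bottom until a long enough run of poison words confirms the region below was never used; everything above that point is then re-poisoned.

    #include <stdio.h>

    #define POISON       0xdeadbeefUL  /* stand-in for STACKLEAK_POISON */
    #define SEARCH_DEPTH 4             /* stand-in for the search depth in words */

    /* Walk down from the low-water mark until SEARCH_DEPTH consecutive poison
     * words are seen, mirroring the poison_count loop above. */
    static unsigned long *find_erase_start(unsigned long *low_water,
                                           unsigned long *bottom)
    {
        unsigned int run = 0;

        while (low_water > bottom && run <= SEARCH_DEPTH) {
            run = (*low_water == POISON) ? run + 1 : 0;
            low_water--;
        }
        return low_water;
    }

    int main(void)
    {
        unsigned long stack[32];
        int i;

        for (i = 0; i < 32; i++)       /* pretend words 0..15 were never used */
            stack[i] = (i < 16) ? POISON : 0x1111UL;

        printf("erase would start %td words above the bottom\n",
               find_erase_start(&stack[24], &stack[0]) - &stack[0]);
        return 0;
    }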
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index cc02050fd0c4..5fc724e4e454 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -66,7 +66,6 @@
#include <linux/kexec.h>
#include <linux/bpf.h>
#include <linux/mount.h>
-#include <linux/pipe_fs_i.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
@@ -91,7 +90,9 @@
#ifdef CONFIG_CHR_DEV_SG
#include <scsi/sg.h>
#endif
-
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+#include <linux/stackleak.h>
+#endif
#ifdef CONFIG_LOCKUP_DETECTOR
#include <linux/nmi.h>
#endif
@@ -1233,6 +1234,17 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
#endif
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+ {
+ .procname = "stack_erasing",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = stack_erasing_sysctl,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
{ }
};
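A hedged userspace sketch of querying the stack_erasing knob registered above; the file only exists with CONFIG_STACKLEAK_RUNTIME_DISABLE and, being mode 0600, normally requires root.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char val;
        int fd = open("/proc/sys/kernel/stack_erasing", O_RDONLY);

        if (fd < 0)
            return 1;
        if (read(fd, &val, 1) == 1)
            printf("stack erasing is %s\n", val == '1' ? "on" : "off");
        /* Writing "0" through this file disables erasing via stack_erasing_sysctl(). */
        close(fd);
        return 0;
    }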
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index fac0ddf8a8e2..2868d85f1fb1 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return NULL;
- if (!bio->bi_blkg)
+ if (!bio->bi_css)
return NULL;
- return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
+ return cgroup_get_kernfs_id(bio->bi_css->cgroup);
}
#else
static union kernfs_node_id *
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bf6f1d70484d..ff1c4b20cd0a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2727,6 +2727,7 @@ void trace_dump_stack(int skip)
__ftrace_trace_stack(global_trace.trace_buffer.buffer,
flags, skip, preempt_count(), NULL);
}
+EXPORT_SYMBOL_GPL(trace_dump_stack);
static DEFINE_PER_CPU(int, user_stack_count);
@@ -4621,13 +4622,18 @@ static const char readme_msg[] =
"place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
- "\t place: <path>:<offset>\n"
+ " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
#endif
"\t args: <name>=fetcharg[:type]\n"
"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
+#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
+#else
"\t $stack<index>, $stack, $retval, $comm\n"
- "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
- "\t b<bit-width>@<bit-offset>/<container-size>\n"
+#endif
+ "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
+ "\t b<bit-width>@<bit-offset>/<container-size>,\n"
+ "\t <type>\\[<array-size>\\]\n"
#endif
" events/\t\t- Directory containing all trace event subsystems:\n"
" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 69a3fe926e8c..76217bbef815 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -290,7 +290,8 @@ void perf_kprobe_destroy(struct perf_event *p_event)
#endif /* CONFIG_KPROBE_EVENTS */
#ifdef CONFIG_UPROBE_EVENTS
-int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
+int perf_uprobe_init(struct perf_event *p_event,
+ unsigned long ref_ctr_offset, bool is_retprobe)
{
int ret;
char *path = NULL;
@@ -312,8 +313,8 @@ int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
goto out;
}
- tp_event = create_local_trace_uprobe(
- path, p_event->attr.probe_offset, is_retprobe);
+ tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
+ ref_ctr_offset, is_retprobe);
if (IS_ERR(tp_event)) {
ret = PTR_ERR(tp_event);
goto out;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index d239004aaf29..eb908ef2ecec 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1063,8 +1063,10 @@ static int create_synth_event(int argc, char **argv)
event = NULL;
ret = -EEXIST;
goto out;
- } else if (delete_event)
+ } else if (delete_event) {
+ ret = -ENOENT;
goto out;
+ }
if (argc < 2) {
ret = -EINVAL;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c30032367aab..fec67188c4d2 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -14,6 +14,7 @@
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
+#include "trace_probe_tmpl.h"
#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
@@ -61,9 +62,23 @@ static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
-static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
+static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
- return !!strchr(trace_kprobe_symbol(tk), ':');
+ char *p;
+ bool ret;
+
+ if (!tk->symbol)
+ return false;
+ p = strchr(tk->symbol, ':');
+ if (!p)
+ return true;
+ *p = '\0';
+ mutex_lock(&module_mutex);
+ ret = !!find_module(tk->symbol);
+ mutex_unlock(&module_mutex);
+ *p = ':';
+
+ return ret;
}
static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
@@ -120,184 +135,6 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
-/* Memory fetching by symbol */
-struct symbol_cache {
- char *symbol;
- long offset;
- unsigned long addr;
-};
-
-unsigned long update_symbol_cache(struct symbol_cache *sc)
-{
- sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
-
- if (sc->addr)
- sc->addr += sc->offset;
-
- return sc->addr;
-}
-
-void free_symbol_cache(struct symbol_cache *sc)
-{
- kfree(sc->symbol);
- kfree(sc);
-}
-
-struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
-{
- struct symbol_cache *sc;
-
- if (!sym || strlen(sym) == 0)
- return NULL;
-
- sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
- if (!sc)
- return NULL;
-
- sc->symbol = kstrdup(sym, GFP_KERNEL);
- if (!sc->symbol) {
- kfree(sc);
- return NULL;
- }
- sc->offset = offset;
- update_symbol_cache(sc);
-
- return sc;
-}
-
-/*
- * Kprobes-specific fetch functions
- */
-#define DEFINE_FETCH_stack(type) \
-static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
- void *offset, void *dest) \
-{ \
- *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
- (unsigned int)((unsigned long)offset)); \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
-
-DEFINE_BASIC_FETCH_FUNCS(stack)
-/* No string on the stack entry */
-#define fetch_stack_string NULL
-#define fetch_stack_string_size NULL
-
-#define DEFINE_FETCH_memory(type) \
-static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
- void *addr, void *dest) \
-{ \
- type retval; \
- if (probe_kernel_address(addr, retval)) \
- *(type *)dest = 0; \
- else \
- *(type *)dest = retval; \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
-
-DEFINE_BASIC_FETCH_FUNCS(memory)
-/*
- * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
- * length and relative data location.
- */
-static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- void *addr, void *dest)
-{
- int maxlen = get_rloc_len(*(u32 *)dest);
- u8 *dst = get_rloc_data(dest);
- long ret;
-
- if (!maxlen)
- return;
-
- /*
- * Try to get string again, since the string can be changed while
- * probing.
- */
- ret = strncpy_from_unsafe(dst, addr, maxlen);
-
- if (ret < 0) { /* Failed to fetch string */
- dst[0] = '\0';
- *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
- } else {
- *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
- }
-}
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
-
-/* Return the length of string -- including null terminal byte */
-static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
- void *addr, void *dest)
-{
- mm_segment_t old_fs;
- int ret, len = 0;
- u8 c;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- pagefault_disable();
-
- do {
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
- len++;
- } while (c && ret == 0 && len < MAX_STRING_SIZE);
-
- pagefault_enable();
- set_fs(old_fs);
-
- if (ret < 0) /* Failed to check the length */
- *(u32 *)dest = 0;
- else
- *(u32 *)dest = len;
-}
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
-
-#define DEFINE_FETCH_symbol(type) \
-void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
-{ \
- struct symbol_cache *sc = data; \
- if (sc->addr) \
- fetch_memory_##type(regs, (void *)sc->addr, dest); \
- else \
- *(type *)dest = 0; \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
-
-DEFINE_BASIC_FETCH_FUNCS(symbol)
-DEFINE_FETCH_symbol(string)
-DEFINE_FETCH_symbol(string_size)
-
-/* kprobes don't support file_offset fetch methods */
-#define fetch_file_offset_u8 NULL
-#define fetch_file_offset_u16 NULL
-#define fetch_file_offset_u32 NULL
-#define fetch_file_offset_u64 NULL
-#define fetch_file_offset_string NULL
-#define fetch_file_offset_string_size NULL
-
-/* Fetch type information table */
-static const struct fetch_type kprobes_fetch_type_table[] = {
- /* Special types */
- [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
- sizeof(u32), 1, "__data_loc char[]"),
- [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
- string_size, sizeof(u32), 0, "u32"),
- /* Basic types */
- ASSIGN_FETCH_TYPE(u8, u8, 0),
- ASSIGN_FETCH_TYPE(u16, u16, 0),
- ASSIGN_FETCH_TYPE(u32, u32, 0),
- ASSIGN_FETCH_TYPE(u64, u64, 0),
- ASSIGN_FETCH_TYPE(s8, u8, 1),
- ASSIGN_FETCH_TYPE(s16, u16, 1),
- ASSIGN_FETCH_TYPE(s32, u32, 1),
- ASSIGN_FETCH_TYPE(s64, u64, 1),
- ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
- ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
- ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
- ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
-
- ASSIGN_FETCH_TYPE_END
-};
-
/*
* Allocate new trace_probe and initialize it (including kprobes).
*/
@@ -540,8 +377,11 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
return -EINVAL;
}
- for (i = 0; i < tk->tp.nr_args; i++)
- traceprobe_update_arg(&tk->tp.args[i]);
+ for (i = 0; i < tk->tp.nr_args; i++) {
+ ret = traceprobe_update_arg(&tk->tp.args[i]);
+ if (ret)
+ return ret;
+ }
/* Set/clear disabled flag according to tp->flag */
if (trace_probe_is_enabled(&tk->tp))
@@ -554,19 +394,13 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
else
ret = register_kprobe(&tk->rp.kp);
- if (ret == 0)
+ if (ret == 0) {
tk->tp.flags |= TP_FLAG_REGISTERED;
- else {
- if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
- pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
- ret = 0;
- } else if (ret == -EILSEQ) {
- pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
- tk->rp.kp.addr);
- ret = -EINVAL;
- }
+ } else if (ret == -EILSEQ) {
+ pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
+ tk->rp.kp.addr);
+ ret = -EINVAL;
}
-
return ret;
}
@@ -629,6 +463,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
/* Register k*probe */
ret = __register_trace_kprobe(tk);
+ if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
+ pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
+ ret = 0;
+ }
+
if (ret < 0)
unregister_kprobe_event(tk);
else
@@ -713,13 +552,15 @@ static int create_trace_kprobe(int argc, char **argv)
long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
+ unsigned int flags = TPARG_FL_KERNEL;
/* argc must be >= 1 */
if (argv[0][0] == 'p')
is_return = false;
- else if (argv[0][0] == 'r')
+ else if (argv[0][0] == 'r') {
is_return = true;
- else if (argv[0][0] == '-')
+ flags |= TPARG_FL_RETURN;
+ } else if (argv[0][0] == '-')
is_delete = true;
else {
pr_info("Probe definition must be started with 'p', 'r' or"
@@ -749,10 +590,13 @@ static int create_trace_kprobe(int argc, char **argv)
}
if (event) {
- if (strchr(event, '/')) {
+ char *slash;
+
+ slash = strchr(event, '/');
+ if (slash) {
group = event;
- event = strchr(group, '/') + 1;
- event[-1] = '\0';
+ event = slash + 1;
+ slash[0] = '\0';
if (strlen(group) == 0) {
pr_info("Group name is not specified\n");
return -EINVAL;
@@ -802,8 +646,9 @@ static int create_trace_kprobe(int argc, char **argv)
pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
- if (offset && is_return &&
- !kprobe_on_func_entry(NULL, symbol, offset)) {
+ if (kprobe_on_func_entry(NULL, symbol, offset))
+ flags |= TPARG_FL_FENTRY;
+ if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
pr_info("Given offset is not valid for return probe.\n");
return -EINVAL;
}
@@ -873,8 +718,7 @@ static int create_trace_kprobe(int argc, char **argv)
/* Parse fetch argument */
ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
- is_return, true,
- kprobes_fetch_type_table);
+ flags);
if (ret) {
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
@@ -1028,6 +872,106 @@ static const struct file_operations kprobe_profile_ops = {
.release = seq_release,
};
+/* Kprobe specific fetch functions */
+
+/* Return the length of the string -- including the terminating null byte */
+static nokprobe_inline int
+fetch_store_strlen(unsigned long addr)
+{
+ mm_segment_t old_fs;
+ int ret, len = 0;
+ u8 c;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+
+ do {
+ ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+ len++;
+ } while (c && ret == 0 && len < MAX_STRING_SIZE);
+
+ pagefault_enable();
+ set_fs(old_fs);
+
+ return (ret < 0) ? ret : len;
+}
+
+/*
+ * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
+ * length and relative data location.
+ */
+static nokprobe_inline int
+fetch_store_string(unsigned long addr, void *dest, void *base)
+{
+ int maxlen = get_loc_len(*(u32 *)dest);
+ u8 *dst = get_loc_data(dest, base);
+ long ret;
+
+ if (unlikely(!maxlen))
+ return -ENOMEM;
+ /*
+ * Try to get string again, since the string can be changed while
+ * probing.
+ */
+ ret = strncpy_from_unsafe(dst, (void *)addr, maxlen);
+
+ if (ret >= 0)
+ *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
+ return ret;
+}
+
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size)
+{
+ return probe_kernel_read(dest, src, size);
+}
+
+/* Note that we don't verify it, since the code does not come from user space */
+static int
+process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+ void *base)
+{
+ unsigned long val;
+
+retry:
+ /* 1st stage: get value from context */
+ switch (code->op) {
+ case FETCH_OP_REG:
+ val = regs_get_register(regs, code->param);
+ break;
+ case FETCH_OP_STACK:
+ val = regs_get_kernel_stack_nth(regs, code->param);
+ break;
+ case FETCH_OP_STACKP:
+ val = kernel_stack_pointer(regs);
+ break;
+ case FETCH_OP_RETVAL:
+ val = regs_return_value(regs);
+ break;
+ case FETCH_OP_IMM:
+ val = code->immediate;
+ break;
+ case FETCH_OP_COMM:
+ val = (unsigned long)current->comm;
+ break;
+#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ case FETCH_OP_ARG:
+ val = regs_get_kernel_argument(regs, code->param);
+ break;
+#endif
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+ goto retry;
+ default:
+ return -EILSEQ;
+ }
+ code++;
+
+ return process_fetch_insn_bottom(code, val, dest, base);
+}
+NOKPROBE_SYMBOL(process_fetch_insn)
+
/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
@@ -1059,7 +1003,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
entry = ring_buffer_event_data(event);
entry->ip = (unsigned long)tk->rp.kp.addr;
- store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
+ store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
event_trigger_unlock_commit_regs(trace_file, buffer, event,
entry, irq_flags, pc, regs);
@@ -1108,7 +1052,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry = ring_buffer_event_data(event);
entry->func = (unsigned long)tk->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
- store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
+ store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
event_trigger_unlock_commit_regs(trace_file, buffer, event,
entry, irq_flags, pc, regs);
@@ -1133,8 +1077,6 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
struct kprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq;
struct trace_probe *tp;
- u8 *data;
- int i;
field = (struct kprobe_trace_entry_head *)iter->ent;
tp = container_of(event, struct trace_probe, call.event);
@@ -1146,11 +1088,9 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
trace_seq_putc(s, ')');
- data = (u8 *)&field[1];
- for (i = 0; i < tp->nr_args; i++)
- if (!tp->args[i].type->print(s, tp->args[i].name,
- data + tp->args[i].offset, field))
- goto out;
+ if (print_probe_args(s, tp->args, tp->nr_args,
+ (u8 *)&field[1], field) < 0)
+ goto out;
trace_seq_putc(s, '\n');
out:
@@ -1164,8 +1104,6 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
struct kretprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq;
struct trace_probe *tp;
- u8 *data;
- int i;
field = (struct kretprobe_trace_entry_head *)iter->ent;
tp = container_of(event, struct trace_probe, call.event);
@@ -1182,11 +1120,9 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
trace_seq_putc(s, ')');
- data = (u8 *)&field[1];
- for (i = 0; i < tp->nr_args; i++)
- if (!tp->args[i].type->print(s, tp->args[i].name,
- data + tp->args[i].offset, field))
- goto out;
+ if (print_probe_args(s, tp->args, tp->nr_args,
+ (u8 *)&field[1], field) < 0)
+ goto out;
trace_seq_putc(s, '\n');
@@ -1197,49 +1133,25 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
- int ret, i;
+ int ret;
struct kprobe_trace_entry_head field;
struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
- /* Set argument names as fields */
- for (i = 0; i < tk->tp.nr_args; i++) {
- struct probe_arg *parg = &tk->tp.args[i];
- ret = trace_define_field(event_call, parg->type->fmttype,
- parg->name,
- sizeof(field) + parg->offset,
- parg->type->size,
- parg->type->is_signed,
- FILTER_OTHER);
- if (ret)
- return ret;
- }
- return 0;
+ return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}
static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
- int ret, i;
+ int ret;
struct kretprobe_trace_entry_head field;
struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
- /* Set argument names as fields */
- for (i = 0; i < tk->tp.nr_args; i++) {
- struct probe_arg *parg = &tk->tp.args[i];
- ret = trace_define_field(event_call, parg->type->fmttype,
- parg->name,
- sizeof(field) + parg->offset,
- parg->type->size,
- parg->type->is_signed,
- FILTER_OTHER);
- if (ret)
- return ret;
- }
- return 0;
+ return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
}
#ifdef CONFIG_PERF_EVENTS
@@ -1286,7 +1198,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
entry->ip = (unsigned long)tk->rp.kp.addr;
memset(&entry[1], 0, dsize);
- store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
+ store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL);
return 0;
@@ -1322,7 +1234,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry->func = (unsigned long)tk->rp.kp.addr;
entry->ret_ip = (unsigned long)ri->ret_addr;
- store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
+ store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL);
}
@@ -1457,7 +1369,7 @@ static int register_kprobe_event(struct trace_kprobe *tk)
init_trace_event_call(tk, call);
- if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
+ if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
return -ENOMEM;
ret = register_trace_event(&call->event);
if (!ret) {
@@ -1514,7 +1426,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
init_trace_event_call(tk, &tk->tp.call);
- if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
+ if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
ret = -ENOMEM;
goto error;
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index b0875b327f5c..c3fd849d4a8f 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -115,7 +115,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self,
* section, then we need to read the link list pointers. The trick is
* we pass the address of the string to the seq function just like
* we do for the kernel core formats. To get back the structure that
- * holds the format, we simply use containerof() and then go to the
+ * holds the format, we simply use container_of() and then go to the
* next format in the list.
*/
static const char **
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index e99c3ce7aa65..3ef15a6683c0 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -26,14 +26,12 @@ const char *reserved_field_names[] = {
/* Printing in basic type function template */
#define DEFINE_BASIC_PRINT_TYPE_FUNC(tname, type, fmt) \
-int PRINT_TYPE_FUNC_NAME(tname)(struct trace_seq *s, const char *name, \
- void *data, void *ent) \
+int PRINT_TYPE_FUNC_NAME(tname)(struct trace_seq *s, void *data, void *ent)\
{ \
- trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \
+ trace_seq_printf(s, fmt, *(type *)data); \
return !trace_seq_has_overflowed(s); \
} \
-const char PRINT_TYPE_FMT_NAME(tname)[] = fmt; \
-NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(tname));
+const char PRINT_TYPE_FMT_NAME(tname)[] = fmt;
DEFINE_BASIC_PRINT_TYPE_FUNC(u8, u8, "%u")
DEFINE_BASIC_PRINT_TYPE_FUNC(u16, u16, "%u")
@@ -48,193 +46,52 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(x16, u16, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x32, u32, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x64, u64, "0x%Lx")
+int PRINT_TYPE_FUNC_NAME(symbol)(struct trace_seq *s, void *data, void *ent)
+{
+ trace_seq_printf(s, "%pS", (void *)*(unsigned long *)data);
+ return !trace_seq_has_overflowed(s);
+}
+const char PRINT_TYPE_FMT_NAME(symbol)[] = "%pS";
+
/* Print type function for string type */
-int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name,
- void *data, void *ent)
+int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent)
{
int len = *(u32 *)data >> 16;
if (!len)
- trace_seq_printf(s, " %s=(fault)", name);
+ trace_seq_puts(s, "(fault)");
else
- trace_seq_printf(s, " %s=\"%s\"", name,
+ trace_seq_printf(s, "\"%s\"",
(const char *)get_loc_data(data, ent));
return !trace_seq_has_overflowed(s);
}
-NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string));
const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
-#define CHECK_FETCH_FUNCS(method, fn) \
- (((FETCH_FUNC_NAME(method, u8) == fn) || \
- (FETCH_FUNC_NAME(method, u16) == fn) || \
- (FETCH_FUNC_NAME(method, u32) == fn) || \
- (FETCH_FUNC_NAME(method, u64) == fn) || \
- (FETCH_FUNC_NAME(method, string) == fn) || \
- (FETCH_FUNC_NAME(method, string_size) == fn)) \
- && (fn != NULL))
-
-/* Data fetch function templates */
-#define DEFINE_FETCH_reg(type) \
-void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, void *offset, void *dest) \
-{ \
- *(type *)dest = (type)regs_get_register(regs, \
- (unsigned int)((unsigned long)offset)); \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(reg, type));
-DEFINE_BASIC_FETCH_FUNCS(reg)
-/* No string on the register */
-#define fetch_reg_string NULL
-#define fetch_reg_string_size NULL
-
-#define DEFINE_FETCH_retval(type) \
-void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \
- void *dummy, void *dest) \
-{ \
- *(type *)dest = (type)regs_return_value(regs); \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(retval, type));
-DEFINE_BASIC_FETCH_FUNCS(retval)
-/* No string on the retval */
-#define fetch_retval_string NULL
-#define fetch_retval_string_size NULL
-
-/* Dereference memory access function */
-struct deref_fetch_param {
- struct fetch_param orig;
- long offset;
- fetch_func_t fetch;
- fetch_func_t fetch_size;
+/* Fetch type information table */
+static const struct fetch_type probe_fetch_types[] = {
+ /* Special types */
+ __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1,
+ "__data_loc char[]"),
+ /* Basic types */
+ ASSIGN_FETCH_TYPE(u8, u8, 0),
+ ASSIGN_FETCH_TYPE(u16, u16, 0),
+ ASSIGN_FETCH_TYPE(u32, u32, 0),
+ ASSIGN_FETCH_TYPE(u64, u64, 0),
+ ASSIGN_FETCH_TYPE(s8, u8, 1),
+ ASSIGN_FETCH_TYPE(s16, u16, 1),
+ ASSIGN_FETCH_TYPE(s32, u32, 1),
+ ASSIGN_FETCH_TYPE(s64, u64, 1),
+ ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
+ ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
+ ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
+ ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
+ ASSIGN_FETCH_TYPE_ALIAS(symbol, ADDR_FETCH_TYPE, ADDR_FETCH_TYPE, 0),
+
+ ASSIGN_FETCH_TYPE_END
};
-#define DEFINE_FETCH_deref(type) \
-void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
- void *data, void *dest) \
-{ \
- struct deref_fetch_param *dprm = data; \
- unsigned long addr; \
- call_fetch(&dprm->orig, regs, &addr); \
- if (addr) { \
- addr += dprm->offset; \
- dprm->fetch(regs, (void *)addr, dest); \
- } else \
- *(type *)dest = 0; \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, type));
-DEFINE_BASIC_FETCH_FUNCS(deref)
-DEFINE_FETCH_deref(string)
-
-void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
- void *data, void *dest)
-{
- struct deref_fetch_param *dprm = data;
- unsigned long addr;
-
- call_fetch(&dprm->orig, regs, &addr);
- if (addr && dprm->fetch_size) {
- addr += dprm->offset;
- dprm->fetch_size(regs, (void *)addr, dest);
- } else
- *(string_size *)dest = 0;
-}
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, string_size));
-
-static void update_deref_fetch_param(struct deref_fetch_param *data)
-{
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- update_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- update_symbol_cache(data->orig.data);
-}
-NOKPROBE_SYMBOL(update_deref_fetch_param);
-
-static void free_deref_fetch_param(struct deref_fetch_param *data)
-{
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- free_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- free_symbol_cache(data->orig.data);
- kfree(data);
-}
-NOKPROBE_SYMBOL(free_deref_fetch_param);
-
-/* Bitfield fetch function */
-struct bitfield_fetch_param {
- struct fetch_param orig;
- unsigned char hi_shift;
- unsigned char low_shift;
-};
-
-#define DEFINE_FETCH_bitfield(type) \
-void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
- void *data, void *dest) \
-{ \
- struct bitfield_fetch_param *bprm = data; \
- type buf = 0; \
- call_fetch(&bprm->orig, regs, &buf); \
- if (buf) { \
- buf <<= bprm->hi_shift; \
- buf >>= bprm->low_shift; \
- } \
- *(type *)dest = buf; \
-} \
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(bitfield, type));
-DEFINE_BASIC_FETCH_FUNCS(bitfield)
-#define fetch_bitfield_string NULL
-#define fetch_bitfield_string_size NULL
-
-static void
-update_bitfield_fetch_param(struct bitfield_fetch_param *data)
-{
- /*
- * Don't check the bitfield itself, because this must be the
- * last fetch function.
- */
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- update_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- update_symbol_cache(data->orig.data);
-}
-
-static void
-free_bitfield_fetch_param(struct bitfield_fetch_param *data)
-{
- /*
- * Don't check the bitfield itself, because this must be the
- * last fetch function.
- */
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- free_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- free_symbol_cache(data->orig.data);
-
- kfree(data);
-}
-
-void FETCH_FUNC_NAME(comm, string)(struct pt_regs *regs,
- void *data, void *dest)
-{
- int maxlen = get_rloc_len(*(u32 *)dest);
- u8 *dst = get_rloc_data(dest);
- long ret;
-
- if (!maxlen)
- return;
-
- ret = strlcpy(dst, current->comm, maxlen);
- *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
-}
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(comm, string));
-
-void FETCH_FUNC_NAME(comm, string_size)(struct pt_regs *regs,
- void *data, void *dest)
-{
- *(u32 *)dest = strlen(current->comm) + 1;
-}
-NOKPROBE_SYMBOL(FETCH_FUNC_NAME(comm, string_size));
-
-static const struct fetch_type *find_fetch_type(const char *type,
- const struct fetch_type *ftbl)
+static const struct fetch_type *find_fetch_type(const char *type)
{
int i;
@@ -255,58 +112,27 @@ static const struct fetch_type *find_fetch_type(const char *type,
switch (bs) {
case 8:
- return find_fetch_type("u8", ftbl);
+ return find_fetch_type("u8");
case 16:
- return find_fetch_type("u16", ftbl);
+ return find_fetch_type("u16");
case 32:
- return find_fetch_type("u32", ftbl);
+ return find_fetch_type("u32");
case 64:
- return find_fetch_type("u64", ftbl);
+ return find_fetch_type("u64");
default:
goto fail;
}
}
- for (i = 0; ftbl[i].name; i++) {
- if (strcmp(type, ftbl[i].name) == 0)
- return &ftbl[i];
+ for (i = 0; probe_fetch_types[i].name; i++) {
+ if (strcmp(type, probe_fetch_types[i].name) == 0)
+ return &probe_fetch_types[i];
}
fail:
return NULL;
}
-/* Special function : only accept unsigned long */
-static void fetch_kernel_stack_address(struct pt_regs *regs, void *dummy, void *dest)
-{
- *(unsigned long *)dest = kernel_stack_pointer(regs);
-}
-NOKPROBE_SYMBOL(fetch_kernel_stack_address);
-
-static void fetch_user_stack_address(struct pt_regs *regs, void *dummy, void *dest)
-{
- *(unsigned long *)dest = user_stack_pointer(regs);
-}
-NOKPROBE_SYMBOL(fetch_user_stack_address);
-
-static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
- fetch_func_t orig_fn,
- const struct fetch_type *ftbl)
-{
- int i;
-
- if (type != &ftbl[FETCH_TYPE_STRING])
- return NULL; /* Only string type needs size function */
-
- for (i = 0; i < FETCH_MTD_END; i++)
- if (type->fetch[i] == orig_fn)
- return ftbl[FETCH_TYPE_STRSIZE].fetch[i];
-
- WARN_ON(1); /* This should not happen */
-
- return NULL;
-}
-
/* Split symbol and offset. */
int traceprobe_split_symbol_offset(char *symbol, long *offset)
{
@@ -331,41 +157,44 @@ int traceprobe_split_symbol_offset(char *symbol, long *offset)
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
static int parse_probe_vars(char *arg, const struct fetch_type *t,
- struct fetch_param *f, bool is_return,
- bool is_kprobe)
+ struct fetch_insn *code, unsigned int flags)
{
int ret = 0;
unsigned long param;
if (strcmp(arg, "retval") == 0) {
- if (is_return)
- f->fn = t->fetch[FETCH_MTD_retval];
+ if (flags & TPARG_FL_RETURN)
+ code->op = FETCH_OP_RETVAL;
else
ret = -EINVAL;
} else if (strncmp(arg, "stack", 5) == 0) {
if (arg[5] == '\0') {
- if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR))
- return -EINVAL;
-
- if (is_kprobe)
- f->fn = fetch_kernel_stack_address;
- else
- f->fn = fetch_user_stack_address;
+ code->op = FETCH_OP_STACKP;
} else if (isdigit(arg[5])) {
ret = kstrtoul(arg + 5, 10, &param);
- if (ret || (is_kprobe && param > PARAM_MAX_STACK))
+ if (ret || ((flags & TPARG_FL_KERNEL) &&
+ param > PARAM_MAX_STACK))
ret = -EINVAL;
else {
- f->fn = t->fetch[FETCH_MTD_stack];
- f->data = (void *)param;
+ code->op = FETCH_OP_STACK;
+ code->param = (unsigned int)param;
}
} else
ret = -EINVAL;
} else if (strcmp(arg, "comm") == 0) {
- if (strcmp(t->name, "string") != 0 &&
- strcmp(t->name, "string_size") != 0)
+ code->op = FETCH_OP_COMM;
+#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ } else if (((flags & TPARG_FL_MASK) ==
+ (TPARG_FL_KERNEL | TPARG_FL_FENTRY)) &&
+ strncmp(arg, "arg", 3) == 0) {
+ if (!isdigit(arg[3]))
+ return -EINVAL;
+ ret = kstrtoul(arg + 3, 10, &param);
+ if (ret || !param || param > PARAM_MAX_STACK)
return -EINVAL;
- f->fn = t->fetch[FETCH_MTD_comm];
+ code->op = FETCH_OP_ARG;
+ code->param = (unsigned int)param - 1;
+#endif
} else
ret = -EINVAL;
@@ -373,25 +202,27 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
}
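parse_probe_vars() above now maps "$retval", "$stack"/"$stackN", "$comm" and, when HAVE_FUNCTION_ARG_ACCESS_API is set, "$argN" straight to a stage-1 opcode plus parameter instead of picking a fetch function pointer. A rough standalone sketch of that string-to-opcode step (invented enum, no flag or bounds checks):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

enum op { OP_NONE, OP_RETVAL, OP_STACKP, OP_STACK, OP_ARG, OP_COMM };

struct insn { enum op op; unsigned int param; };

/* Map a "$var" body (without the leading '$') to an opcode; -1 on error. */
static int parse_var(const char *arg, struct insn *code)
{
        if (strcmp(arg, "retval") == 0) {
                code->op = OP_RETVAL;
        } else if (strncmp(arg, "stack", 5) == 0) {
                if (arg[5] == '\0') {
                        code->op = OP_STACKP;
                } else if (isdigit((unsigned char)arg[5])) {
                        code->op = OP_STACK;
                        code->param = (unsigned int)strtoul(arg + 5, NULL, 10);
                } else {
                        return -1;
                }
        } else if (strcmp(arg, "comm") == 0) {
                code->op = OP_COMM;
        } else if (strncmp(arg, "arg", 3) == 0 && isdigit((unsigned char)arg[3])) {
                code->op = OP_ARG;
                /* $argN is 1-based on the command line, 0-based internally */
                code->param = (unsigned int)strtoul(arg + 3, NULL, 10) - 1;
        } else {
                return -1;
        }
        return 0;
}

int main(void)
{
        struct insn i = { OP_NONE, 0 };

        if (!parse_var("arg2", &i))
                printf("op=%d param=%u\n", i.op, i.param);  /* op=4 (OP_ARG), param=1 */
        return 0;
}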
/* Recursive argument parser */
-static int parse_probe_arg(char *arg, const struct fetch_type *t,
- struct fetch_param *f, bool is_return, bool is_kprobe,
- const struct fetch_type *ftbl)
+static int
+parse_probe_arg(char *arg, const struct fetch_type *type,
+ struct fetch_insn **pcode, struct fetch_insn *end,
+ unsigned int flags)
{
+ struct fetch_insn *code = *pcode;
unsigned long param;
- long offset;
+ long offset = 0;
char *tmp;
int ret = 0;
switch (arg[0]) {
case '$':
- ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe);
+ ret = parse_probe_vars(arg + 1, type, code, flags);
break;
case '%': /* named register */
ret = regs_query_register_offset(arg + 1);
if (ret >= 0) {
- f->fn = t->fetch[FETCH_MTD_reg];
- f->data = (void *)(unsigned long)ret;
+ code->op = FETCH_OP_REG;
+ code->param = (unsigned int)ret;
ret = 0;
}
break;
@@ -401,33 +232,42 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
ret = kstrtoul(arg + 1, 0, &param);
if (ret)
break;
-
- f->fn = t->fetch[FETCH_MTD_memory];
- f->data = (void *)param;
+ /* load address */
+ code->op = FETCH_OP_IMM;
+ code->immediate = param;
} else if (arg[1] == '+') {
/* kprobes don't support file offsets */
- if (is_kprobe)
+ if (flags & TPARG_FL_KERNEL)
return -EINVAL;
ret = kstrtol(arg + 2, 0, &offset);
if (ret)
break;
- f->fn = t->fetch[FETCH_MTD_file_offset];
- f->data = (void *)offset;
+ code->op = FETCH_OP_FOFFS;
+ code->immediate = (unsigned long)offset; // imm64?
} else {
/* uprobes don't support symbols */
- if (!is_kprobe)
+ if (!(flags & TPARG_FL_KERNEL))
return -EINVAL;
- ret = traceprobe_split_symbol_offset(arg + 1, &offset);
- if (ret)
- break;
+ /* Preserve symbol for updating */
+ code->op = FETCH_NOP_SYMBOL;
+ code->data = kstrdup(arg + 1, GFP_KERNEL);
+ if (!code->data)
+ return -ENOMEM;
+ if (++code == end)
+ return -E2BIG;
- f->data = alloc_symbol_cache(arg + 1, offset);
- if (f->data)
- f->fn = t->fetch[FETCH_MTD_symbol];
+ code->op = FETCH_OP_IMM;
+ code->immediate = 0;
}
+ /* These are fetching from memory */
+ if (++code == end)
+ return -E2BIG;
+ *pcode = code;
+ code->op = FETCH_OP_DEREF;
+ code->offset = offset;
break;
case '+': /* deref memory */
@@ -435,11 +275,10 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
case '-':
tmp = strchr(arg, '(');
if (!tmp)
- break;
+ return -EINVAL;
*tmp = '\0';
ret = kstrtol(arg, 0, &offset);
-
if (ret)
break;
@@ -447,36 +286,27 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
tmp = strrchr(arg, ')');
if (tmp) {
- struct deref_fetch_param *dprm;
- const struct fetch_type *t2;
+ const struct fetch_type *t2 = find_fetch_type(NULL);
- t2 = find_fetch_type(NULL, ftbl);
*tmp = '\0';
- dprm = kzalloc(sizeof(struct deref_fetch_param), GFP_KERNEL);
-
- if (!dprm)
- return -ENOMEM;
-
- dprm->offset = offset;
- dprm->fetch = t->fetch[FETCH_MTD_memory];
- dprm->fetch_size = get_fetch_size_function(t,
- dprm->fetch, ftbl);
- ret = parse_probe_arg(arg, t2, &dprm->orig, is_return,
- is_kprobe, ftbl);
+ ret = parse_probe_arg(arg, t2, &code, end, flags);
if (ret)
- kfree(dprm);
- else {
- f->fn = t->fetch[FETCH_MTD_deref];
- f->data = (void *)dprm;
- }
+ break;
+ if (code->op == FETCH_OP_COMM)
+ return -EINVAL;
+ if (++code == end)
+ return -E2BIG;
+ *pcode = code;
+
+ code->op = FETCH_OP_DEREF;
+ code->offset = offset;
}
break;
}
- if (!ret && !f->fn) { /* Parsed, but do not find fetch method */
- pr_info("%s type has no corresponding fetch method.\n", t->name);
+ if (!ret && code->op == FETCH_OP_NOP) {
+ /* Parsed, but do not find fetch method */
ret = -EINVAL;
}
-
return ret;
}
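For "@symbol" arguments the parser above no longer resolves the address at parse time: it parks the symbol text behind a FETCH_NOP_SYMBOL placeholder, emits a FETCH_OP_IMM slot that traceprobe_update_arg() patches later, and appends the dereference (which the store stage turns into the actual ST_MEM). An illustrative guess at the emitted sequence for an "@my_symbol+8:u32" argument, with a cut-down struct and invented field values:

#include <stdio.h>

enum op { OP_NOP, OP_IMM, OP_ST_MEM, OP_END, NOP_SYMBOL };

struct insn {
        enum op op;
        unsigned long immediate;
        int offset;
        unsigned int size;
        const char *data;       /* symbol text kept for late resolution */
};

/*
 * Sketch of what "@my_symbol+8:u32" might compile to: the symbol text is
 * parked in a placeholder, the IMM slot is filled in at arming time with
 * the resolved address plus 8, and ST_MEM loads 4 bytes from it.
 */
static const struct insn example[] = {
        { .op = NOP_SYMBOL, .data = "my_symbol+8" },
        { .op = OP_IMM,     .immediate = 0 },   /* patched to resolved address */
        { .op = OP_ST_MEM,  .offset = 0, .size = 4 },
        { .op = OP_END },
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(example) / sizeof(example[0]); i++)
                printf("insn %u: op=%d\n", i, example[i].op);
        return 0;
}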
@@ -485,22 +315,15 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
/* Bitfield type needs to be parsed into a fetch function */
static int __parse_bitfield_probe_arg(const char *bf,
const struct fetch_type *t,
- struct fetch_param *f)
+ struct fetch_insn **pcode)
{
- struct bitfield_fetch_param *bprm;
+ struct fetch_insn *code = *pcode;
unsigned long bw, bo;
char *tail;
if (*bf != 'b')
return 0;
- bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
- if (!bprm)
- return -ENOMEM;
-
- bprm->orig = *f;
- f->fn = t->fetch[FETCH_MTD_bitfield];
- f->data = (void *)bprm;
bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */
if (bw == 0 || *tail != '@')
@@ -511,20 +334,26 @@ static int __parse_bitfield_probe_arg(const char *bf,
if (tail == bf || *tail != '/')
return -EINVAL;
+ code++;
+ if (code->op != FETCH_OP_NOP)
+ return -E2BIG;
+ *pcode = code;
- bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
- bprm->low_shift = bprm->hi_shift + bo;
+ code->op = FETCH_OP_MOD_BF;
+ code->lshift = BYTES_TO_BITS(t->size) - (bw + bo);
+ code->rshift = BYTES_TO_BITS(t->size) - bw;
+ code->basesize = t->size;
return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
}
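A bitfield spec is compiled into a left shift that discards the bits above the field and a right shift that drops the bits below it: for a 4-bit field at bit offset 3 in a 32-bit container ("b4@3/32") that is lshift = 32 - (4 + 3) = 25 and rshift = 32 - 4 = 28. A small standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

/* b<width>@<offset> inside a <bits>-bit container, as compiled above */
struct bf { unsigned char lshift, rshift; };

static struct bf make_bf(unsigned int bw, unsigned int bo, unsigned int container_bits)
{
        struct bf f = {
                .lshift = (unsigned char)(container_bits - (bw + bo)),
                .rshift = (unsigned char)(container_bits - bw),
        };
        return f;
}

static uint32_t apply_bf32(uint32_t v, struct bf f)
{
        v <<= f.lshift;         /* drop the bits above the field */
        v >>= f.rshift;         /* drop the bits below it; field lands at bit 0 */
        return v;
}

int main(void)
{
        /* "b4@3/32": 4-bit field at bit offset 3 of a u32 */
        struct bf f = make_bf(4, 3, 32);
        uint32_t raw = 0xA5;    /* bits 3..6 hold 0b0100 = 4 */

        printf("lshift=%u rshift=%u field=%u\n", (unsigned)f.lshift,
               (unsigned)f.rshift, (unsigned)apply_bf32(raw, f));
        return 0;
}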
/* String length checking wrapper */
int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
- struct probe_arg *parg, bool is_return, bool is_kprobe,
- const struct fetch_type *ftbl)
+ struct probe_arg *parg, unsigned int flags)
{
- const char *t;
- int ret;
+ struct fetch_insn *code, *scode, *tmp = NULL;
+ char *t, *t2;
+ int ret, len;
if (strlen(arg) > MAX_ARGSTR_LEN) {
pr_info("Argument is too long.: %s\n", arg);
@@ -535,36 +364,128 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
pr_info("Failed to allocate memory for command '%s'.\n", arg);
return -ENOMEM;
}
- t = strchr(parg->comm, ':');
+ t = strchr(arg, ':');
if (t) {
- arg[t - parg->comm] = '\0';
- t++;
+ *t = '\0';
+ t2 = strchr(++t, '[');
+ if (t2) {
+ *t2 = '\0';
+ parg->count = simple_strtoul(t2 + 1, &t2, 0);
+ if (strcmp(t2, "]") || parg->count == 0)
+ return -EINVAL;
+ if (parg->count > MAX_ARRAY_LEN)
+ return -E2BIG;
+ }
}
/*
* The default type of $comm should be "string", and it can't be
* dereferenced.
*/
if (!t && strcmp(arg, "$comm") == 0)
- t = "string";
- parg->type = find_fetch_type(t, ftbl);
+ parg->type = find_fetch_type("string");
+ else
+ parg->type = find_fetch_type(t);
if (!parg->type) {
pr_info("Unsupported type: %s\n", t);
return -EINVAL;
}
parg->offset = *size;
- *size += parg->type->size;
- ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return,
- is_kprobe, ftbl);
-
- if (ret >= 0 && t != NULL)
- ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
-
- if (ret >= 0) {
- parg->fetch_size.fn = get_fetch_size_function(parg->type,
- parg->fetch.fn,
- ftbl);
- parg->fetch_size.data = parg->fetch.data;
+ *size += parg->type->size * (parg->count ?: 1);
+
+ if (parg->count) {
+ len = strlen(parg->type->fmttype) + 6;
+ parg->fmt = kmalloc(len, GFP_KERNEL);
+ if (!parg->fmt)
+ return -ENOMEM;
+ snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
+ parg->count);
+ }
+
+ code = tmp = kzalloc(sizeof(*code) * FETCH_INSN_MAX, GFP_KERNEL);
+ if (!code)
+ return -ENOMEM;
+ code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
+
+ ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1],
+ flags);
+ if (ret)
+ goto fail;
+
+ /* Store operation */
+ if (!strcmp(parg->type->name, "string")) {
+ if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_IMM &&
+ code->op != FETCH_OP_COMM) {
+ pr_info("string only accepts memory or address.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (code->op != FETCH_OP_DEREF || parg->count) {
+ /*
+ * IMM and COMM are pointing to the actual address; those must
+ * be kept as-is, and if parg->count != 0, this is an array
+ * of string pointers instead of the string address itself.
+ */
+ code++;
+ if (code->op != FETCH_OP_NOP) {
+ ret = -E2BIG;
+ goto fail;
+ }
+ }
+ code->op = FETCH_OP_ST_STRING; /* In DEREF case, replace it */
+ code->size = parg->type->size;
+ parg->dynamic = true;
+ } else if (code->op == FETCH_OP_DEREF) {
+ code->op = FETCH_OP_ST_MEM;
+ code->size = parg->type->size;
+ } else {
+ code++;
+ if (code->op != FETCH_OP_NOP) {
+ ret = -E2BIG;
+ goto fail;
+ }
+ code->op = FETCH_OP_ST_RAW;
+ code->size = parg->type->size;
+ }
+ scode = code;
+ /* Modify operation */
+ if (t != NULL) {
+ ret = __parse_bitfield_probe_arg(t, parg->type, &code);
+ if (ret)
+ goto fail;
}
+ /* Loop(Array) operation */
+ if (parg->count) {
+ if (scode->op != FETCH_OP_ST_MEM &&
+ scode->op != FETCH_OP_ST_STRING) {
+ pr_info("array only accepts memory or address\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ code++;
+ if (code->op != FETCH_OP_NOP) {
+ ret = -E2BIG;
+ goto fail;
+ }
+ code->op = FETCH_OP_LP_ARRAY;
+ code->param = parg->count;
+ }
+ code++;
+ code->op = FETCH_OP_END;
+
+ /* Shrink down the code buffer */
+ parg->code = kzalloc(sizeof(*code) * (code - tmp + 1), GFP_KERNEL);
+ if (!parg->code)
+ ret = -ENOMEM;
+ else
+ memcpy(parg->code, tmp, sizeof(*code) * (code - tmp + 1));
+
+fail:
+ if (ret) {
+ for (code = tmp; code < tmp + FETCH_INSN_MAX; code++)
+ if (code->op == FETCH_NOP_SYMBOL)
+ kfree(code->data);
+ }
+ kfree(tmp);
return ret;
}
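traceprobe_parse_probe_arg() above also accepts an array suffix, "type[count]": it splits at ':' and '[', insists on a closing ']' and a non-zero count no larger than MAX_ARRAY_LEN, and then reserves type->size * count bytes in the fixed area. A userspace sketch of just the suffix split (error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ARRAY_LEN 64

/* Split "arg:type[count]" style suffixes in place; returns 0 on success. */
static int split_type(char *spec, char **type, unsigned long *count)
{
        char *t = strchr(spec, ':');
        char *b, *end;

        *type = NULL;
        *count = 0;
        if (!t)
                return 0;               /* no explicit type */
        *t++ = '\0';
        *type = t;
        b = strchr(t, '[');
        if (!b)
                return 0;               /* plain type, no array */
        *b = '\0';
        *count = strtoul(b + 1, &end, 0);
        if (strcmp(end, "]") || *count == 0 || *count > MAX_ARRAY_LEN)
                return -1;
        return 0;
}

int main(void)
{
        char spec[] = "+0(%si):u32[4]";
        char *type;
        unsigned long count;

        if (!split_type(spec, &type, &count))
                printf("arg=\"%s\" type=%s count=%lu\n", spec, type, count);
        return 0;
}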
@@ -586,35 +507,63 @@ int traceprobe_conflict_field_name(const char *name,
return 0;
}
-void traceprobe_update_arg(struct probe_arg *arg)
-{
- if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
- update_bitfield_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
- update_deref_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
- update_symbol_cache(arg->fetch.data);
-}
-
void traceprobe_free_probe_arg(struct probe_arg *arg)
{
- if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
- free_bitfield_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
- free_deref_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
- free_symbol_cache(arg->fetch.data);
+ struct fetch_insn *code = arg->code;
+ while (code && code->op != FETCH_OP_END) {
+ if (code->op == FETCH_NOP_SYMBOL)
+ kfree(code->data);
+ code++;
+ }
+ kfree(arg->code);
kfree(arg->name);
kfree(arg->comm);
+ kfree(arg->fmt);
}
+int traceprobe_update_arg(struct probe_arg *arg)
+{
+ struct fetch_insn *code = arg->code;
+ long offset;
+ char *tmp;
+ char c;
+ int ret = 0;
+
+ while (code && code->op != FETCH_OP_END) {
+ if (code->op == FETCH_NOP_SYMBOL) {
+ if (code[1].op != FETCH_OP_IMM)
+ return -EINVAL;
+
+ tmp = strpbrk(code->data, "+-");
+ if (tmp)
+ c = *tmp;
+ ret = traceprobe_split_symbol_offset(code->data,
+ &offset);
+ if (ret)
+ return ret;
+
+ code[1].immediate =
+ (unsigned long)kallsyms_lookup_name(code->data);
+ if (tmp)
+ *tmp = c;
+ if (!code[1].immediate)
+ return -ENOENT;
+ code[1].immediate += offset;
+ }
+ code++;
+ }
+ return 0;
+}
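traceprobe_update_arg() defers symbol resolution to arming time: the text saved behind FETCH_NOP_SYMBOL is split into a name and an optional +/- offset, and the resolved address plus that offset is written into the following FETCH_OP_IMM. A userspace sketch of the split step only (kallsyms_lookup_name() is kernel-only, so a comment stands in for it):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "symbol+off" / "symbol-off" into a name and a signed offset. */
static void split_symbol_offset(char *symbol, long *offset)
{
        char *tmp = strpbrk(symbol, "+-");

        *offset = 0;
        if (tmp) {
                *offset = strtol(tmp, NULL, 0);
                *tmp = '\0';
        }
}

int main(void)
{
        char sym[] = "vfs_read+0x10";
        long off;

        split_symbol_offset(sym, &off);
        /* in the kernel: code[1].immediate = kallsyms_lookup_name(sym) + off */
        printf("symbol=%s offset=%ld\n", sym, off);     /* vfs_read, 16 */
        return 0;
}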
+
+/* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
bool is_return)
{
- int i;
+ struct probe_arg *parg;
+ int i, j;
int pos = 0;
-
const char *fmt, *arg;
if (!is_return) {
@@ -625,35 +574,51 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
}
- /* When len=0, we just calculate the needed length */
-#define LEN_OR_ZERO (len ? len - pos : 0)
-
pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
for (i = 0; i < tp->nr_args; i++) {
- pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
- tp->args[i].name, tp->args[i].type->fmt);
+ parg = tp->args + i;
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=", parg->name);
+ if (parg->count) {
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "{%s",
+ parg->type->fmt);
+ for (j = 1; j < parg->count; j++)
+ pos += snprintf(buf + pos, LEN_OR_ZERO, ",%s",
+ parg->type->fmt);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "}");
+ } else
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "%s",
+ parg->type->fmt);
}
pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
for (i = 0; i < tp->nr_args; i++) {
- if (strcmp(tp->args[i].type->name, "string") == 0)
+ parg = tp->args + i;
+ if (parg->count) {
+ if (strcmp(parg->type->name, "string") == 0)
+ fmt = ", __get_str(%s[%d])";
+ else
+ fmt = ", REC->%s[%d]";
+ for (j = 0; j < parg->count; j++)
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ fmt, parg->name, j);
+ } else {
+ if (strcmp(parg->type->name, "string") == 0)
+ fmt = ", __get_str(%s)";
+ else
+ fmt = ", REC->%s";
pos += snprintf(buf + pos, LEN_OR_ZERO,
- ", __get_str(%s)",
- tp->args[i].name);
- else
- pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
- tp->args[i].name);
+ fmt, parg->name);
+ }
}
-#undef LEN_OR_ZERO
-
/* return the length of print_fmt */
return pos;
}
+#undef LEN_OR_ZERO
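__set_print_fmt() relies on the usual two-pass snprintf() trick: the first call passes len = 0 so LEN_OR_ZERO turns every snprintf() into a pure length query, and the second call fills a buffer of exactly the measured size. A standalone example of the pattern (the format built here is only loosely modeled on the kprobe one, and the measuring pass is kept NULL-safe):

#include <stdio.h>
#include <stdlib.h>

/* Two-pass snprintf sizing, as in __set_print_fmt() above. */
#define LEN_OR_ZERO     (len ? len - pos : 0)
#define DST             (len ? buf + pos : NULL)        /* NULL-safe measuring pass */

static int build_fmt(char *buf, int len, int nr_args)
{
        int pos = 0;

        pos += snprintf(DST, LEN_OR_ZERO, "\"(%%lx)");
        for (int i = 0; i < nr_args; i++)
                pos += snprintf(DST, LEN_OR_ZERO, " arg%d=%%lx", i + 1);
        pos += snprintf(DST, LEN_OR_ZERO, "\", REC->ip");
        for (int i = 0; i < nr_args; i++)
                pos += snprintf(DST, LEN_OR_ZERO, ", REC->arg%d", i + 1);
        return pos;     /* needed length, excluding the trailing NUL */
}

int main(void)
{
        int len = build_fmt(NULL, 0, 2);        /* pass 1: just measure */
        char *buf = malloc(len + 1);

        if (!buf)
                return 1;
        build_fmt(buf, len + 1, 2);             /* pass 2: actually format */
        printf("%s\n", buf);
        free(buf);
        return 0;
}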
-int set_print_fmt(struct trace_probe *tp, bool is_return)
+int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return)
{
int len;
char *print_fmt;
@@ -670,3 +635,28 @@ int set_print_fmt(struct trace_probe *tp, bool is_return)
return 0;
}
+
+int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ size_t offset, struct trace_probe *tp)
+{
+ int ret, i;
+
+ /* Set argument names as fields */
+ for (i = 0; i < tp->nr_args; i++) {
+ struct probe_arg *parg = &tp->args[i];
+ const char *fmt = parg->type->fmttype;
+ int size = parg->type->size;
+
+ if (parg->fmt)
+ fmt = parg->fmt;
+ if (parg->count)
+ size *= parg->count;
+ ret = trace_define_field(event_call, fmt, parg->name,
+ offset + parg->offset, size,
+ parg->type->is_signed,
+ FILTER_OTHER);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 5f52668e165d..974afc1a3e73 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -23,6 +23,7 @@
#include <linux/stringify.h>
#include <linux/limits.h>
#include <linux/uaccess.h>
+#include <linux/bitops.h>
#include <asm/bitsperlong.h>
#include "trace.h"
@@ -30,6 +31,7 @@
#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
+#define MAX_ARRAY_LEN 64
#define MAX_STRING_SIZE PATH_MAX
/* Reserved field names */
@@ -54,50 +56,74 @@
#define TP_FLAG_PROFILE 2
#define TP_FLAG_REGISTERED 4
+/* data_loc: data location, compatible with u32 */
+#define make_data_loc(len, offs) \
+ (((u32)(len) << 16) | ((u32)(offs) & 0xffff))
+#define get_loc_len(dl) ((u32)(dl) >> 16)
+#define get_loc_offs(dl) ((u32)(dl) & 0xffff)
-/* data_rloc: data relative location, compatible with u32 */
-#define make_data_rloc(len, roffs) \
- (((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
-#define get_rloc_len(dl) ((u32)(dl) >> 16)
-#define get_rloc_offs(dl) ((u32)(dl) & 0xffff)
-
-/*
- * Convert data_rloc to data_loc:
- * data_rloc stores the offset from data_rloc itself, but data_loc
- * stores the offset from event entry.
- */
-#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
-
-static nokprobe_inline void *get_rloc_data(u32 *dl)
+static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)
{
- return (u8 *)dl + get_rloc_offs(*dl);
+ return (u8 *)ent + get_loc_offs(*dl);
}
-/* For data_loc conversion */
-static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)
+static nokprobe_inline u32 update_data_loc(u32 loc, int consumed)
{
- return (u8 *)ent + get_rloc_offs(*dl);
+ u32 maxlen = get_loc_len(loc);
+ u32 offset = get_loc_offs(loc);
+
+ return make_data_loc(maxlen - consumed, offset + consumed);
}
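A data_loc packs a 16-bit length and a 16-bit offset from the event entry into one u32, and update_data_loc() advances past the bytes already consumed when a string array is being stored. A quick standalone check of the packing, with the macros copied in for illustration:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define make_data_loc(len, offs)        (((u32)(len) << 16) | ((u32)(offs) & 0xffff))
#define get_loc_len(dl)                 ((u32)(dl) >> 16)
#define get_loc_offs(dl)                ((u32)(dl) & 0xffff)

static u32 update_data_loc(u32 loc, int consumed)
{
        u32 maxlen = get_loc_len(loc);
        u32 offset = get_loc_offs(loc);

        return make_data_loc(maxlen - consumed, offset + consumed);
}

int main(void)
{
        u32 dl = make_data_loc(64, 40);         /* 64 bytes free at entry offset 40 */

        dl = update_data_loc(dl, 6);            /* a 6-byte string was stored */
        printf("len=%u offs=%u\n", (unsigned)get_loc_len(dl),
               (unsigned)get_loc_offs(dl));     /* len=58 offs=46 */
        return 0;
}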
-/* Data fetch function type */
-typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
/* Printing function type */
-typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *);
-
-/* Fetch types */
-enum {
- FETCH_MTD_reg = 0,
- FETCH_MTD_stack,
- FETCH_MTD_retval,
- FETCH_MTD_comm,
- FETCH_MTD_memory,
- FETCH_MTD_symbol,
- FETCH_MTD_deref,
- FETCH_MTD_bitfield,
- FETCH_MTD_file_offset,
- FETCH_MTD_END,
+typedef int (*print_type_func_t)(struct trace_seq *, void *, void *);
+
+enum fetch_op {
+ FETCH_OP_NOP = 0,
+ // Stage 1 (load) ops
+ FETCH_OP_REG, /* Register : .param = offset */
+ FETCH_OP_STACK, /* Stack : .param = index */
+ FETCH_OP_STACKP, /* Stack pointer */
+ FETCH_OP_RETVAL, /* Return value */
+ FETCH_OP_IMM, /* Immediate : .immediate */
+ FETCH_OP_COMM, /* Current comm */
+ FETCH_OP_ARG, /* Function argument : .param */
+ FETCH_OP_FOFFS, /* File offset: .immediate */
+ // Stage 2 (dereference) op
+ FETCH_OP_DEREF, /* Dereference: .offset */
+ // Stage 3 (store) ops
+ FETCH_OP_ST_RAW, /* Raw: .size */
+ FETCH_OP_ST_MEM, /* Mem: .offset, .size */
+ FETCH_OP_ST_STRING, /* String: .offset, .size */
+ // Stage 4 (modify) op
+ FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */
+ // Stage 5 (loop) op
+ FETCH_OP_LP_ARRAY, /* Array: .param = loop count */
+ FETCH_OP_END,
+ FETCH_NOP_SYMBOL, /* Unresolved Symbol holder */
};
+struct fetch_insn {
+ enum fetch_op op;
+ union {
+ unsigned int param;
+ struct {
+ unsigned int size;
+ int offset;
+ };
+ struct {
+ unsigned char basesize;
+ unsigned char lshift;
+ unsigned char rshift;
+ };
+ unsigned long immediate;
+ void *data;
+ };
+};
+
+/* fetch + deref*N + store + mod + end <= 16, this allows N=12, enough */
+#define FETCH_INSN_MAX 16
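Each argument is now compiled into an array of at most FETCH_INSN_MAX fetch_insn slots, and which union member matters depends on the opcode: param for register/stack indexes, offset/size for dereferences and stores, the shift triple for bitfields, immediate or data for constants and unresolved symbols. An illustrative guess at such a program for a "+8(+0(%di)):u16" argument (cut-down struct, made-up register offset):

#include <stdio.h>

/* Cut-down mirror of struct fetch_insn, for illustration only. */
enum op { OP_NOP, OP_REG, OP_DEREF, OP_ST_MEM, OP_END };

struct insn {
        enum op op;
        union {
                unsigned int param;             /* OP_REG: register offset */
                struct {
                        unsigned int size;      /* OP_ST_MEM: bytes to store */
                        int offset;             /* OP_DEREF/OP_ST_MEM: +offset */
                };
                unsigned long immediate;
                void *data;
        };
};

/*
 * Roughly what "+8(+0(%di)):u16" might compile to: load %di, dereference it,
 * and let the final dereference double as the 2-byte store (the register
 * offset 0x70 is just a made-up number).
 */
static const struct insn prog[] = {
        { .op = OP_REG,    .param = 0x70 },
        { .op = OP_DEREF,  .offset = 0 },
        { .op = OP_ST_MEM, .size = 2, .offset = 8 },
        { .op = OP_END },
};

int main(void)
{
        for (unsigned i = 0; i < sizeof(prog) / sizeof(prog[0]); i++)
                printf("%u: op=%d\n", i, prog[i].op);
        return 0;
}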
+
/* Fetch type information table */
struct fetch_type {
const char *name; /* Name of type */
@@ -106,13 +132,6 @@ struct fetch_type {
print_type_func_t print; /* Print functions */
const char *fmt; /* Fromat string */
const char *fmttype; /* Name in format file */
- /* Fetch functions */
- fetch_func_t fetch[FETCH_MTD_END];
-};
-
-struct fetch_param {
- fetch_func_t fn;
- void *data;
};
/* For defining macros, define string/string_size types */
@@ -124,8 +143,7 @@ typedef u32 string_size;
/* Printing in basic type function template */
#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \
-int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
- void *data, void *ent); \
+int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, void *data, void *ent);\
extern const char PRINT_TYPE_FMT_NAME(type)[]
DECLARE_BASIC_PRINT_TYPE_FUNC(u8);
@@ -142,57 +160,7 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(x32);
DECLARE_BASIC_PRINT_TYPE_FUNC(x64);
DECLARE_BASIC_PRINT_TYPE_FUNC(string);
-
-#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type
-
-/* Declare macro for basic types */
-#define DECLARE_FETCH_FUNC(method, type) \
-extern void FETCH_FUNC_NAME(method, type)(struct pt_regs *regs, \
- void *data, void *dest)
-
-#define DECLARE_BASIC_FETCH_FUNCS(method) \
-DECLARE_FETCH_FUNC(method, u8); \
-DECLARE_FETCH_FUNC(method, u16); \
-DECLARE_FETCH_FUNC(method, u32); \
-DECLARE_FETCH_FUNC(method, u64)
-
-DECLARE_BASIC_FETCH_FUNCS(reg);
-#define fetch_reg_string NULL
-#define fetch_reg_string_size NULL
-
-DECLARE_BASIC_FETCH_FUNCS(retval);
-#define fetch_retval_string NULL
-#define fetch_retval_string_size NULL
-
-DECLARE_BASIC_FETCH_FUNCS(symbol);
-DECLARE_FETCH_FUNC(symbol, string);
-DECLARE_FETCH_FUNC(symbol, string_size);
-
-DECLARE_BASIC_FETCH_FUNCS(deref);
-DECLARE_FETCH_FUNC(deref, string);
-DECLARE_FETCH_FUNC(deref, string_size);
-
-DECLARE_BASIC_FETCH_FUNCS(bitfield);
-#define fetch_bitfield_string NULL
-#define fetch_bitfield_string_size NULL
-
-/* comm only makes sense as a string */
-#define fetch_comm_u8 NULL
-#define fetch_comm_u16 NULL
-#define fetch_comm_u32 NULL
-#define fetch_comm_u64 NULL
-DECLARE_FETCH_FUNC(comm, string);
-DECLARE_FETCH_FUNC(comm, string_size);
-
-/*
- * Define macro for basic types - we don't need to define s* types, because
- * we have to care only about bitwidth at recording time.
- */
-#define DEFINE_BASIC_FETCH_FUNCS(method) \
-DEFINE_FETCH_##method(u8) \
-DEFINE_FETCH_##method(u16) \
-DEFINE_FETCH_##method(u32) \
-DEFINE_FETCH_##method(u64)
+DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
/* Default (unsigned long) fetch type */
#define __DEFAULT_FETCH_TYPE(t) x##t
@@ -200,8 +168,9 @@ DEFINE_FETCH_##method(u64)
#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
-#define ASSIGN_FETCH_FUNC(method, type) \
- [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type)
+#define __ADDR_FETCH_TYPE(t) u##t
+#define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t)
+#define ADDR_FETCH_TYPE _ADDR_FETCH_TYPE(BITS_PER_LONG)
#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
{.name = _name, \
@@ -210,64 +179,23 @@ DEFINE_FETCH_##method(u64)
.print = PRINT_TYPE_FUNC_NAME(ptype), \
.fmt = PRINT_TYPE_FMT_NAME(ptype), \
.fmttype = _fmttype, \
- .fetch = { \
-ASSIGN_FETCH_FUNC(reg, ftype), \
-ASSIGN_FETCH_FUNC(stack, ftype), \
-ASSIGN_FETCH_FUNC(retval, ftype), \
-ASSIGN_FETCH_FUNC(comm, ftype), \
-ASSIGN_FETCH_FUNC(memory, ftype), \
-ASSIGN_FETCH_FUNC(symbol, ftype), \
-ASSIGN_FETCH_FUNC(deref, ftype), \
-ASSIGN_FETCH_FUNC(bitfield, ftype), \
-ASSIGN_FETCH_FUNC(file_offset, ftype), \
- } \
}
-
+#define _ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
+ __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, #_fmttype)
#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
- __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype)
+ _ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, ptype)
/* If ptype is an alias of atype, use this macro (show atype in format) */
#define ASSIGN_FETCH_TYPE_ALIAS(ptype, atype, ftype, sign) \
- __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #atype)
+ _ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, atype)
#define ASSIGN_FETCH_TYPE_END {}
-
-#define FETCH_TYPE_STRING 0
-#define FETCH_TYPE_STRSIZE 1
+#define MAX_ARRAY_LEN 64
#ifdef CONFIG_KPROBE_EVENTS
-struct symbol_cache;
-unsigned long update_symbol_cache(struct symbol_cache *sc);
-void free_symbol_cache(struct symbol_cache *sc);
-struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
bool trace_kprobe_on_func_entry(struct trace_event_call *call);
bool trace_kprobe_error_injectable(struct trace_event_call *call);
#else
-/* uprobes do not support symbol fetch methods */
-#define fetch_symbol_u8 NULL
-#define fetch_symbol_u16 NULL
-#define fetch_symbol_u32 NULL
-#define fetch_symbol_u64 NULL
-#define fetch_symbol_string NULL
-#define fetch_symbol_string_size NULL
-
-struct symbol_cache {
-};
-static inline unsigned long __used update_symbol_cache(struct symbol_cache *sc)
-{
- return 0;
-}
-
-static inline void __used free_symbol_cache(struct symbol_cache *sc)
-{
-}
-
-static inline struct symbol_cache * __used
-alloc_symbol_cache(const char *sym, long offset)
-{
- return NULL;
-}
-
static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
return false;
@@ -280,11 +208,13 @@ static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
#endif /* CONFIG_KPROBE_EVENTS */
struct probe_arg {
- struct fetch_param fetch;
- struct fetch_param fetch_size;
+ struct fetch_insn *code;
+ bool dynamic;/* Dynamic array (string) is used */
unsigned int offset; /* Offset from argument entry */
+ unsigned int count; /* Array count */
const char *name; /* Name of this argument */
const char *comm; /* Command of this argument */
+ char *fmt; /* Format string if needed */
const struct fetch_type *type; /* Type of this argument */
};
@@ -313,12 +243,6 @@ static inline bool trace_probe_is_registered(struct trace_probe *tp)
return !!(tp->flags & TP_FLAG_REGISTERED);
}
-static nokprobe_inline void call_fetch(struct fetch_param *fprm,
- struct pt_regs *regs, void *dest)
-{
- return fprm->fn(regs, fprm->data, dest);
-}
-
/* Check the name is good for event/group/fields */
static inline bool is_good_name(const char *name)
{
@@ -343,67 +267,23 @@ find_event_file_link(struct trace_probe *tp, struct trace_event_file *file)
return NULL;
}
+#define TPARG_FL_RETURN BIT(0)
+#define TPARG_FL_KERNEL BIT(1)
+#define TPARG_FL_FENTRY BIT(2)
+#define TPARG_FL_MASK GENMASK(2, 0)
+
extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
- struct probe_arg *parg, bool is_return, bool is_kprobe,
- const struct fetch_type *ftbl);
+ struct probe_arg *parg, unsigned int flags);
extern int traceprobe_conflict_field_name(const char *name,
struct probe_arg *args, int narg);
-extern void traceprobe_update_arg(struct probe_arg *arg);
+extern int traceprobe_update_arg(struct probe_arg *arg);
extern void traceprobe_free_probe_arg(struct probe_arg *arg);
extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
-/* Sum up total data length for dynamic arraies (strings) */
-static nokprobe_inline int
-__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
-{
- int i, ret = 0;
- u32 len;
-
- for (i = 0; i < tp->nr_args; i++)
- if (unlikely(tp->args[i].fetch_size.fn)) {
- call_fetch(&tp->args[i].fetch_size, regs, &len);
- ret += len;
- }
-
- return ret;
-}
-
-/* Store the value of each argument */
-static nokprobe_inline void
-store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs,
- u8 *data, int maxlen)
-{
- int i;
- u32 end = tp->size;
- u32 *dl; /* Data (relative) location */
-
- for (i = 0; i < tp->nr_args; i++) {
- if (unlikely(tp->args[i].fetch_size.fn)) {
- /*
- * First, we set the relative location and
- * maximum data length to *dl
- */
- dl = (u32 *)(data + tp->args[i].offset);
- *dl = make_data_rloc(maxlen, end - tp->args[i].offset);
- /* Then try to fetch string or dynamic array data */
- call_fetch(&tp->args[i].fetch, regs, dl);
- /* Reduce maximum length */
- end += get_rloc_len(*dl);
- maxlen -= get_rloc_len(*dl);
- /* Trick here, convert data_rloc to data_loc */
- *dl = convert_rloc_to_loc(*dl,
- ent_size + tp->args[i].offset);
- } else
- /* Just fetching data normally */
- call_fetch(&tp->args[i].fetch, regs,
- data + tp->args[i].offset);
- }
-}
-
-extern int set_print_fmt(struct trace_probe *tp, bool is_return);
+extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return);
#ifdef CONFIG_PERF_EVENTS
extern struct trace_event_call *
@@ -412,6 +292,9 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
extern void destroy_local_trace_kprobe(struct trace_event_call *event_call);
extern struct trace_event_call *
-create_local_trace_uprobe(char *name, unsigned long offs, bool is_return);
+create_local_trace_uprobe(char *name, unsigned long offs,
+ unsigned long ref_ctr_offset, bool is_return);
extern void destroy_local_trace_uprobe(struct trace_event_call *event_call);
#endif
+extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ size_t offset, struct trace_probe *tp);
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
new file mode 100644
index 000000000000..5c56afc17cf8
--- /dev/null
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Traceprobe fetch helper inlines
+ */
+
+static nokprobe_inline void
+fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
+{
+ switch (code->size) {
+ case 1:
+ *(u8 *)buf = (u8)val;
+ break;
+ case 2:
+ *(u16 *)buf = (u16)val;
+ break;
+ case 4:
+ *(u32 *)buf = (u32)val;
+ break;
+ case 8:
+ //TBD: 32bit signed
+ *(u64 *)buf = (u64)val;
+ break;
+ default:
+ *(unsigned long *)buf = val;
+ }
+}
+
+static nokprobe_inline void
+fetch_apply_bitfield(struct fetch_insn *code, void *buf)
+{
+ switch (code->basesize) {
+ case 1:
+ *(u8 *)buf <<= code->lshift;
+ *(u8 *)buf >>= code->rshift;
+ break;
+ case 2:
+ *(u16 *)buf <<= code->lshift;
+ *(u16 *)buf >>= code->rshift;
+ break;
+ case 4:
+ *(u32 *)buf <<= code->lshift;
+ *(u32 *)buf >>= code->rshift;
+ break;
+ case 8:
+ *(u64 *)buf <<= code->lshift;
+ *(u64 *)buf >>= code->rshift;
+ break;
+ }
+}
+
+/*
+ * These functions must be defined for each callsite.
+ * Return consumed dynamic data size (>= 0), or error (< 0).
+ * If dest is NULL, don't store result and return required dynamic data size.
+ */
+static int
+process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
+ void *dest, void *base);
+static nokprobe_inline int fetch_store_strlen(unsigned long addr);
+static nokprobe_inline int
+fetch_store_string(unsigned long addr, void *dest, void *base);
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size);
+
+/* From the 2nd stage, the routine is the same */
+static nokprobe_inline int
+process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
+ void *dest, void *base)
+{
+ struct fetch_insn *s3 = NULL;
+ int total = 0, ret = 0, i = 0;
+ u32 loc = 0;
+ unsigned long lval = val;
+
+stage2:
+ /* 2nd stage: dereference memory if needed */
+ while (code->op == FETCH_OP_DEREF) {
+ lval = val;
+ ret = probe_mem_read(&val, (void *)val + code->offset,
+ sizeof(val));
+ if (ret)
+ return ret;
+ code++;
+ }
+
+ s3 = code;
+stage3:
+ /* 3rd stage: store value to buffer */
+ if (unlikely(!dest)) {
+ if (code->op == FETCH_OP_ST_STRING) {
+ ret += fetch_store_strlen(val + code->offset);
+ code++;
+ goto array;
+ } else
+ return -EILSEQ;
+ }
+
+ switch (code->op) {
+ case FETCH_OP_ST_RAW:
+ fetch_store_raw(val, code, dest);
+ break;
+ case FETCH_OP_ST_MEM:
+ probe_mem_read(dest, (void *)val + code->offset, code->size);
+ break;
+ case FETCH_OP_ST_STRING:
+ loc = *(u32 *)dest;
+ ret = fetch_store_string(val + code->offset, dest, base);
+ break;
+ default:
+ return -EILSEQ;
+ }
+ code++;
+
+ /* 4th stage: modify stored value if needed */
+ if (code->op == FETCH_OP_MOD_BF) {
+ fetch_apply_bitfield(code, dest);
+ code++;
+ }
+
+array:
+ /* the last stage: Loop on array */
+ if (code->op == FETCH_OP_LP_ARRAY) {
+ total += ret;
+ if (++i < code->param) {
+ code = s3;
+ if (s3->op != FETCH_OP_ST_STRING) {
+ dest += s3->size;
+ val += s3->size;
+ goto stage3;
+ }
+ code--;
+ val = lval + sizeof(char *);
+ if (dest) {
+ dest += sizeof(u32);
+ *(u32 *)dest = update_data_loc(loc, ret);
+ }
+ goto stage2;
+ }
+ code++;
+ ret = total;
+ }
+
+ return code->op == FETCH_OP_END ? ret : -EILSEQ;
+}
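process_fetch_insn_bottom() is a small interpreter: stage 2 follows any chain of FETCH_OP_DEREF, stage 3 stores the value (raw, memory, or string), stage 4 applies an optional bitfield, and FETCH_OP_LP_ARRAY rewinds for arrays. A compact userspace model of the non-string, non-array path, with plain memcpy() standing in for probe_mem_read() (simplified structures, not the kernel ones):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum op { OP_IMM, OP_DEREF, OP_ST_RAW, OP_ST_MEM, OP_MOD_BF, OP_END };

struct insn {
        enum op op;
        unsigned long immediate;
        int offset;
        unsigned int size;
        unsigned char lshift, rshift;
};

/* Toy model of the fetch pipeline; stage 1 is just OP_IMM here. */
static int run(const struct insn *code, void *dest)
{
        unsigned long val = code->immediate;            /* stage 1: load */
        code++;

        while (code->op == OP_DEREF) {                  /* stage 2: dereference */
                memcpy(&val, (void *)(val + code->offset), sizeof(val));
                code++;
        }
        switch (code->op) {                             /* stage 3: store */
        case OP_ST_RAW:
                memcpy(dest, &val, code->size);
                break;
        case OP_ST_MEM:
                memcpy(dest, (void *)(val + code->offset), code->size);
                break;
        default:
                return -1;
        }
        code++;
        if (code->op == OP_MOD_BF) {                    /* stage 4: bitfield (u32 only here) */
                *(uint32_t *)dest <<= code->lshift;
                *(uint32_t *)dest >>= code->rshift;
                code++;
        }
        return code->op == OP_END ? 0 : -1;
}

int main(void)
{
        struct { uint32_t pad; uint32_t field; } target = { 0, 0x1234 };
        struct insn prog[] = {
                { .op = OP_IMM, .immediate = (unsigned long)&target },
                { .op = OP_ST_MEM, .offset = 4, .size = 4 },
                { .op = OP_END },
        };
        uint32_t out = 0;

        run(prog, &out);
        printf("0x%x\n", (unsigned)out);        /* 0x1234 */
        return 0;
}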
+
+/* Sum up the total data length for dynamic arrays (strings) */
+static nokprobe_inline int
+__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
+{
+ struct probe_arg *arg;
+ int i, len, ret = 0;
+
+ for (i = 0; i < tp->nr_args; i++) {
+ arg = tp->args + i;
+ if (unlikely(arg->dynamic)) {
+ len = process_fetch_insn(arg->code, regs, NULL, NULL);
+ if (len > 0)
+ ret += len;
+ }
+ }
+
+ return ret;
+}
+
+/* Store the value of each argument */
+static nokprobe_inline void
+store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
+ int header_size, int maxlen)
+{
+ struct probe_arg *arg;
+ void *base = data - header_size;
+ void *dyndata = data + tp->size;
+ u32 *dl; /* Data location */
+ int ret, i;
+
+ for (i = 0; i < tp->nr_args; i++) {
+ arg = tp->args + i;
+ dl = data + arg->offset;
+ /* Point to the dynamic data area if needed */
+ if (unlikely(arg->dynamic))
+ *dl = make_data_loc(maxlen, dyndata - base);
+ ret = process_fetch_insn(arg->code, regs, dl, base);
+ if (unlikely(ret < 0 && arg->dynamic))
+ *dl = make_data_loc(0, dyndata - base);
+ else
+ dyndata += ret;
+ }
+}
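store_trace_args() lays a record out as the event header, then one fixed-size slot per argument, then a dynamic tail for strings; a dynamic argument's slot holds only the data_loc that points into that tail. A rough picture of a two-argument record (one u64 and one string) assembled the same way, with invented sizes and offsets:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define make_data_loc(len, offs) (((uint32_t)(len) << 16) | ((uint32_t)(offs) & 0xffff))

int main(void)
{
        /* invented layout: 8-byte header, u64 slot, u32 data_loc slot, tail */
        uint8_t rec[64] = { 0 };
        uint8_t *base = rec;                    /* data_loc offsets count from here */
        uint8_t *data = rec + 8;                /* fixed argument slots start here */
        uint8_t *dyndata = data + 8 + 4;        /* dynamic tail after both slots */
        const char *s = "bash";
        uint64_t a0 = 42;
        uint32_t dl;
        int used;

        memcpy(data, &a0, sizeof(a0));          /* arg0: plain value in its slot */

        used = (int)strlen(s) + 1;              /* arg1: string goes to the tail */
        memcpy(dyndata, s, used);
        dl = make_data_loc(used, dyndata - base);
        memcpy(data + 8, &dl, sizeof(dl));      /* its slot keeps only the data_loc */

        printf("arg1 at entry offset %u, len %u: \"%s\"\n",
               (unsigned)(dl & 0xffff), (unsigned)(dl >> 16),
               (char *)(base + (dl & 0xffff)));
        return 0;
}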
+
+static inline int
+print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
+ u8 *data, void *field)
+{
+ void *p;
+ int i, j;
+
+ for (i = 0; i < nr_args; i++) {
+ struct probe_arg *a = args + i;
+
+ trace_seq_printf(s, " %s=", a->name);
+ if (likely(!a->count)) {
+ if (!a->type->print(s, data + a->offset, field))
+ return -ENOMEM;
+ continue;
+ }
+ trace_seq_putc(s, '{');
+ p = data + a->offset;
+ for (j = 0; j < a->count; j++) {
+ if (!a->type->print(s, p, field))
+ return -ENOMEM;
+ trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
+ p += a->type->size;
+ }
+ }
+ return 0;
+}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 4237eba4ef20..2b0d1ee3241c 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -111,7 +111,7 @@ check_stack(unsigned long ip, unsigned long *stack)
stack_trace_max_size = this_size;
stack_trace_max.nr_entries = 0;
- stack_trace_max.skip = 3;
+ stack_trace_max.skip = 0;
save_stack_trace(&stack_trace_max);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e696667da29a..31ea48eceda1 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -15,6 +15,7 @@
#include <linux/rculist.h>
#include "trace_probe.h"
+#include "trace_probe_tmpl.h"
#define UPROBE_EVENT_SYSTEM "uprobes"
@@ -47,6 +48,7 @@ struct trace_uprobe {
struct inode *inode;
char *filename;
unsigned long offset;
+ unsigned long ref_ctr_offset;
unsigned long nhit;
struct trace_probe tp;
};
@@ -98,74 +100,52 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
/*
* Uprobes-specific fetch functions
*/
-#define DEFINE_FETCH_stack(type) \
-static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
- void *offset, void *dest) \
-{ \
- *(type *)dest = (type)get_user_stack_nth(regs, \
- ((unsigned long)offset)); \
-}
-DEFINE_BASIC_FETCH_FUNCS(stack)
-/* No string on the stack entry */
-#define fetch_stack_string NULL
-#define fetch_stack_string_size NULL
-
-#define DEFINE_FETCH_memory(type) \
-static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
- void *addr, void *dest) \
-{ \
- type retval; \
- void __user *vaddr = (void __force __user *) addr; \
- \
- if (copy_from_user(&retval, vaddr, sizeof(type))) \
- *(type *)dest = 0; \
- else \
- *(type *) dest = retval; \
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size)
+{
+ void __user *vaddr = (void __force __user *)src;
+
+ return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}
-DEFINE_BASIC_FETCH_FUNCS(memory)
/*
* Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
* length and relative data location.
*/
-static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- void *addr, void *dest)
+static nokprobe_inline int
+fetch_store_string(unsigned long addr, void *dest, void *base)
{
long ret;
- u32 rloc = *(u32 *)dest;
- int maxlen = get_rloc_len(rloc);
- u8 *dst = get_rloc_data(dest);
+ u32 loc = *(u32 *)dest;
+ int maxlen = get_loc_len(loc);
+ u8 *dst = get_loc_data(dest, base);
void __user *src = (void __force __user *) addr;
- if (!maxlen)
- return;
+ if (unlikely(!maxlen))
+ return -ENOMEM;
ret = strncpy_from_user(dst, src, maxlen);
- if (ret == maxlen)
- dst[--ret] = '\0';
-
- if (ret < 0) { /* Failed to fetch string */
- ((u8 *)get_rloc_data(dest))[0] = '\0';
- *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
- } else {
- *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
+ if (ret >= 0) {
+ if (ret == maxlen)
+ dst[ret - 1] = '\0';
+ *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
}
+
+ return ret;
}
-static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
- void *addr, void *dest)
+/* Return the length of the string -- including the terminating NUL byte */
+static nokprobe_inline int
+fetch_store_strlen(unsigned long addr)
{
int len;
void __user *vaddr = (void __force __user *) addr;
len = strnlen_user(vaddr, MAX_STRING_SIZE);
- if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
- *(u32 *)dest = 0;
- else
- *(u32 *)dest = len;
+ return (len > MAX_STRING_SIZE) ? 0 : len;
}
-static unsigned long translate_user_vaddr(void *file_offset)
+static unsigned long translate_user_vaddr(unsigned long file_offset)
{
unsigned long base_addr;
struct uprobe_dispatch_data *udd;
@@ -173,44 +153,44 @@ static unsigned long translate_user_vaddr(void *file_offset)
udd = (void *) current->utask->vaddr;
base_addr = udd->bp_addr - udd->tu->offset;
- return base_addr + (unsigned long)file_offset;
+ return base_addr + file_offset;
}
-#define DEFINE_FETCH_file_offset(type) \
-static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
- void *offset, void *dest)\
-{ \
- void *vaddr = (void *)translate_user_vaddr(offset); \
- \
- FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
+/* Note that we don't verify it, since the code does not come from user space */
+static int
+process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
+ void *base)
+{
+ unsigned long val;
+
+ /* 1st stage: get value from context */
+ switch (code->op) {
+ case FETCH_OP_REG:
+ val = regs_get_register(regs, code->param);
+ break;
+ case FETCH_OP_STACK:
+ val = get_user_stack_nth(regs, code->param);
+ break;
+ case FETCH_OP_STACKP:
+ val = user_stack_pointer(regs);
+ break;
+ case FETCH_OP_RETVAL:
+ val = regs_return_value(regs);
+ break;
+ case FETCH_OP_IMM:
+ val = code->immediate;
+ break;
+ case FETCH_OP_FOFFS:
+ val = translate_user_vaddr(code->immediate);
+ break;
+ default:
+ return -EILSEQ;
+ }
+ code++;
+
+ return process_fetch_insn_bottom(code, val, dest, base);
}
-DEFINE_BASIC_FETCH_FUNCS(file_offset)
-DEFINE_FETCH_file_offset(string)
-DEFINE_FETCH_file_offset(string_size)
-
-/* Fetch type information table */
-static const struct fetch_type uprobes_fetch_type_table[] = {
- /* Special types */
- [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
- sizeof(u32), 1, "__data_loc char[]"),
- [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
- string_size, sizeof(u32), 0, "u32"),
- /* Basic types */
- ASSIGN_FETCH_TYPE(u8, u8, 0),
- ASSIGN_FETCH_TYPE(u16, u16, 0),
- ASSIGN_FETCH_TYPE(u32, u32, 0),
- ASSIGN_FETCH_TYPE(u64, u64, 0),
- ASSIGN_FETCH_TYPE(s8, u8, 1),
- ASSIGN_FETCH_TYPE(s16, u16, 1),
- ASSIGN_FETCH_TYPE(s32, u32, 1),
- ASSIGN_FETCH_TYPE(s64, u64, 1),
- ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
- ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
- ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
- ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
-
- ASSIGN_FETCH_TYPE_END
-};
+NOKPROBE_SYMBOL(process_fetch_insn)
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
@@ -311,6 +291,35 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu)
return 0;
}
+/*
+ * A uprobe with multiple reference counters is not allowed; i.e.
+ * if the inode and offset match, the reference counter offset *must*
+ * match as well. There is one exception, though: if the user is
+ * replacing an old trace_uprobe with a new one (same group/event),
+ * then we allow the same uprobe with a new reference counter as
+ * long as the new one does not conflict with any other existing
+ * ones.
+ */
+static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
+{
+ struct trace_uprobe *tmp, *old = NULL;
+ struct inode *new_inode = d_real_inode(new->path.dentry);
+
+ old = find_probe_event(trace_event_name(&new->tp.call),
+ new->tp.call.class->system);
+
+ list_for_each_entry(tmp, &uprobe_list, list) {
+ if ((old ? old != tmp : true) &&
+ new_inode == d_real_inode(tmp->path.dentry) &&
+ new->offset == tmp->offset &&
+ new->ref_ctr_offset != tmp->ref_ctr_offset) {
+ pr_warn("Reference counter offset mismatch.");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ return old;
+}
+
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
@@ -320,8 +329,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
mutex_lock(&uprobe_lock);
/* register as an event */
- old_tu = find_probe_event(trace_event_name(&tu->tp.call),
- tu->tp.call.class->system);
+ old_tu = find_old_trace_uprobe(tu);
+ if (IS_ERR(old_tu)) {
+ ret = PTR_ERR(old_tu);
+ goto end;
+ }
+
if (old_tu) {
/* delete old event */
ret = unregister_trace_uprobe(old_tu);
@@ -352,10 +365,10 @@ end:
static int create_trace_uprobe(int argc, char **argv)
{
struct trace_uprobe *tu;
- char *arg, *event, *group, *filename;
+ char *arg, *event, *group, *filename, *rctr, *rctr_end;
char buf[MAX_EVENT_NAME_LEN];
struct path path;
- unsigned long offset;
+ unsigned long offset, ref_ctr_offset;
bool is_delete, is_return;
int i, ret;
@@ -364,6 +377,7 @@ static int create_trace_uprobe(int argc, char **argv)
is_return = false;
event = NULL;
group = NULL;
+ ref_ctr_offset = 0;
/* argc must be >= 1 */
if (argv[0][0] == '-')
@@ -438,6 +452,26 @@ static int create_trace_uprobe(int argc, char **argv)
goto fail_address_parse;
}
+ /* Parse reference counter offset if specified. */
+ rctr = strchr(arg, '(');
+ if (rctr) {
+ rctr_end = strchr(rctr, ')');
+ if (rctr > rctr_end || *(rctr_end + 1) != 0) {
+ ret = -EINVAL;
+ pr_info("Invalid reference counter offset.\n");
+ goto fail_address_parse;
+ }
+
+ *rctr++ = '\0';
+ *rctr_end = '\0';
+ ret = kstrtoul(rctr, 0, &ref_ctr_offset);
+ if (ret) {
+ pr_info("Invalid reference counter offset.\n");
+ goto fail_address_parse;
+ }
+ }
+
+ /* Parse uprobe offset. */
ret = kstrtoul(arg, 0, &offset);
if (ret)
goto fail_address_parse;
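The block above adds an optional "(ref_ctr_offset)" after the uprobe offset, e.g. an event spec ending in 0x4f30(0x8a128): the code splits at '(', requires a closing ')' with nothing after it, and parses both numbers with kstrtoul(). A userspace sketch of that split (the offsets are made up and the pointer checks are simplified):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "OFFSET(REF_CTR_OFFSET)" in place into two numbers; 0 on success. */
static int parse_off(char *arg, unsigned long *off, unsigned long *ref_ctr)
{
        char *rctr = strchr(arg, '(');
        char *rctr_end;

        *ref_ctr = 0;
        if (rctr) {
                rctr_end = strchr(rctr, ')');
                if (!rctr_end || *(rctr_end + 1) != '\0')
                        return -1;      /* ')' missing or trailing junk */
                *rctr++ = '\0';
                *rctr_end = '\0';
                *ref_ctr = strtoul(rctr, NULL, 0);
        }
        *off = strtoul(arg, NULL, 0);
        return 0;
}

int main(void)
{
        char arg[] = "0x4f30(0x8a128)";         /* made-up offsets */
        unsigned long off, ref_ctr;

        if (!parse_off(arg, &off, &ref_ctr))
                printf("offset=0x%lx ref_ctr_offset=0x%lx\n", off, ref_ctr);
        return 0;
}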
@@ -472,6 +506,7 @@ static int create_trace_uprobe(int argc, char **argv)
goto fail_address_parse;
}
tu->offset = offset;
+ tu->ref_ctr_offset = ref_ctr_offset;
tu->path = path;
tu->filename = kstrdup(filename, GFP_KERNEL);
@@ -522,8 +557,7 @@ static int create_trace_uprobe(int argc, char **argv)
/* Parse fetch argument */
ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
- is_return, false,
- uprobes_fetch_type_table);
+ is_return ? TPARG_FL_RETURN : 0);
if (ret) {
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
@@ -590,6 +624,9 @@ static int probes_seq_show(struct seq_file *m, void *v)
trace_event_name(&tu->tp.call), tu->filename,
(int)(sizeof(void *) * 2), tu->offset);
+ if (tu->ref_ctr_offset)
+ seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
+
for (i = 0; i < tu->tp.nr_args; i++)
seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
@@ -833,7 +870,6 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
struct trace_seq *s = &iter->seq;
struct trace_uprobe *tu;
u8 *data;
- int i;
entry = (struct uprobe_trace_entry_head *)iter->ent;
tu = container_of(event, struct trace_uprobe, tp.call.event);
@@ -850,12 +886,8 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
data = DATAOF_TRACE_ENTRY(entry, false);
}
- for (i = 0; i < tu->tp.nr_args; i++) {
- struct probe_arg *parg = &tu->tp.args[i];
-
- if (!parg->type->print(s, parg->name, data + parg->offset, entry))
- goto out;
- }
+ if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
+ goto out;
trace_seq_putc(s, '\n');
@@ -905,7 +937,13 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
tu->consumer.filter = filter;
tu->inode = d_real_inode(tu->path.dentry);
- ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+ if (tu->ref_ctr_offset) {
+ ret = uprobe_register_refctr(tu->inode, tu->offset,
+ tu->ref_ctr_offset, &tu->consumer);
+ } else {
+ ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+ }
+
if (ret)
goto err_buffer;
@@ -958,7 +996,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
- int ret, i, size;
+ int ret, size;
struct uprobe_trace_entry_head field;
struct trace_uprobe *tu = event_call->data;
@@ -970,19 +1008,8 @@ static int uprobe_event_define_fields(struct trace_event_call *event_call)
DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
size = SIZEOF_TRACE_ENTRY(false);
}
- /* Set argument names as fields */
- for (i = 0; i < tu->tp.nr_args; i++) {
- struct probe_arg *parg = &tu->tp.args[i];
-
- ret = trace_define_field(event_call, parg->type->fmttype,
- parg->name, size + parg->offset,
- parg->type->size, parg->type->is_signed,
- FILTER_OTHER);
- if (ret)
- return ret;
- }
- return 0;
+ return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
@@ -1233,7 +1260,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
ucb = uprobe_buffer_get();
- store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+ store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
if (tu->tp.flags & TP_FLAG_TRACE)
ret |= uprobe_trace_func(tu, regs, ucb, dsize);
@@ -1268,7 +1295,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
ucb = uprobe_buffer_get();
- store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+ store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
if (tu->tp.flags & TP_FLAG_TRACE)
uretprobe_trace_func(tu, func, regs, ucb, dsize);
@@ -1304,7 +1331,7 @@ static int register_uprobe_event(struct trace_uprobe *tu)
init_trace_event_call(tu, call);
- if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
+ if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
return -ENOMEM;
ret = register_trace_event(&call->event);
@@ -1340,7 +1367,8 @@ static int unregister_uprobe_event(struct trace_uprobe *tu)
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
-create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
+create_local_trace_uprobe(char *name, unsigned long offs,
+ unsigned long ref_ctr_offset, bool is_return)
{
struct trace_uprobe *tu;
struct path path;
@@ -1372,10 +1400,11 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
tu->offset = offs;
tu->path = path;
+ tu->ref_ctr_offset = ref_ctr_offset;
tu->filename = kstrdup(name, GFP_KERNEL);
init_trace_event_call(tu, &tu->tp.call);
- if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
+ if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
ret = -ENOMEM;
goto error;
}
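
The trace_uprobe hunks above add an optional "(REF_CTR_OFFSET)" suffix to the PATH:OFFSET probe argument, split out with strchr() before the offset itself is parsed. A minimal user-space sketch of that split (strtoul() stands in for kstrtoul(); the probe argument value is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "OFFSET(REF_CTR_OFFSET)" the same way the parser above does. */
static int parse_offsets(char *arg, unsigned long *offset,
			 unsigned long *ref_ctr_offset)
{
	char *rctr = strchr(arg, '(');
	char *rctr_end;

	*ref_ctr_offset = 0;
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end || rctr_end[1] != '\0')
			return -1;			/* malformed suffix */
		*rctr++ = '\0';
		*rctr_end = '\0';
		*ref_ctr_offset = strtoul(rctr, NULL, 0);
	}
	*offset = strtoul(arg, NULL, 0);
	return 0;
}

int main(void)
{
	char arg[] = "0x4d2(0x10a8)";		/* hypothetical probe argument */
	unsigned long off, rc;

	if (!parse_offsets(arg, &off, &rc))
		printf("offset=0x%lx ref_ctr_offset=0x%lx\n", off, rc);
	return 0;
}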
diff --git a/lib/Kconfig b/lib/Kconfig
index d1573a16aa92..a9965f4af4dd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -624,6 +624,3 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
-
-config GENERIC_LIB_UMODDI3
- bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e0ba05e6f6bd..1af29b8224fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1292,7 +1292,7 @@ config DEBUG_KOBJECT
depends on DEBUG_KERNEL
help
If you say Y here, some extra kobject debugging messages will be sent
- to the syslog.
+ to the syslog.
config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging"
@@ -1980,7 +1980,6 @@ endif # RUNTIME_TESTING_MENU
config MEMTEST
bool "Memtest"
- depends on HAVE_MEMBLOCK
---help---
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
diff --git a/lib/Makefile b/lib/Makefile
index 988949c4fd3a..db06d1237898 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -274,4 +274,3 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
-obj-$(CONFIG_GENERIC_LIB_UMODDI3) += umoddi3.o udivmoddi4.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2fd07f6df0b8..eead55aa7170 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -36,11 +37,6 @@
* carefully filter out these unused bits from impacting their
* results.
*
- * These operations actually hold to a slightly stronger rule:
- * if you don't input any bitmaps to these ops that have some
- * unused bits set, then they won't output any set unused bits
- * in output bitmaps.
- *
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
@@ -466,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user);
* ranges if list is specified or hex digits grouped into comma-separated
* sets of 8 digits/set. Returns the number of characters written to buf.
*
- * It is assumed that @buf is a pointer into a PAGE_SIZE area and that
- * sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output.
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
*/
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits)
{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
- if (len > 1)
- n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
- return n;
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
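
The simplified helper derives the remaining room from the offset within the page, relying on the documented assumption that @buf points into a page-aligned, PAGE_SIZE area — which is what sysfs ->show() callbacks receive. A hedged sketch of such a caller (the attribute itself is illustrative, the calling convention is the real one):

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/device.h>

/* Sketch of a typical sysfs show() caller of bitmap_print_to_pagebuf(). */
static ssize_t online_cpus_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* buf is the page-sized, page-aligned buffer handed over by sysfs. */
	return bitmap_print_to_pagebuf(true, buf,
				       cpumask_bits(cpu_online_mask),
				       nr_cpu_ids);
}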
diff --git a/lib/cpumask.c b/lib/cpumask.c
index beca6244671a..8d666ab84b5c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,7 +4,7 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_virt_alloc(cpumask_size(), 0);
+ *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
}
/**
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8be175df3075..7ebccb5c1637 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -83,6 +83,7 @@
const struct kvec *kvec; \
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
+ } else if (unlikely(i->type & ITER_DISCARD)) { \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -114,6 +115,8 @@
} \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
+ } else if (unlikely(i->type & ITER_DISCARD)) { \
+ skip += n; \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -428,17 +431,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
-void iov_iter_init(struct iov_iter *i, int direction,
+void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
+ WARN_ON(direction & ~(READ | WRITE));
+ direction &= READ | WRITE;
+
/* It will get better. Eventually... */
if (uaccess_kernel()) {
- direction |= ITER_KVEC;
- i->type = direction;
+ i->type = ITER_KVEC | direction;
i->kvec = (struct kvec *)iov;
} else {
- i->type = direction;
+ i->type = ITER_IOVEC | direction;
i->iov = iov;
}
i->nr_segs = nr_segs;
@@ -558,7 +563,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
@@ -658,7 +663,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
const char *from = addr;
unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter_mcsafe(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
@@ -692,7 +697,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -712,7 +717,7 @@ EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return false;
}
@@ -739,7 +744,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -773,7 +778,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return 0;
}
@@ -794,7 +799,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
WARN_ON(1);
return false;
}
@@ -836,7 +841,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
- } else if (likely(!(i->type & ITER_PIPE)))
+ } else if (unlikely(iov_iter_is_discard(i)))
+ return bytes;
+ else if (likely(!iov_iter_is_pipe(i)))
return copy_page_to_iter_iovec(page, offset, bytes, i);
else
return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -848,7 +855,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
@@ -888,7 +895,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
clear_user(v.iov_base, v.iov_len),
@@ -908,7 +915,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
kunmap_atomic(kaddr);
return 0;
}
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
@@ -972,10 +979,14 @@ static void pipe_advance(struct iov_iter *i, size_t size)
void iov_iter_advance(struct iov_iter *i, size_t size)
{
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
pipe_advance(i, size);
return;
}
+ if (unlikely(iov_iter_is_discard(i))) {
+ i->count -= size;
+ return;
+ }
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -987,7 +998,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
if (WARN_ON(unroll > MAX_RW_COUNT))
return;
i->count += unroll;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
int idx = i->idx;
size_t off = i->iov_offset;
@@ -1011,12 +1022,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
pipe_truncate(i);
return;
}
+ if (unlikely(iov_iter_is_discard(i)))
+ return;
if (unroll <= i->iov_offset) {
i->iov_offset -= unroll;
return;
}
unroll -= i->iov_offset;
- if (i->type & ITER_BVEC) {
+ if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
@@ -1049,23 +1062,25 @@ EXPORT_SYMBOL(iov_iter_revert);
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
- else if (i->type & ITER_BVEC)
+ if (unlikely(iov_iter_is_discard(i)))
+ return i->count;
+ else if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
else
return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
-void iov_iter_kvec(struct iov_iter *i, int direction,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
const struct kvec *kvec, unsigned long nr_segs,
size_t count)
{
- BUG_ON(!(direction & ITER_KVEC));
- i->type = direction;
+ WARN_ON(direction & ~(READ | WRITE));
+ i->type = ITER_KVEC | (direction & (READ | WRITE));
i->kvec = kvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
@@ -1073,12 +1088,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_kvec);
-void iov_iter_bvec(struct iov_iter *i, int direction,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
const struct bio_vec *bvec, unsigned long nr_segs,
size_t count)
{
- BUG_ON(!(direction & ITER_BVEC));
- i->type = direction;
+ WARN_ON(direction & ~(READ | WRITE));
+ i->type = ITER_BVEC | (direction & (READ | WRITE));
i->bvec = bvec;
i->nr_segs = nr_segs;
i->iov_offset = 0;
@@ -1086,13 +1101,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_bvec);
-void iov_iter_pipe(struct iov_iter *i, int direction,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
struct pipe_inode_info *pipe,
size_t count)
{
- BUG_ON(direction != ITER_PIPE);
+ BUG_ON(direction != READ);
WARN_ON(pipe->nrbufs == pipe->buffers);
- i->type = direction;
+ i->type = ITER_PIPE | READ;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
i->iov_offset = 0;
@@ -1101,12 +1116,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
}
EXPORT_SYMBOL(iov_iter_pipe);
+/**
+ * iov_iter_discard - Initialise an I/O iterator that discards data
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator that just discards everything that's written to it.
+ * It's only available as a READ iterator.
+ */
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
+{
+ BUG_ON(direction != READ);
+ i->type = ITER_DISCARD | READ;
+ i->count = count;
+ i->iov_offset = 0;
+}
+EXPORT_SYMBOL(iov_iter_discard);
+
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
return size | i->iov_offset;
return size;
@@ -1125,7 +1158,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
unsigned long res = 0;
size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return ~0U;
}
@@ -1193,8 +1226,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (maxsize > i->count)
maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (unlikely(iov_iter_is_discard(i)))
+ return -EFAULT;
+
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1205,7 +1241,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
len = maxpages * PAGE_SIZE;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+ res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
if (unlikely(res < 0))
return res;
return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1270,8 +1306,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (maxsize > i->count)
maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (unlikely(iov_iter_is_discard(i)))
+ return -EFAULT;
+
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1283,7 +1322,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
p = get_pages_array(n);
if (!p)
return -ENOMEM;
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+ res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
@@ -1313,7 +1352,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return 0;
}
@@ -1355,7 +1394,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1);
return false;
}
@@ -1400,7 +1439,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
__wsum sum, next;
size_t off = 0;
sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
@@ -1442,8 +1481,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
if (!size)
return 0;
+ if (unlikely(iov_iter_is_discard(i)))
+ return 0;
- if (unlikely(i->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
size_t off;
int idx;
@@ -1481,11 +1522,13 @@ EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
- if (unlikely(new->type & ITER_PIPE)) {
+ if (unlikely(iov_iter_is_pipe(new))) {
WARN_ON(1);
return NULL;
}
- if (new->type & ITER_BVEC)
+ if (unlikely(iov_iter_is_discard(new)))
+ return NULL;
+ if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
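
iov_iter_discard() above sets up a READ-only iterator whose consumers simply drop the data. A rough in-kernel usage sketch for draining unwanted bytes from a file that implements ->read_iter() (the helper name is made up and error handling is trimmed):

#include <linux/fs.h>
#include <linux/uio.h>

/* Sketch: throw away 'count' bytes starting at 'pos' via a discard iterator. */
static ssize_t drain_bytes(struct file *filp, loff_t pos, size_t count)
{
	struct iov_iter iter;
	struct kiocb kiocb;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = pos;
	iov_iter_discard(&iter, READ, count);

	return call_read_iter(filp, &kiocb, &iter);
}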
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 661a1e807bd1..1006bf70bf74 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned long)tmp)
+ if (tmp != (unsigned long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(long)tmp)
+ if (tmp != (long)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(unsigned int)tmp)
+ if (tmp != (unsigned int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -253,7 +253,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(int)tmp)
+ if (tmp != (int)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u16)tmp)
+ if (tmp != (u16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s16)tmp)
+ if (tmp != (s16)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (unsigned long long)(u8)tmp)
+ if (tmp != (u8)tmp)
return -ERANGE;
*res = tmp;
return 0;
@@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
- if (tmp != (long long)(s8)tmp)
+ if (tmp != (s8)tmp)
return -ERANGE;
*res = tmp;
return 0;
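
The kstrtox cleanups drop the redundant widening casts; the remaining narrowing cast is promoted back to the wide type for the comparison, so the range check behaves exactly as before. A small user-space illustration of the idiom:

#include <stdio.h>

int main(void)
{
	unsigned long long tmp = 0x1ffffffffULL;	/* does not fit in 32 bits */

	/* Same idiom as kstrtouint(): (unsigned int)tmp is promoted back to
	 * unsigned long long, so a truncated value compares unequal. */
	printf("%s\n", tmp != (unsigned int)tmp ? "-ERANGE" : "ok");
	return 0;
}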
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 141734d255e4..0c9d3ad17e0f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -43,30 +43,36 @@
/*-*****************************
* Decompression functions
*******************************/
-/* LZ4_decompress_generic() :
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is important this generic function is really inlined,
+
+#define DEBUGLOG(l, ...) {} /* disabled */
+
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+
+/*
+ * LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
* in order to remove useless branches during compilation optimization.
*/
static FORCE_INLINE int LZ4_decompress_generic(
- const char * const source,
- char * const dest,
- int inputSize,
+ const char * const src,
+ char * const dst,
+ int srcSize,
/*
* If endOnInput == endOnInputSize,
- * this value is the max size of Output Buffer.
+ * this value is `dstCapacity`
*/
int outputSize,
/* endOnOutputSize, endOnInputSize */
- int endOnInput,
+ endCondition_directive endOnInput,
/* full, partial */
- int partialDecoding,
- /* only used if partialDecoding == partial */
- int targetOutputSize,
+ earlyEnd_directive partialDecoding,
/* noDict, withPrefix64k, usingExtDict */
- int dict,
- /* == dest when no prefix */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
const BYTE * const lowPrefix,
/* only if dict == usingExtDict */
const BYTE * const dictStart,
@@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
const size_t dictSize
)
{
- /* Local Variables */
- const BYTE *ip = (const BYTE *) source;
- const BYTE * const iend = ip + inputSize;
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE * const iend = ip + srcSize;
- BYTE *op = (BYTE *) dest;
+ BYTE *op = (BYTE *) dst;
BYTE * const oend = op + outputSize;
BYTE *cpy;
- BYTE *oexit = op + targetOutputSize;
- const BYTE * const lowLimit = lowPrefix - dictSize;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
- static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
- static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+ static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
const int safeDecode = (endOnInput == endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend = iend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend = oend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
+ srcSize, outputSize);
+
/* Special cases */
- /* targetOutputSize too high => decode everything */
- if ((partialDecoding) && (oexit > oend - MFLIMIT))
- oexit = oend - MFLIMIT;
+ assert(lowPrefix <= op);
+ assert(src != NULL);
/* Empty output buffer */
if ((endOnInput) && (unlikely(outputSize == 0)))
- return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
if ((!endOnInput) && (unlikely(outputSize == 0)))
return (*ip == 0 ? 1 : -1);
+ if ((endOnInput) && unlikely(srcSize == 0))
+ return -1;
+
/* Main Loop : decode sequences */
while (1) {
size_t length;
@@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* get literal length */
unsigned int const token = *ip++;
-
length = token>>ML_BITS;
+ /* ip < iend before the increment */
+ assert(!endOnInput || ip <= iend);
+
+ /*
+ * A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough
+ * space, enter the shortcut and copy 16 bytes on behalf
+ * of the literals (in the fast mode, only 8 bytes can be
+ * safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes
+ * in a similar manner; but we ensure that there's enough
+ * space in the output for those 18 bytes earlier, upon
+ * entering the shortcut (in other words, there is a
+ * combined check for both stages).
+ */
+ if ((endOnInput ? length != RUN_MASK : length <= 8)
+ /*
+ * strictly "less than" on input, to re-enter
+ * the loop with at least one byte
+ */
+ && likely((endOnInput ? ip < shortiend : 1) &
+ (op <= shortoend))) {
+ /* Copy the literals */
+ memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /*
+ * The second stage:
+ * prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted.
+ */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) &&
+ (offset >= 8) &&
+ (dict == withPrefix64k || match >= lowPrefix)) {
+ /* Copy the match. */
+ memcpy(op + 0, match + 0, 8);
+ memcpy(op + 8, match + 8, 8);
+ memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /*
+ * The second stage didn't work out, but the info
+ * is ready. Propel it right to the point of match
+ * copying.
+ */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
if (length == RUN_MASK) {
unsigned int s;
+ if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
+ /* overflow detection */
+ goto _output_error;
+ }
do {
s = *ip++;
length += s;
@@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic(
: 1) & (s == 255));
if ((safeDecode)
- && unlikely(
- (size_t)(op + length) < (size_t)(op))) {
+ && unlikely((uptrval)(op) +
+ length < (uptrval)(op))) {
/* overflow detection */
goto _output_error;
}
if ((safeDecode)
- && unlikely(
- (size_t)(ip + length) < (size_t)(ip))) {
+ && unlikely((uptrval)(ip) +
+ length < (uptrval)(ip))) {
/* overflow detection */
goto _output_error;
}
@@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic(
/* copy literals */
cpy = op + length;
- if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+
+ if (((endOnInput) && ((cpy > oend - MFLIMIT)
|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
if (partialDecoding) {
if (cpy > oend) {
/*
- * Error :
- * write attempt beyond end of output buffer
+ * Partial decoding :
+ * stop in the middle of literal segment
*/
- goto _output_error;
+ cpy = oend;
+ length = oend - op;
}
if ((endOnInput)
&& (ip + length > iend)) {
@@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, ip, length);
ip += length;
op += length;
+
/* Necessarily EOF, due to parsing restrictions */
- break;
+ if (!partialDecoding || (cpy == oend))
+ break;
+ } else {
+ /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
+ op = cpy;
}
- LZ4_wildCopy(op, ip, cpy);
- ip += length;
- op = cpy;
-
/* get offset */
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
- if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* get matchlength */
+ length = token & ML_MASK;
+
+_copy_match:
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
/* Error : offset outside buffers */
goto _output_error;
}
/* costs ~1%; silence an msan warning when offset == 0 */
- LZ4_write32(op, (U32)offset);
+ /*
+ * note : when partialDecoding, there is no guarantee that
+ * at least 4 bytes remain available in output buffer
+ */
+ if (!partialDecoding) {
+ assert(oend > op);
+ assert(oend - op >= 4);
+
+ LZ4_write32(op, (U32)offset);
+ }
- /* get matchlength */
- length = token & ML_MASK;
if (length == ML_MASK) {
unsigned int s;
@@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
if ((safeDecode)
&& unlikely(
- (size_t)(op + length) < (size_t)op)) {
+ (uptrval)(op) + length < (uptrval)op)) {
/* overflow detection */
goto _output_error;
}
@@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic(
length += MINMATCH;
- /* check external dictionary */
+ /* match starting within external dictionary */
if ((dict == usingExtDict) && (match < lowPrefix)) {
if (unlikely(op + length > oend - LASTLITERALS)) {
/* doesn't respect parsing restriction */
- goto _output_error;
+ if (!partialDecoding)
+ goto _output_error;
+ length = min(length, (size_t)(oend - op));
}
if (length <= (size_t)(lowPrefix - match)) {
/*
- * match can be copied as a single segment
- * from external dictionary
+ * match fits entirely within external
+ * dictionary : just copy
*/
memmove(op, dictEnd - (lowPrefix - match),
length);
op += length;
} else {
/*
- * match encompass external
+ * match stretches into both external
* dictionary and current block
*/
size_t const copySize = (size_t)(lowPrefix - match);
@@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic(
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
-
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
BYTE * const endOfMatch = op + restSize;
@@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += restSize;
}
}
-
continue;
}
/* copy match within block */
cpy = op + length;
- if (unlikely(offset < 8)) {
- const int dec64 = dec64table[offset];
+ /*
+ * partialDecoding :
+ * may not respect endBlock parsing restrictions
+ */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = min(length, (size_t)(oend - op));
+ const BYTE * const matchEnd = match + mlen;
+ BYTE * const copyEnd = op + mlen;
+
+ if (matchEnd > op) {
+ /* overlap copy */
+ while (op < copyEnd)
+ *op++ = *match++;
+ } else {
+ memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend)
+ break;
+ continue;
+ }
+ if (unlikely(offset < 8)) {
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
- match += dec32table[offset];
+ match += inc32table[offset];
memcpy(op + 4, match, 4);
- match -= dec64;
+ match -= dec64table[offset];
} else {
LZ4_copy8(op, match);
match += 8;
@@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
op += 8;
- if (unlikely(cpy > oend - 12)) {
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
if (cpy > oend - LASTLITERALS) {
@@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic(
match += oCopyLimit - op;
op = oCopyLimit;
}
-
while (op < cpy)
*op++ = *match++;
} else {
LZ4_copy8(op, match);
-
if (length > 16)
LZ4_wildCopy(op + 8, match + 8, cpy);
}
-
- op = cpy; /* correction */
+ op = cpy; /* wildcopy correction */
}
/* end of decoding */
if (endOnInput) {
/* Nb of output bytes decoded */
- return (int) (((char *)op) - dest);
+ return (int) (((char *)op) - dst);
} else {
/* Nb of input bytes read */
- return (int) (((const char *)ip) - source);
+ return (int) (((const char *)ip) - src);
}
/* Overflow error detected */
_output_error:
- return -1;
+ return (int) (-(((const char *)ip) - src)) - 1;
}
int LZ4_decompress_safe(const char *source, char *dest,
int compressedSize, int maxDecompressedSize)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block,
+ noDict, (BYTE *)dest, NULL, 0);
}
-int LZ4_decompress_safe_partial(const char *source, char *dest,
- int compressedSize, int targetOutputSize, int maxDecompressedSize)
+int LZ4_decompress_safe_partial(const char *src, char *dst,
+ int compressedSize, int targetOutputSize, int dstCapacity)
{
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxDecompressedSize, endOnInputSize, partial,
- targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+ dstCapacity = min(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE *)dst, NULL, 0);
}
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0, withPrefix64k,
- (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+ endOnOutputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+/* ===== Instantiate a few more decoding cases, used more than once. ===== */
+
+int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
+ int compressedSize,
+ int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
}
+static int LZ4_decompress_fast_extDict(const char *source, char *dest,
+ int originalSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/*
+ * The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+static FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+static FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
+ int originalSize, size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/* ===== streaming decompression functions ===== */
+
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize)
{
- LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
lz4sd->prefixSize = (size_t) dictSize;
lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int compressedSize, int maxOutputSize)
{
- LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest,
- compressedSize,
- maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict,
- lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 * KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source,
+ dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest,
+ compressedSize, maxOutputSize,
+ lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += result;
- lz4sd->prefixEnd += result;
+ lz4sd->prefixEnd += result;
} else {
+ /*
+ * The buffer wraps around, or they're
+ * switching to another buffer.
+ */
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest,
+ result = LZ4_decompress_safe_forceExtDict(source, dest,
compressedSize, maxOutputSize,
- endOnInputSize, full, 0,
- usingExtDict, (BYTE *)dest,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = result;
- lz4sd->prefixEnd = (BYTE *)dest + result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
}
return result;
@@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
- if (lz4sd->prefixEnd == (BYTE *)dest) {
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict,
- lz4sd->prefixEnd - lz4sd->prefixSize,
- lz4sd->externalDict, lz4sd->extDictSize);
-
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ if (lz4sd->prefixSize >= 64 * KB - 1 ||
+ lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest,
+ originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest,
+ originalSize, lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
-
lz4sd->prefixSize += originalSize;
- lz4sd->prefixEnd += originalSize;
+ lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, full, 0,
- usingExtDict, (BYTE *)dest,
- lz4sd->externalDict, lz4sd->extDictSize);
+ result = LZ4_decompress_fast_extDict(source, dest,
+ originalSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = originalSize;
- lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
-
return result;
}
-/*
- * Advanced decoding functions :
- * *_usingDict() :
- * These decoding functions work the same as "_continue" ones,
- * the dictionary must be explicitly provided within parameters
- */
-static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
- char *dest, int compressedSize, int maxOutputSize, int safe,
- const char *dictStart, int dictSize)
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
{
if (dictSize == 0)
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- noDict, (BYTE *)dest, NULL, 0);
- if (dictStart + dictSize == dest) {
- if (dictSize >= (int)(64 * KB - 1))
- return LZ4_decompress_generic(source, dest,
- compressedSize, maxOutputSize, safe, full, 0,
- withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, noDict,
- (BYTE *)dest - dictSize, NULL, 0);
+ return LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 * KB - 1)
+ return LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest,
+ compressedSize, maxOutputSize, dictSize);
}
- return LZ4_decompress_generic(source, dest, compressedSize,
- maxOutputSize, safe, full, 0, usingExtDict,
- (BYTE *)dest, (const BYTE *)dictStart, dictSize);
-}
-
-int LZ4_decompress_safe_usingDict(const char *source, char *dest,
- int compressedSize, int maxOutputSize,
- const char *dictStart, int dictSize)
-{
- return LZ4_decompress_usingDict_generic(source, dest,
- compressedSize, maxOutputSize, 1, dictStart, dictSize);
+ return LZ4_decompress_safe_forceExtDict(source, dest,
+ compressedSize, maxOutputSize, dictStart, dictSize);
}
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
- int originalSize, const char *dictStart, int dictSize)
+ int originalSize,
+ const char *dictStart, int dictSize)
{
- return LZ4_decompress_usingDict_generic(source, dest, 0,
- originalSize, 0, dictStart, dictSize);
+ if (dictSize == 0 || dictStart + dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+
+ return LZ4_decompress_fast_extDict(source, dest, originalSize,
+ dictStart, dictSize);
}
#ifndef STATIC
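
With the update above, LZ4_decompress_safe_partial() genuinely stops once the target number of bytes has been produced, possibly in the middle of a sequence, instead of treating that as an error. A hedged caller sketch (helper name, sizes, and error handling are illustrative):

#include <linux/errno.h>
#include <linux/lz4.h>

/* Sketch: decode only the first 'want' decompressed bytes of an LZ4 block. */
static int peek_decompressed(const char *comp, int comp_len,
			     char *out, int out_cap, int want)
{
	int got = LZ4_decompress_safe_partial(comp, out, comp_len,
					      want, out_cap);
	if (got < 0)
		return -EINVAL;		/* corrupted or truncated input */
	return got;			/* up to min(want, out_cap) bytes */
}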
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 00a0b58a0871..1a7fa9d9170f 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -75,6 +75,11 @@ typedef uintptr_t uptrval;
#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+/*
+ * ensure it's possible to write 2 x wildcopyLength
+ * without overflowing output buffer
+ */
+#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
/* Increase this value ==> compression run slower on incompressible data */
#define LZ4_SKIPTRIGGER 6
@@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
#endif
diff --git a/lib/parser.c b/lib/parser.c
index 3278958b472a..dd70e5e6c9e2 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base)
char *buf;
int ret;
long val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = 0;
val = simple_strtol(buf, &endp, base);
@@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
char *buf;
int ret;
u64 val;
- size_t len = s->to - s->from;
- buf = kmalloc(len + 1, GFP_KERNEL);
+ buf = match_strdup(s);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, len);
- buf[len] = '\0';
ret = kstrtoull(buf, base, &val);
if (!ret)
@@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy);
*/
char *match_strdup(const substring_t *s)
{
- size_t sz = s->to - s->from + 1;
- char *p = kmalloc(sz, GFP_KERNEL);
- if (p)
- match_strlcpy(p, s, sz);
- return p;
+ return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
}
EXPORT_SYMBOL(match_strdup);
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index 6dd30615a201..d1c1e6388eaa 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -148,10 +148,9 @@ static __init int sg_pool_init(void)
cleanup_sdb:
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct sg_pool *sgp = sg_pools + i;
- if (sgp->pool)
- mempool_destroy(sgp->pool);
- if (sgp->slab)
- kmem_cache_destroy(sgp->slab);
+
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
}
return -ENOMEM;
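
The sg_pool cleanup leans on the fact that both destructors are NULL-safe, so the error-unwinding loop needs no conditionals. The same idiom applies to any partial-initialisation teardown (a sketch, with illustrative names):

#include <linux/mempool.h>
#include <linux/slab.h>

/* Sketch of the cleanup idiom enabled by NULL-safe destructors. */
static void demo_teardown(struct kmem_cache *cache, mempool_t *pool)
{
	mempool_destroy(pool);		/* no-op when pool == NULL */
	kmem_cache_destroy(cache);	/* no-op when cache == NULL */
}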
diff --git a/lib/udivmoddi4.c b/lib/udivmoddi4.c
deleted file mode 100644
index c08bc8a5f1cf..000000000000
--- a/lib/udivmoddi4.c
+++ /dev/null
@@ -1,310 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.
- */
-
-#include <linux/libgcc.h>
-
-#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz(X))
-
-#define W_TYPE_SIZE 32
-
-#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
-#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
-#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
-
-/* If we still don't have umul_ppmm, define it using plain C. */
-#if !defined(umul_ppmm)
-#define umul_ppmm(w1, w0, u, v) \
- do { \
- unsigned long __x0, __x1, __x2, __x3; \
- unsigned short __ul, __vl, __uh, __vh; \
- \
- __ul = __ll_lowpart(u); \
- __uh = __ll_highpart(u); \
- __vl = __ll_lowpart(v); \
- __vh = __ll_highpart(v); \
- \
- __x0 = (unsigned long) __ul * __vl; \
- __x1 = (unsigned long) __ul * __vh; \
- __x2 = (unsigned long) __uh * __vl; \
- __x3 = (unsigned long) __uh * __vh; \
- \
- __x1 += __ll_highpart(__x0); \
- __x1 += __x2; \
- if (__x1 < __x2) \
- __x3 += __ll_B; \
- \
- (w1) = __x3 + __ll_highpart(__x1); \
- (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
- } while (0)
-#endif
-
-#if !defined(sub_ddmmss)
-#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- do { \
- unsigned long __x; \
- __x = (al) - (bl); \
- (sh) = (ah) - (bh) - (__x > (al)); \
- (sl) = __x; \
- } while (0)
-#endif
-
-/* Define this unconditionally, so it can be used for debugging. */
-#define __udiv_qrnnd_c(q, r, n1, n0, d) \
- do { \
- unsigned long __d1, __d0, __q1, __q0; \
- unsigned long __r1, __r0, __m; \
- __d1 = __ll_highpart(d); \
- __d0 = __ll_lowpart(d); \
- \
- __r1 = (n1) % __d1; \
- __q1 = (n1) / __d1; \
- __m = (unsigned long) __q1 * __d0; \
- __r1 = __r1 * __ll_B | __ll_highpart(n0); \
- if (__r1 < __m) { \
- __q1--, __r1 += (d); \
- if (__r1 >= (d)) \
- if (__r1 < __m) \
- __q1--, __r1 += (d); \
- } \
- __r1 -= __m; \
- \
- __r0 = __r1 % __d1; \
- __q0 = __r1 / __d1; \
- __m = (unsigned long) __q0 * __d0; \
- __r0 = __r0 * __ll_B | __ll_lowpart(n0); \
- if (__r0 < __m) { \
- __q0--, __r0 += (d); \
- if (__r0 >= (d)) \
- if (__r0 < __m) \
- __q0--, __r0 += (d); \
- } \
- __r0 -= __m; \
- \
- (q) = (unsigned long) __q1 * __ll_B | __q0; \
- (r) = __r0; \
- } while (0)
-
-/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
-#if !defined(udiv_qrnnd)
-#define UDIV_NEEDS_NORMALIZATION 1
-#define udiv_qrnnd __udiv_qrnnd_c
-#endif
-
-unsigned long long __udivmoddi4(unsigned long long u, unsigned long long v,
- unsigned long long *rp)
-{
- const DWunion nn = {.ll = u };
- const DWunion dd = {.ll = v };
- DWunion rr, ww;
- unsigned long d0, d1, n0, n1, n2;
- unsigned long q0 = 0, q1 = 0;
- unsigned long b, bm;
-
- d0 = dd.s.low;
- d1 = dd.s.high;
- n0 = nn.s.low;
- n1 = nn.s.high;
-
-#if !UDIV_NEEDS_NORMALIZATION
-
- if (d1 == 0) {
- if (d0 > n1) {
- /* 0q = nn / 0D */
-
- udiv_qrnnd(q0, n0, n1, n0, d0);
- q1 = 0;
-
- /* Remainder in n0. */
- } else {
- /* qq = NN / 0d */
-
- if (d0 == 0)
- /* Divide intentionally by zero. */
- d0 = 1 / d0;
-
- udiv_qrnnd(q1, n1, 0, n1, d0);
- udiv_qrnnd(q0, n0, n1, n0, d0);
-
- /* Remainder in n0. */
- }
-
- if (rp != 0) {
- rr.s.low = n0;
- rr.s.high = 0;
- *rp = rr.ll;
- }
-
-#else /* UDIV_NEEDS_NORMALIZATION */
-
- if (d1 == 0) {
- if (d0 > n1) {
- /* 0q = nn / 0D */
-
- count_leading_zeros(bm, d0);
-
- if (bm != 0) {
- /*
- * Normalize, i.e. make the most significant bit
- * of the denominator set.
- */
-
- d0 = d0 << bm;
- n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
- n0 = n0 << bm;
- }
-
- udiv_qrnnd(q0, n0, n1, n0, d0);
- q1 = 0;
-
- /* Remainder in n0 >> bm. */
- } else {
- /* qq = NN / 0d */
-
- if (d0 == 0)
- /* Divide intentionally by zero. */
- d0 = 1 / d0;
-
- count_leading_zeros(bm, d0);
-
- if (bm == 0) {
- /*
- * From (n1 >= d0) /\ (the most significant bit
- * of d0 is set), conclude (the most significant
- * bit of n1 is set) /\ (theleading quotient
- * digit q1 = 1).
- *
- * This special case is necessary, not an
- * optimization. (Shifts counts of W_TYPE_SIZE
- * are undefined.)
- */
-
- n1 -= d0;
- q1 = 1;
- } else {
- /* Normalize. */
-
- b = W_TYPE_SIZE - bm;
-
- d0 = d0 << bm;
- n2 = n1 >> b;
- n1 = (n1 << bm) | (n0 >> b);
- n0 = n0 << bm;
-
- udiv_qrnnd(q1, n1, n2, n1, d0);
- }
-
- /* n1 != d0... */
-
- udiv_qrnnd(q0, n0, n1, n0, d0);
-
- /* Remainder in n0 >> bm. */
- }
-
- if (rp != 0) {
- rr.s.low = n0 >> bm;
- rr.s.high = 0;
- *rp = rr.ll;
- }
-
-#endif /* UDIV_NEEDS_NORMALIZATION */
-
- } else {
- if (d1 > n1) {
- /* 00 = nn / DD */
-
- q0 = 0;
- q1 = 0;
-
- /* Remainder in n1n0. */
- if (rp != 0) {
- rr.s.low = n0;
- rr.s.high = n1;
- *rp = rr.ll;
- }
- } else {
- /* 0q = NN / dd */
-
- count_leading_zeros(bm, d1);
- if (bm == 0) {
- /*
- * From (n1 >= d1) /\ (the most significant bit
- * of d1 is set), conclude (the most significant
- * bit of n1 is set) /\ (the quotient digit q0 =
- * 0 or 1).
- *
- * This special case is necessary, not an
- * optimization.
- */
-
- /*
- * The condition on the next line takes
- * advantage of that n1 >= d1 (true due to
- * program flow).
- */
- if (n1 > d1 || n0 >= d0) {
- q0 = 1;
- sub_ddmmss(n1, n0, n1, n0, d1, d0);
- } else {
- q0 = 0;
- }
-
- q1 = 0;
-
- if (rp != 0) {
- rr.s.low = n0;
- rr.s.high = n1;
- *rp = rr.ll;
- }
- } else {
- unsigned long m1, m0;
- /* Normalize. */
-
- b = W_TYPE_SIZE - bm;
-
- d1 = (d1 << bm) | (d0 >> b);
- d0 = d0 << bm;
- n2 = n1 >> b;
- n1 = (n1 << bm) | (n0 >> b);
- n0 = n0 << bm;
-
- udiv_qrnnd(q0, n1, n2, n1, d1);
- umul_ppmm(m1, m0, q0, d0);
-
- if (m1 > n1 || (m1 == n1 && m0 > n0)) {
- q0--;
- sub_ddmmss(m1, m0, m1, m0, d1, d0);
- }
-
- q1 = 0;
-
- /* Remainder in (n1n0 - m1m0) >> bm. */
- if (rp != 0) {
- sub_ddmmss(n1, n0, n1, n0, m1, m0);
- rr.s.low = (n1 << b) | (n0 >> bm);
- rr.s.high = n1 >> bm;
- *rp = rr.ll;
- }
- }
- }
- }
-
- ww.s.low = q0;
- ww.s.high = q1;
-
- return ww.ll;
-}
diff --git a/lib/umoddi3.c b/lib/umoddi3.c
deleted file mode 100644
index d7bbf0f85197..000000000000
--- a/lib/umoddi3.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/libgcc.h>
-
-extern unsigned long long __udivmoddi4(unsigned long long u,
- unsigned long long v,
- unsigned long long *rp);
-
-unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
-{
- unsigned long long w;
- (void)__udivmoddi4(u, v, &w);
- return w;
-}
-EXPORT_SYMBOL(__umoddi3);
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 58a733b10387..48f14cd58c77 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
+ /* fall through */
case DICT:
if (state->havedict == 0) {
RESTORE();
@@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush)
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
+ /* fall through */
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
+ /* fall through */
case TYPEDO:
if (state->last) {
BYTEBITS();
@@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
+ /* fall through */
case COPY:
copy = state->length;
if (copy) {
@@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush)
#endif
state->have = 0;
state->mode = LENLENS;
+ /* fall through */
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
@@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->have = 0;
state->mode = CODELENS;
+ /* fall through */
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
@@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = LEN;
+ /* fall through */
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
@@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
+ /* fall through */
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush)
DROPBITS(state->extra);
}
state->mode = DIST;
+ /* fall through */
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
+ /* fall through */
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = MATCH;
+ /* fall through */
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
INITBITS();
}
state->mode = DONE;
+ /* fall through */
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
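
The annotations above mark the deliberate case fall-throughs in the inflate state machine so that -Wimplicit-fallthrough builds stay quiet. The pattern in miniature (user-space, purely illustrative):

#include <stdio.h>

/* Miniature version of the annotation style used above: the comment marks
 * an intentional fall-through for -Wimplicit-fallthrough. */
static const char *classify(int n)
{
	switch (n) {
	case 0:
		n = 1;
		/* fall through */
	case 1:
		return "small";
	default:
		return "large";
	}
}

int main(void)
{
	printf("%s %s\n", classify(0), classify(5));
	return 0;
}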
diff --git a/mm/Kconfig b/mm/Kconfig
index 02301a89089e..d85e39da47ae 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -127,9 +127,6 @@ config SPARSEMEM_VMEMMAP
pfn_to_page and page_to_pfn operations. This is the most
efficient option when sufficient kernel resources are available.
-config HAVE_MEMBLOCK
- bool
-
config HAVE_MEMBLOCK_NODE_MAP
bool
@@ -142,9 +139,6 @@ config HAVE_GENERIC_GUP
config ARCH_DISCARD_MEMBLOCK
bool
-config NO_BOOTMEM
- bool
-
config MEMORY_ISOLATION
bool
@@ -481,7 +475,7 @@ config FRONTSWAP
config CMA
bool "Contiguous Memory Allocator"
- depends on HAVE_MEMBLOCK && MMU
+ depends on MMU
select MIGRATION
select MEMORY_ISOLATION
help
@@ -634,7 +628,6 @@ config MAX_STACK_SIZE_MB
config DEFERRED_STRUCT_PAGE_INIT
bool "Defer initialisation of struct pages to kthreads"
default n
- depends on NO_BOOTMEM
depends on SPARSEMEM
depends on !NEED_PER_CPU_KM
depends on 64BIT
diff --git a/mm/Makefile b/mm/Makefile
index 6485d5745dd7..d210cc9d6f80 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -42,17 +42,11 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
debug.o $(mmu-y)
obj-y += init-mm.o
-
-ifdef CONFIG_NO_BOOTMEM
- obj-y += nobootmem.o
-else
- obj-y += bootmem.o
-endif
+obj-y += memblock.o
ifdef CONFIG_MMU
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
endif
-obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
deleted file mode 100644
index 97db0e8e362b..000000000000
--- a/mm/bootmem.c
+++ /dev/null
@@ -1,811 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bootmem - A boot-time physical memory allocator and configurator
- *
- * Copyright (C) 1999 Ingo Molnar
- * 1999 Kanoj Sarcar, SGI
- * 2008 Johannes Weiner
- *
- * Access to this subsystem has to be serialized externally (which is true
- * for the boot process anyway).
- */
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/kmemleak.h>
-#include <linux/range.h>
-#include <linux/bug.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-
-#include "internal.h"
-
-/**
- * DOC: bootmem overview
- *
- * Bootmem is a boot-time physical memory allocator and configurator.
- *
- * It is used early in the boot process before the page allocator is
- * set up.
- *
- * Bootmem is based on the most basic of allocators, a First Fit
- * allocator which uses a bitmap to represent memory. If a bit is 1,
- * the page is allocated and 0 if unallocated. To satisfy allocations
- * of sizes smaller than a page, the allocator records the Page Frame
- * Number (PFN) of the last allocation and the offset the allocation
- * ended at. Subsequent small allocations are merged together and
- * stored on the same page.
- *
- * The information used by the bootmem allocator is represented by
- * :c:type:`struct bootmem_data`. An array to hold up to %MAX_NUMNODES
- * such structures is statically allocated and then it is discarded
- * when the system initialization completes. Each entry in this array
- * corresponds to a node with memory. For UMA systems only entry 0 is
- * used.
- *
- * The bootmem allocator is initialized during early architecture
- * specific setup. Each architecture is required to supply a
- * :c:func:`setup_arch` function which, among other tasks, is
- * responsible for acquiring the necessary parameters to initialise
- * the boot memory allocator. These parameters define limits of usable
- * physical memory:
- *
- * * @min_low_pfn - the lowest PFN that is available in the system
- * * @max_low_pfn - the highest PFN that may be addressed by low
- * memory (%ZONE_NORMAL)
- * * @max_pfn - the last PFN available to the system.
- *
- * After those limits are determined, the :c:func:`init_bootmem` or
- * :c:func:`init_bootmem_node` function should be called to initialize
- * the bootmem allocator. The UMA case should use the `init_bootmem`
- * function. It will initialize ``contig_page_data`` structure that
- * represents the only memory node in the system. In the NUMA case the
- * `init_bootmem_node` function should be called to initialize the
- * bootmem allocator for each node.
- *
- * Once the allocator is set up, it is possible to use either single
- * node or NUMA variant of the allocation APIs.
- */
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = {
- .bdata = &bootmem_node_data[0]
-};
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
-unsigned long max_low_pfn;
-unsigned long min_low_pfn;
-unsigned long max_pfn;
-unsigned long long max_possible_pfn;
-
-bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
-
-static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
-
-static int bootmem_debug;
-
-static int __init bootmem_debug_setup(char *buf)
-{
- bootmem_debug = 1;
- return 0;
-}
-early_param("bootmem_debug", bootmem_debug_setup);
-
-#define bdebug(fmt, args...) ({ \
- if (unlikely(bootmem_debug)) \
- pr_info("bootmem::%s " fmt, \
- __func__, ## args); \
-})
-
-static unsigned long __init bootmap_bytes(unsigned long pages)
-{
- unsigned long bytes = DIV_ROUND_UP(pages, BITS_PER_BYTE);
-
- return ALIGN(bytes, sizeof(long));
-}
-
-/**
- * bootmem_bootmap_pages - calculate bitmap size in pages
- * @pages: number of pages the bitmap has to represent
- *
- * Return: the number of pages needed to hold the bitmap.
- */
-unsigned long __init bootmem_bootmap_pages(unsigned long pages)
-{
- unsigned long bytes = bootmap_bytes(pages);
-
- return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
-}
-
-/*
- * link bdata in order
- */
-static void __init link_bootmem(bootmem_data_t *bdata)
-{
- bootmem_data_t *ent;
-
- list_for_each_entry(ent, &bdata_list, list) {
- if (bdata->node_min_pfn < ent->node_min_pfn) {
- list_add_tail(&bdata->list, &ent->list);
- return;
- }
- }
-
- list_add_tail(&bdata->list, &bdata_list);
-}
-
-/*
- * Called once to set up the allocator itself.
- */
-static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
- unsigned long mapstart, unsigned long start, unsigned long end)
-{
- unsigned long mapsize;
-
- mminit_validate_memmodel_limits(&start, &end);
- bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
- bdata->node_min_pfn = start;
- bdata->node_low_pfn = end;
- link_bootmem(bdata);
-
- /*
- * Initially all pages are reserved - setup_arch() has to
- * register free RAM areas explicitly.
- */
- mapsize = bootmap_bytes(end - start);
- memset(bdata->node_bootmem_map, 0xff, mapsize);
-
- bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
- bdata - bootmem_node_data, start, mapstart, end, mapsize);
-
- return mapsize;
-}
-
-/**
- * init_bootmem_node - register a node as boot memory
- * @pgdat: node to register
- * @freepfn: pfn where the bitmap for this node is to be placed
- * @startpfn: first pfn on the node
- * @endpfn: first pfn after the node
- *
- * Return: the number of bytes needed to hold the bitmap for this node.
- */
-unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
- unsigned long startpfn, unsigned long endpfn)
-{
- return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
-}
-
-/**
- * init_bootmem - register boot memory
- * @start: pfn where the bitmap is to be placed
- * @pages: number of available physical pages
- *
- * Return: the number of bytes needed to hold the bitmap.
- */
-unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
-{
- max_low_pfn = pages;
- min_low_pfn = start;
- return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
-}
-
-void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
-{
- unsigned long cursor, end;
-
- kmemleak_free_part_phys(physaddr, size);
-
- cursor = PFN_UP(physaddr);
- end = PFN_DOWN(physaddr + size);
-
- for (; cursor < end; cursor++) {
- __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
- totalram_pages++;
- }
-}
-
-static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
-{
- struct page *page;
- unsigned long *map, start, end, pages, cur, count = 0;
-
- if (!bdata->node_bootmem_map)
- return 0;
-
- map = bdata->node_bootmem_map;
- start = bdata->node_min_pfn;
- end = bdata->node_low_pfn;
-
- bdebug("nid=%td start=%lx end=%lx\n",
- bdata - bootmem_node_data, start, end);
-
- while (start < end) {
- unsigned long idx, vec;
- unsigned shift;
-
- idx = start - bdata->node_min_pfn;
- shift = idx & (BITS_PER_LONG - 1);
- /*
- * vec holds at most BITS_PER_LONG map bits,
- * bit 0 corresponds to start.
- */
- vec = ~map[idx / BITS_PER_LONG];
-
- if (shift) {
- vec >>= shift;
- if (end - start >= BITS_PER_LONG)
- vec |= ~map[idx / BITS_PER_LONG + 1] <<
- (BITS_PER_LONG - shift);
- }
- /*
- * If we have a properly aligned and fully unreserved
- * BITS_PER_LONG block of pages in front of us, free
- * it in one go.
- */
- if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
- int order = ilog2(BITS_PER_LONG);
-
- __free_pages_bootmem(pfn_to_page(start), start, order);
- count += BITS_PER_LONG;
- start += BITS_PER_LONG;
- } else {
- cur = start;
-
- start = ALIGN(start + 1, BITS_PER_LONG);
- while (vec && cur != start) {
- if (vec & 1) {
- page = pfn_to_page(cur);
- __free_pages_bootmem(page, cur, 0);
- count++;
- }
- vec >>= 1;
- ++cur;
- }
- }
- }
-
- cur = bdata->node_min_pfn;
- page = virt_to_page(bdata->node_bootmem_map);
- pages = bdata->node_low_pfn - bdata->node_min_pfn;
- pages = bootmem_bootmap_pages(pages);
- count += pages;
- while (pages--)
- __free_pages_bootmem(page++, cur++, 0);
- bdata->node_bootmem_map = NULL;
-
- bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
-
- return count;
-}
-
-static int reset_managed_pages_done __initdata;
-
-void reset_node_managed_pages(pg_data_t *pgdat)
-{
- struct zone *z;
-
- for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- z->managed_pages = 0;
-}
-
-void __init reset_all_zones_managed_pages(void)
-{
- struct pglist_data *pgdat;
-
- if (reset_managed_pages_done)
- return;
-
- for_each_online_pgdat(pgdat)
- reset_node_managed_pages(pgdat);
-
- reset_managed_pages_done = 1;
-}
-
-unsigned long __init free_all_bootmem(void)
-{
- unsigned long total_pages = 0;
- bootmem_data_t *bdata;
-
- reset_all_zones_managed_pages();
-
- list_for_each_entry(bdata, &bdata_list, list)
- total_pages += free_all_bootmem_core(bdata);
-
- totalram_pages += total_pages;
-
- return total_pages;
-}
-
-static void __init __free(bootmem_data_t *bdata,
- unsigned long sidx, unsigned long eidx)
-{
- unsigned long idx;
-
- bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
- sidx + bdata->node_min_pfn,
- eidx + bdata->node_min_pfn);
-
- if (WARN_ON(bdata->node_bootmem_map == NULL))
- return;
-
- if (bdata->hint_idx > sidx)
- bdata->hint_idx = sidx;
-
- for (idx = sidx; idx < eidx; idx++)
- if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
- BUG();
-}
-
-static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
- unsigned long eidx, int flags)
-{
- unsigned long idx;
- int exclusive = flags & BOOTMEM_EXCLUSIVE;
-
- bdebug("nid=%td start=%lx end=%lx flags=%x\n",
- bdata - bootmem_node_data,
- sidx + bdata->node_min_pfn,
- eidx + bdata->node_min_pfn,
- flags);
-
- if (WARN_ON(bdata->node_bootmem_map == NULL))
- return 0;
-
- for (idx = sidx; idx < eidx; idx++)
- if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
- if (exclusive) {
- __free(bdata, sidx, idx);
- return -EBUSY;
- }
- bdebug("silent double reserve of PFN %lx\n",
- idx + bdata->node_min_pfn);
- }
- return 0;
-}
-
-static int __init mark_bootmem_node(bootmem_data_t *bdata,
- unsigned long start, unsigned long end,
- int reserve, int flags)
-{
- unsigned long sidx, eidx;
-
- bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
- bdata - bootmem_node_data, start, end, reserve, flags);
-
- BUG_ON(start < bdata->node_min_pfn);
- BUG_ON(end > bdata->node_low_pfn);
-
- sidx = start - bdata->node_min_pfn;
- eidx = end - bdata->node_min_pfn;
-
- if (reserve)
- return __reserve(bdata, sidx, eidx, flags);
- else
- __free(bdata, sidx, eidx);
- return 0;
-}
-
-static int __init mark_bootmem(unsigned long start, unsigned long end,
- int reserve, int flags)
-{
- unsigned long pos;
- bootmem_data_t *bdata;
-
- pos = start;
- list_for_each_entry(bdata, &bdata_list, list) {
- int err;
- unsigned long max;
-
- if (pos < bdata->node_min_pfn ||
- pos >= bdata->node_low_pfn) {
- BUG_ON(pos != start);
- continue;
- }
-
- max = min(bdata->node_low_pfn, end);
-
- err = mark_bootmem_node(bdata, pos, max, reserve, flags);
- if (reserve && err) {
- mark_bootmem(start, pos, 0, 0);
- return err;
- }
-
- if (max == end)
- return 0;
- pos = bdata->node_low_pfn;
- }
- BUG();
-}
-
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size)
-{
- unsigned long start, end;
-
- kmemleak_free_part_phys(physaddr, size);
-
- start = PFN_UP(physaddr);
- end = PFN_DOWN(physaddr + size);
-
- mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
-}
-
-void __init free_bootmem(unsigned long physaddr, unsigned long size)
-{
- unsigned long start, end;
-
- kmemleak_free_part_phys(physaddr, size);
-
- start = PFN_UP(physaddr);
- end = PFN_DOWN(physaddr + size);
-
- mark_bootmem(start, end, 0, 0);
-}
-
-/**
- * reserve_bootmem_node - mark a page range as reserved
- * @pgdat: node the range resides on
- * @physaddr: starting address of the range
- * @size: size of the range in bytes
- * @flags: reservation flags (see linux/bootmem.h)
- *
- * Partial pages will be reserved.
- *
- * The range must reside completely on the specified node.
- *
- * Return: 0 on success, -errno on failure.
- */
-int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size, int flags)
-{
- unsigned long start, end;
-
- start = PFN_DOWN(physaddr);
- end = PFN_UP(physaddr + size);
-
- return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
-}
-
-/**
- * reserve_bootmem - mark a page range as reserved
- * @addr: starting address of the range
- * @size: size of the range in bytes
- * @flags: reservation flags (see linux/bootmem.h)
- *
- * Partial pages will be reserved.
- *
- * The range must be contiguous but may span node boundaries.
- *
- * Return: 0 on success, -errno on failure.
- */
-int __init reserve_bootmem(unsigned long addr, unsigned long size,
- int flags)
-{
- unsigned long start, end;
-
- start = PFN_DOWN(addr);
- end = PFN_UP(addr + size);
-
- return mark_bootmem(start, end, 1, flags);
-}
-
-static unsigned long __init align_idx(struct bootmem_data *bdata,
- unsigned long idx, unsigned long step)
-{
- unsigned long base = bdata->node_min_pfn;
-
- /*
- * Align the index with respect to the node start so that the
- * combination of both satisfies the requested alignment.
- */
-
- return ALIGN(base + idx, step) - base;
-}
-
-static unsigned long __init align_off(struct bootmem_data *bdata,
- unsigned long off, unsigned long align)
-{
- unsigned long base = PFN_PHYS(bdata->node_min_pfn);
-
- /* Same as align_idx for byte offsets */
-
- return ALIGN(base + off, align) - base;
-}
-
-static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- unsigned long fallback = 0;
- unsigned long min, max, start, sidx, midx, step;
-
- bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
- bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
- align, goal, limit);
-
- BUG_ON(!size);
- BUG_ON(align & (align - 1));
- BUG_ON(limit && goal + size > limit);
-
- if (!bdata->node_bootmem_map)
- return NULL;
-
- min = bdata->node_min_pfn;
- max = bdata->node_low_pfn;
-
- goal >>= PAGE_SHIFT;
- limit >>= PAGE_SHIFT;
-
- if (limit && max > limit)
- max = limit;
- if (max <= min)
- return NULL;
-
- step = max(align >> PAGE_SHIFT, 1UL);
-
- if (goal && min < goal && goal < max)
- start = ALIGN(goal, step);
- else
- start = ALIGN(min, step);
-
- sidx = start - bdata->node_min_pfn;
- midx = max - bdata->node_min_pfn;
-
- if (bdata->hint_idx > sidx) {
- /*
- * Handle the valid case of sidx being zero and still
- * catch the fallback below.
- */
- fallback = sidx + 1;
- sidx = align_idx(bdata, bdata->hint_idx, step);
- }
-
- while (1) {
- int merge;
- void *region;
- unsigned long eidx, i, start_off, end_off;
-find_block:
- sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
- sidx = align_idx(bdata, sidx, step);
- eidx = sidx + PFN_UP(size);
-
- if (sidx >= midx || eidx > midx)
- break;
-
- for (i = sidx; i < eidx; i++)
- if (test_bit(i, bdata->node_bootmem_map)) {
- sidx = align_idx(bdata, i, step);
- if (sidx == i)
- sidx += step;
- goto find_block;
- }
-
- if (bdata->last_end_off & (PAGE_SIZE - 1) &&
- PFN_DOWN(bdata->last_end_off) + 1 == sidx)
- start_off = align_off(bdata, bdata->last_end_off, align);
- else
- start_off = PFN_PHYS(sidx);
-
- merge = PFN_DOWN(start_off) < sidx;
- end_off = start_off + size;
-
- bdata->last_end_off = end_off;
- bdata->hint_idx = PFN_UP(end_off);
-
- /*
- * Reserve the area now:
- */
- if (__reserve(bdata, PFN_DOWN(start_off) + merge,
- PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
- BUG();
-
- region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
- start_off);
- memset(region, 0, size);
- /*
- * The min_count is set to 0 so that bootmem allocated blocks
- * are never reported as leaks.
- */
- kmemleak_alloc(region, size, 0, 0);
- return region;
- }
-
- if (fallback) {
- sidx = align_idx(bdata, fallback - 1, step);
- fallback = 0;
- goto find_block;
- }
-
- return NULL;
-}
-
-static void * __init alloc_bootmem_core(unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- bootmem_data_t *bdata;
- void *region;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
- list_for_each_entry(bdata, &bdata_list, list) {
- if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
- continue;
- if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
- break;
-
- region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
- if (region)
- return region;
- }
-
- return NULL;
-}
-
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
-restart:
- ptr = alloc_bootmem_core(size, align, goal, limit);
- if (ptr)
- return ptr;
- if (goal) {
- goal = 0;
- goto restart;
- }
-
- return NULL;
-}
-
-void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = 0;
-
- return ___alloc_bootmem_nopanic(size, align, goal, limit);
-}
-
-static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
-
- if (mem)
- return mem;
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-void * __init __alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = 0;
-
- return ___alloc_bootmem(size, align, goal, limit);
-}
-
-void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-again:
-
- /* do not panic in alloc_bootmem_bdata() */
- if (limit && goal + size > limit)
- limit = 0;
-
- ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
- if (ptr)
- return ptr;
-
- ptr = alloc_bootmem_core(size, align, goal, limit);
- if (ptr)
- return ptr;
-
- if (goal) {
- goal = 0;
- goto again;
- }
-
- return NULL;
-}
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
-}
-
-void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
- ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
-}
-
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
-#ifdef MAX_DMA32_PFN
- unsigned long end_pfn;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- /* update goal according ...MAX_DMA32_PFN */
- end_pfn = pgdat_end_pfn(pgdat);
-
- if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
- (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
- void *ptr;
- unsigned long new_goal;
-
- new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
- ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
- new_goal, 0);
- if (ptr)
- return ptr;
- }
-#endif
-
- return __alloc_bootmem_node(pgdat, size, align, goal);
-
-}
-
-void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
-}
-
-void * __init __alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem_nopanic(size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
-
-void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
-}
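
The DOC comment deleted above describes bootmem as a first-fit allocator over a per-node page bitmap, where a set bit means the page is reserved and the PFN and end offset of the last allocation are remembered so small allocations can share a page. A stripped-down userspace sketch of just the first-fit bitmap search (illustrative only: the names and the fixed 1024-page map are made up, and none of bootmem's sub-page merging, node handling or alignment logic is reproduced):

#include <stdbool.h>
#include <stdio.h>

#define NPAGES		1024UL
#define BITS_PER_WORD	(8 * sizeof(unsigned long))

/* one bit per page frame: 1 = reserved, 0 = free */
static unsigned long page_bitmap[NPAGES / BITS_PER_WORD];

static bool page_reserved(unsigned long pfn)
{
	return page_bitmap[pfn / BITS_PER_WORD] & (1UL << (pfn % BITS_PER_WORD));
}

static void reserve_page(unsigned long pfn)
{
	page_bitmap[pfn / BITS_PER_WORD] |= 1UL << (pfn % BITS_PER_WORD);
}

/*
 * First fit: scan from the lowest pfn for a run of @count consecutive
 * free pages, mark the run reserved and return its first pfn, or -1 if
 * no such run exists.
 */
static long first_fit_alloc(unsigned long count)
{
	unsigned long start, i;

	for (start = 0; start + count <= NPAGES; start++) {
		for (i = 0; i < count; i++)
			if (page_reserved(start + i))
				break;
		if (i == count) {
			for (i = 0; i < count; i++)
				reserve_page(start + i);
			return (long)start;
		}
	}
	return -1;
}

int main(void)
{
	printf("first allocation at pfn %ld\n", first_fit_alloc(4));	/* 0 */
	printf("second allocation at pfn %ld\n", first_fit_alloc(4));	/* 4 */
	return 0;
}

memblock, which replaces bootmem in this series, drops the bitmap entirely and instead tracks two arrays of physical ranges (memory and reserved), which is why the deleted code has no direct counterpart in mm/memblock.c.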
diff --git a/mm/filemap.c b/mm/filemap.c
index 218d0b2ec82d..81adec8ee02c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2049,7 +2049,7 @@ find_page:
!mapping->a_ops->is_partially_uptodate)
goto page_not_up_to_date;
/* pipes can't handle partially uptodate pages */
- if (unlikely(iter->type & ITER_PIPE))
+ if (unlikely(iov_iter_is_pipe(iter)))
goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
@@ -2825,6 +2825,42 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
EXPORT_SYMBOL(read_cache_page_gfp);
/*
+ * Don't operate on ranges the page cache doesn't support, and don't exceed the
+ * LFS limits. If pos is under the limit it becomes a short access. If it
+ * exceeds the limit we return -EFBIG.
+ */
+static int generic_access_check_limits(struct file *file, loff_t pos,
+ loff_t *count)
+{
+ struct inode *inode = file->f_mapping->host;
+ loff_t max_size = inode->i_sb->s_maxbytes;
+
+ if (!(file->f_flags & O_LARGEFILE))
+ max_size = MAX_NON_LFS;
+
+ if (unlikely(pos >= max_size))
+ return -EFBIG;
+ *count = min(*count, max_size - pos);
+ return 0;
+}
+
+static int generic_write_check_limits(struct file *file, loff_t pos,
+ loff_t *count)
+{
+ loff_t limit = rlimit(RLIMIT_FSIZE);
+
+ if (limit != RLIM_INFINITY) {
+ if (pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+ }
+ *count = min(*count, limit - pos);
+ }
+
+ return generic_access_check_limits(file, pos, count);
+}
+
+/*
* Performs necessary checks before doing a write
*
* Can adjust writing position or amount of bytes to write.
@@ -2835,8 +2871,8 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- unsigned long limit = rlimit(RLIMIT_FSIZE);
- loff_t pos;
+ loff_t count;
+ int ret;
if (!iov_iter_count(from))
return 0;
@@ -2845,43 +2881,99 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_flags & IOCB_APPEND)
iocb->ki_pos = i_size_read(inode);
- pos = iocb->ki_pos;
-
if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
return -EINVAL;
- if (limit != RLIM_INFINITY) {
- if (iocb->ki_pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- iov_iter_truncate(from, limit - (unsigned long)pos);
- }
+ count = iov_iter_count(from);
+ ret = generic_write_check_limits(file, iocb->ki_pos, &count);
+ if (ret)
+ return ret;
+
+ iov_iter_truncate(from, count);
+ return iov_iter_count(from);
+}
+EXPORT_SYMBOL(generic_write_checks);
+
+/*
+ * Performs necessary checks before doing a clone.
+ *
+ * Can adjust amount of bytes to clone.
+ * Returns an appropriate error code that the caller should return, or
+ * zero in case the clone should be allowed.
+ */
+int generic_remap_checks(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *req_count, unsigned int remap_flags)
+{
+ struct inode *inode_in = file_in->f_mapping->host;
+ struct inode *inode_out = file_out->f_mapping->host;
+ uint64_t count = *req_count;
+ uint64_t bcount;
+ loff_t size_in, size_out;
+ loff_t bs = inode_out->i_sb->s_blocksize;
+ int ret;
+
+ /* The start of both ranges must be aligned to an fs block. */
+ if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
+ return -EINVAL;
+
+ /* Ensure offsets don't wrap. */
+ if (pos_in + count < pos_in || pos_out + count < pos_out)
+ return -EINVAL;
+
+ size_in = i_size_read(inode_in);
+ size_out = i_size_read(inode_out);
+
+ /* Dedupe requires both ranges to be within EOF. */
+ if ((remap_flags & REMAP_FILE_DEDUP) &&
+ (pos_in >= size_in || pos_in + count > size_in ||
+ pos_out >= size_out || pos_out + count > size_out))
+ return -EINVAL;
+
+ /* Ensure the infile range is within the infile. */
+ if (pos_in >= size_in)
+ return -EINVAL;
+ count = min(count, size_in - (uint64_t)pos_in);
+
+ ret = generic_access_check_limits(file_in, pos_in, &count);
+ if (ret)
+ return ret;
+
+ ret = generic_write_check_limits(file_out, pos_out, &count);
+ if (ret)
+ return ret;
/*
- * LFS rule
+ * If the user wanted us to link to the infile's EOF, round up to the
+ * next block boundary for this check.
+ *
+ * Otherwise, make sure the count is also block-aligned, having
+ * already confirmed the starting offsets' block alignment.
*/
- if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
- !(file->f_flags & O_LARGEFILE))) {
- if (pos >= MAX_NON_LFS)
- return -EFBIG;
- iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
+ if (pos_in + count == size_in) {
+ bcount = ALIGN(size_in, bs) - pos_in;
+ } else {
+ if (!IS_ALIGNED(count, bs))
+ count = ALIGN_DOWN(count, bs);
+ bcount = count;
}
+ /* Don't allow overlapped cloning within the same file. */
+ if (inode_in == inode_out &&
+ pos_out + bcount > pos_in &&
+ pos_out < pos_in + bcount)
+ return -EINVAL;
+
/*
- * Are we about to exceed the fs block limit ?
- *
- * If we have written data it becomes a short write. If we have
- * exceeded without writing data we send a signal and return EFBIG.
- * Linus frestrict idea will clean these up nicely..
+ * We shortened the request but the caller can't deal with that, so
+ * bounce the request back to userspace.
*/
- if (unlikely(pos >= inode->i_sb->s_maxbytes))
- return -EFBIG;
+ if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
+ return -EINVAL;
- iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
- return iov_iter_count(from);
+ *req_count = count;
+ return 0;
}
-EXPORT_SYMBOL(generic_write_checks);
int pagecache_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
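
The helpers added above split the old inline checks into two reusable clamps: generic_write_check_limits() applies the RLIMIT_FSIZE limit (raising SIGXFSZ and failing with -EFBIG when the write starts at or past it) and then generic_access_check_limits() applies the s_maxbytes / MAX_NON_LFS limit, with both shortening *count when the access merely crosses the limit. A userspace-style sketch of that clamping rule in isolation (the limit value and error handling are simplified stand-ins, not the kernel API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Clamp an access of *count bytes at offset pos against a hard limit:
 * accesses starting below the limit are shortened, accesses starting at
 * or past it fail outright, mirroring the -EFBIG cases above.
 */
static int clamp_access(int64_t pos, int64_t *count, int64_t limit)
{
	if (pos >= limit)
		return -EFBIG;
	if (*count > limit - pos)
		*count = limit - pos;
	return 0;
}

int main(void)
{
	int64_t count = 4096;
	int64_t limit = 1024 * 1024;	/* pretend 1 MiB file size limit */

	/* starts under the limit: shortened to a 512-byte access */
	if (clamp_access(limit - 512, &count, limit) == 0)
		printf("short access of %lld bytes\n", (long long)count);

	/* starts past the limit: rejected */
	if (clamp_access(2 * limit, &count, limit) == -EFBIG)
		printf("access rejected with -EFBIG\n");
	return 0;
}

generic_remap_checks() then reuses both clamps for clone/dedupe ranges, additionally enforcing block alignment and rejecting a shortened request when REMAP_FILE_CAN_SHORTEN is not set.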
diff --git a/mm/gup.c b/mm/gup.c
index 841d7ef53591..f76e77a2d34b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1817,8 +1817,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* interrupts disabled by get_futex_key.
*
* With interrupts disabled, we block page table pages from being
- * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
- * for more details.
+ * freed from under us. See struct mmu_table_batch comments in
+ * include/asm-generic/tlb.h for more details.
*
* We do not adopt an rcu_read_lock(.) here as we also want to
* block IPIs that come from THPs splitting.
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index debf11388a60..5b42d3d4b60a 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -27,6 +27,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
int nr;
struct page **pages;
+ if (gup->size > ULONG_MAX)
+ return -EINVAL;
+
nr_pages = gup->size / PAGE_SIZE;
pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
diff --git a/mm/hmm.c b/mm/hmm.c
index 774d684fa2b4..90c34f3d1243 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * Authors: JÃ©rÃ´me Glisse <jglisse@redhat.com>
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Refer to include/linux/hmm.h for information about heterogeneous memory
@@ -43,7 +43,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
*
* @mm: mm struct this HMM struct is bound to
* @lock: lock protecting ranges list
- * @sequence: we track updates to the CPU page table with a sequence number
* @ranges: list of range being snapshotted
* @mirrors: list of mirrors for this mm
* @mmu_notifier: mmu notifier to track updates to CPU page table
@@ -52,7 +51,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
struct hmm {
struct mm_struct *mm;
spinlock_t lock;
- atomic_t sequence;
struct list_head ranges;
struct list_head mirrors;
struct mmu_notifier mmu_notifier;
@@ -85,22 +83,11 @@ static struct hmm *hmm_register(struct mm_struct *mm)
return NULL;
INIT_LIST_HEAD(&hmm->mirrors);
init_rwsem(&hmm->mirrors_sem);
- atomic_set(&hmm->sequence, 0);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
spin_lock_init(&hmm->lock);
hmm->mm = mm;
- /*
- * We should only get here if hold the mmap_sem in write mode ie on
- * registration of first mirror through hmm_mirror_register()
- */
- hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
- if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
- kfree(hmm);
- return NULL;
- }
-
spin_lock(&mm->page_table_lock);
if (!mm->hmm)
mm->hmm = hmm;
@@ -108,12 +95,27 @@ static struct hmm *hmm_register(struct mm_struct *mm)
cleanup = true;
spin_unlock(&mm->page_table_lock);
- if (cleanup) {
- mmu_notifier_unregister(&hmm->mmu_notifier, mm);
- kfree(hmm);
- }
+ if (cleanup)
+ goto error;
+
+ /*
+ * We should only get here if we hold the mmap_sem in write mode, i.e. on
+ * registration of the first mirror through hmm_mirror_register()
+ */
+ hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
+ if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
+ goto error_mm;
return mm->hmm;
+
+error_mm:
+ spin_lock(&mm->page_table_lock);
+ if (mm->hmm == hmm)
+ mm->hmm = NULL;
+ spin_unlock(&mm->page_table_lock);
+error:
+ kfree(hmm);
+ return NULL;
}
void hmm_mm_destroy(struct mm_struct *mm)
@@ -121,10 +123,8 @@ void hmm_mm_destroy(struct mm_struct *mm)
kfree(mm->hmm);
}
-static void hmm_invalidate_range(struct hmm *hmm,
- enum hmm_update_type action,
- unsigned long start,
- unsigned long end)
+static int hmm_invalidate_range(struct hmm *hmm, bool device,
+ const struct hmm_update *update)
{
struct hmm_mirror *mirror;
struct hmm_range *range;
@@ -133,22 +133,33 @@ static void hmm_invalidate_range(struct hmm *hmm,
list_for_each_entry(range, &hmm->ranges, list) {
unsigned long addr, idx, npages;
- if (end < range->start || start >= range->end)
+ if (update->end < range->start || update->start >= range->end)
continue;
range->valid = false;
- addr = max(start, range->start);
+ addr = max(update->start, range->start);
idx = (addr - range->start) >> PAGE_SHIFT;
- npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
+ npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
}
spin_unlock(&hmm->lock);
+ if (!device)
+ return 0;
+
down_read(&hmm->mirrors_sem);
- list_for_each_entry(mirror, &hmm->mirrors, list)
- mirror->ops->sync_cpu_device_pagetables(mirror, action,
- start, end);
+ list_for_each_entry(mirror, &hmm->mirrors, list) {
+ int ret;
+
+ ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
+ if (!update->blockable && ret == -EAGAIN) {
+ up_read(&hmm->mirrors_sem);
+ return -EAGAIN;
+ }
+ }
up_read(&hmm->mirrors_sem);
+
+ return 0;
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -178,18 +189,21 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ bool blockable)
{
+ struct hmm_update update;
struct hmm *hmm = mm->hmm;
VM_BUG_ON(!hmm);
- atomic_inc(&hmm->sequence);
-
- return 0;
+ update.start = start;
+ update.end = end;
+ update.event = HMM_UPDATE_INVALIDATE;
+ update.blockable = blockable;
+ return hmm_invalidate_range(hmm, true, &update);
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
@@ -197,11 +211,16 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
+ struct hmm_update update;
struct hmm *hmm = mm->hmm;
VM_BUG_ON(!hmm);
- hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
+ update.start = start;
+ update.end = end;
+ update.event = HMM_UPDATE_INVALIDATE;
+ update.blockable = true;
+ hmm_invalidate_range(hmm, false, &update);
}
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
@@ -278,12 +297,13 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror)
if (!should_unregister || mm == NULL)
return;
+ mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
+
spin_lock(&mm->page_table_lock);
if (mm->hmm == hmm)
mm->hmm = NULL;
spin_unlock(&mm->page_table_lock);
- mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
@@ -571,22 +591,42 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
+ struct vm_area_struct *vma = walk->vma;
uint64_t *pfns = range->pfns;
unsigned long addr = start, i;
pte_t *ptep;
+ pmd_t pmd;
- i = (addr - range->start) >> PAGE_SHIFT;
again:
- if (pmd_none(*pmdp))
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_none(pmd))
return hmm_vma_walk_hole(start, end, walk);
- if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB))
+ if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
return hmm_pfns_bad(start, end, walk);
- if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
- pmd_t pmd;
+ if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
+ bool fault, write_fault;
+ unsigned long npages;
+ uint64_t *pfns;
+
+ i = (addr - range->start) >> PAGE_SHIFT;
+ npages = (end - addr) >> PAGE_SHIFT;
+ pfns = &range->pfns[i];
+ hmm_range_need_fault(hmm_vma_walk, pfns, npages,
+ 0, &fault, &write_fault);
+ if (fault || write_fault) {
+ hmm_vma_walk->last = addr;
+ pmd_migration_entry_wait(vma->vm_mm, pmdp);
+ return -EAGAIN;
+ }
+ return 0;
+ } else if (!pmd_present(pmd))
+ return hmm_pfns_bad(start, end, walk);
+
+ if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
* No need to take pmd_lock here, even if some other threads
* is splitting the huge pmd we will get that event through
@@ -601,13 +641,21 @@ again:
if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
goto again;
+ i = (addr - range->start) >> PAGE_SHIFT;
return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
}
- if (pmd_bad(*pmdp))
+ /*
+ * We have handled all the valid cases above, i.e. none, migration,
+ * huge or transparent huge. At this point the entry is either a
+ * valid pmd pointing to a pte directory or a bad pmd that will
+ * not recover.
+ */
+ if (pmd_bad(pmd))
return hmm_pfns_bad(start, end, walk);
ptep = pte_offset_map(pmdp, addr);
+ i = (addr - range->start) >> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
int r;
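
With the hmm.c changes above, a CPU page-table invalidation is delivered to mirrors as a single descriptor (start, end, event, blockable) and is propagated from invalidate_range_start() instead of being deferred to invalidate_range_end(); a mirror whose sync_cpu_device_pagetables() callback cannot make progress without sleeping in a non-blockable context is expected to return -EAGAIN so the caller can back off and retry. A self-contained sketch of a callback following that contract (struct update and the device lock are stand-ins, not the kernel's struct hmm_update or a real driver):

#include <errno.h>
#include <stdbool.h>

/* stand-in for the update descriptor built in hmm_invalidate_range_start() */
struct update {
	unsigned long start;
	unsigned long end;
	bool blockable;
};

/* stand-in for a driver lock that a real mirror would have to take */
static int device_lock_held;

static bool device_trylock(void)
{
	if (device_lock_held)
		return false;
	device_lock_held = 1;
	return true;
}

static void device_lock(void)
{
	device_lock_held = 1;	/* a real driver might sleep here */
}

static void device_unlock(void)
{
	device_lock_held = 0;
}

static void invalidate_device_range(unsigned long start, unsigned long end)
{
	/* tear down device page-table entries covering [start, end) */
	(void)start;
	(void)end;
}

/* mirror callback: must not sleep when update->blockable is false */
static int sync_device_pagetables(const struct update *update)
{
	if (!update->blockable) {
		if (!device_trylock())
			return -EAGAIN;	/* caller bails out and retries later */
	} else {
		device_lock();
	}

	invalidate_device_range(update->start, update->end);
	device_unlock();
	return 0;
}

int main(void)
{
	struct update u = { .start = 0, .end = 4096, .blockable = false };

	return sync_device_pagetables(&u);
}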
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4ef8fa479d..55478ab3c83b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -629,21 +629,40 @@ release:
* available
* never: never stall for any thp allocation
*/
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
{
const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+ gfp_t this_node = 0;
+
+#ifdef CONFIG_NUMA
+ struct mempolicy *pol;
+ /*
+ * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
+ * specified, to express a general desire to stay on the current
+ * node for optimistic allocation attempts. If the defrag mode
+ * and/or madvise hint requires direct reclaim then we prefer to
+ * fall back to another node rather than use node reclaim, because that
+ * can lead to excessive reclaim even though there is free memory
+ * on other nodes. We expect that NUMA preferences are specified
+ * by memory policies.
+ */
+ pol = get_vma_policy(vma, addr);
+ if (pol->mode != MPOL_BIND)
+ this_node = __GFP_THISNODE;
+ mpol_cond_put(pol);
+#endif
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+ return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
- __GFP_KSWAPD_RECLAIM);
+ __GFP_KSWAPD_RECLAIM | this_node);
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
- 0);
- return GFP_TRANSHUGE_LIGHT;
+ this_node);
+ return GFP_TRANSHUGE_LIGHT | this_node;
}
/* Caller must hold page table lock. */
@@ -715,8 +734,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
pte_free(vma->vm_mm, pgtable);
return ret;
}
- gfp = alloc_hugepage_direct_gfpmask(vma);
- page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+ gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+ page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1286,8 +1305,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
alloc:
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
- huge_gfp = alloc_hugepage_direct_gfpmask(vma);
- new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
+ huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+ new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
+ haddr, numa_node_id());
} else
new_page = NULL;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7b5c0ad9a6bd..c007fb5fb8d5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -15,7 +15,7 @@
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
@@ -2100,9 +2100,9 @@ int __alloc_bootmem_huge_page(struct hstate *h)
for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
void *addr;
- addr = memblock_virt_alloc_try_nid_raw(
+ addr = memblock_alloc_try_nid_raw(
huge_page_size(h), huge_page_size(h),
- 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
+ 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
if (addr) {
/*
* Use the beginning of the huge page to store the
diff --git a/mm/internal.h b/mm/internal.h
index 87256ae1bef8..291eb2b6d1d8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -161,7 +161,7 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
}
extern int __isolate_free_page(struct page *page, unsigned int order);
-extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
+extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 7a2a2f13f86f..c7550eb65922 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -10,11 +10,10 @@
*
*/
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
@@ -83,8 +82,8 @@ static inline bool kasan_zero_page_entry(pte_t pte)
static __init void *early_alloc(size_t size, int node)
{
- return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 4f7e4b5a2f08..877de4fa0720 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -92,7 +92,7 @@
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
diff --git a/mm/memblock.c b/mm/memblock.c
index a85315083b5a..7df468c8ebc8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,7 +20,6 @@
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <asm/sections.h>
#include <linux/io.h>
@@ -82,6 +81,16 @@
* initialization completes.
*/
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data __refdata contig_page_data;
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+unsigned long long max_possible_pfn;
+
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -1238,8 +1247,11 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
{
phys_addr_t found;
- if (!align)
+ if (!align) {
+ /* Can't use WARNs this early in boot on powerpc */
+ dump_stack();
align = SMP_CACHE_BYTES;
+ }
found = memblock_find_in_range_node(size, align, start, end, nid,
flags);
@@ -1269,7 +1281,7 @@ phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}
-phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
+phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
enum memblock_flags flags = choose_memblock_flags();
phys_addr_t ret;
@@ -1304,23 +1316,22 @@ phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys
return alloc;
}
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
{
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
-phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
+phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
- phys_addr_t res = memblock_alloc_nid(size, align, nid);
+ phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);
if (res)
return res;
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
-#if defined(CONFIG_NO_BOOTMEM)
/**
- * memblock_virt_alloc_internal - allocate boot memory block
+ * memblock_alloc_internal - allocate boot memory block
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region to allocate (phys address)
@@ -1333,9 +1344,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* hold the requested memory.
*
* The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
- *
- * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
+ * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
*
* The phys address of allocated boot memory block is converted to virtual and
* allocated memory is reset to 0.
@@ -1346,7 +1355,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-static void * __init memblock_virt_alloc_internal(
+static void * __init memblock_alloc_internal(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1361,13 +1370,15 @@ static void * __init memblock_virt_alloc_internal(
/*
* Detect any accidental use of these APIs after slab is ready, as at
* this moment memblock may be deinitialized already and its
- * internal data may be destroyed (after execution of free_all_bootmem)
+ * internal data may be destroyed (after execution of memblock_free_all)
*/
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, nid);
- if (!align)
+ if (!align) {
+ dump_stack();
align = SMP_CACHE_BYTES;
+ }
if (max_addr > memblock.current_limit)
max_addr = memblock.current_limit;
@@ -1413,14 +1424,14 @@ done:
}
/**
- * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
+ * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
* memory and without panicking
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
@@ -1431,7 +1442,7 @@ done:
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_virt_alloc_try_nid_raw(
+void * __init memblock_alloc_try_nid_raw(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1442,7 +1453,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
- ptr = memblock_virt_alloc_internal(size, align,
+ ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
if (ptr && size > 0)
page_init_poison(ptr, size);
@@ -1451,13 +1462,13 @@ void * __init memblock_virt_alloc_try_nid_raw(
}
/**
- * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
+ * memblock_alloc_try_nid_nopanic - allocate boot memory block
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
@@ -1467,7 +1478,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_virt_alloc_try_nid_nopanic(
+void * __init memblock_alloc_try_nid_nopanic(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1478,7 +1489,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
- ptr = memblock_virt_alloc_internal(size, align,
+ ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
if (ptr)
memset(ptr, 0, size);
@@ -1486,24 +1497,24 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
}
/**
- * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
+ * memblock_alloc_try_nid - allocate boot memory block with panicking
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
- * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
+ * Public panicking version of memblock_alloc_try_nid_nopanic()
* which provides debug information (including caller info), if enabled,
* and panics if the request can not be satisfied.
*
* Return:
* Virtual address of allocated memory block on success, NULL on failure.
*/
-void * __init memblock_virt_alloc_try_nid(
+void * __init memblock_alloc_try_nid(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
@@ -1513,7 +1524,7 @@ void * __init memblock_virt_alloc_try_nid(
memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
- ptr = memblock_virt_alloc_internal(size, align,
+ ptr = memblock_alloc_internal(size, align,
min_addr, max_addr, nid);
if (ptr) {
memset(ptr, 0, size);
@@ -1524,14 +1535,13 @@ void * __init memblock_virt_alloc_try_nid(
__func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
return NULL;
}
-#endif
/**
* __memblock_free_early - free boot memory block
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
*
- * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
+ * Free boot memory block previously allocated by memblock_alloc_xx() API.
* The freeing memory will not be released to the buddy allocator.
*/
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
@@ -1565,7 +1575,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
- __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
+ memblock_free_pages(pfn_to_page(cursor), cursor, 0);
totalram_pages++;
}
}
@@ -1879,6 +1889,100 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+ int order;
+
+ while (start < end) {
+ order = min(MAX_ORDER - 1UL, __ffs(start));
+
+ while (start + (1UL << order) > end)
+ order--;
+
+ memblock_free_pages(pfn_to_page(start), start, order);
+
+ start += (1UL << order);
+ }
+}
+
+static unsigned long __init __free_memory_core(phys_addr_t start,
+ phys_addr_t end)
+{
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = min_t(unsigned long,
+ PFN_DOWN(end), max_low_pfn);
+
+ if (start_pfn >= end_pfn)
+ return 0;
+
+ __free_pages_memory(start_pfn, end_pfn);
+
+ return end_pfn - start_pfn;
+}
+
+static unsigned long __init free_low_memory_core_early(void)
+{
+ unsigned long count = 0;
+ phys_addr_t start, end;
+ u64 i;
+
+ memblock_clear_hotplug(0, -1);
+
+ for_each_reserved_mem_region(i, &start, &end)
+ reserve_bootmem_region(start, end);
+
+ /*
+ * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
+ * because in some cases, e.g. when Node0 has no RAM installed,
+ * the low RAM will be on Node1.
+ */
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+ NULL)
+ count += __free_memory_core(start, end);
+
+ return count;
+}
+
+static int reset_managed_pages_done __initdata;
+
+void reset_node_managed_pages(pg_data_t *pgdat)
+{
+ struct zone *z;
+
+ for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+ z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+ struct pglist_data *pgdat;
+
+ if (reset_managed_pages_done)
+ return;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_managed_pages(pgdat);
+
+ reset_managed_pages_done = 1;
+}
+
+/**
+ * memblock_free_all - release free pages to the buddy allocator
+ *
+ * Return: the number of pages actually released.
+ */
+unsigned long __init memblock_free_all(void)
+{
+ unsigned long pages;
+
+ reset_all_zones_managed_pages();
+
+ pages = free_low_memory_core_early();
+ totalram_pages += pages;
+
+ return pages;
+}
+
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
static int memblock_debug_show(struct seq_file *m, void *private)
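
The __free_pages_memory() helper moved into memblock.c above releases a PFN range to the buddy allocator in the largest naturally aligned power-of-two chunks it can: the order starts as the smaller of MAX_ORDER - 1 and the alignment of the current start PFN (__ffs(start)), and is reduced until the chunk fits before the end of the range. A userspace sketch of just that chunking loop, printing the chunks instead of freeing pages (MAX_ORDER is assumed to be 11, the usual default, and __builtin_ctzl stands in for the kernel's __ffs()):

#include <stdio.h>

#define MAX_ORDER 11UL	/* assumption: the common kernel default */

/* index of the lowest set bit, like the kernel's __ffs() */
static unsigned long lowest_bit(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);	/* GCC/Clang builtin */
}

static void free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long order;

	while (start < end) {
		/* largest order the start pfn's alignment allows */
		order = start ? lowest_bit(start) : MAX_ORDER - 1;
		if (order > MAX_ORDER - 1)
			order = MAX_ORDER - 1;

		/* shrink the chunk until it fits inside the range */
		while (start + (1UL << order) > end)
			order--;

		printf("free pfn %lu, order %lu (%lu pages)\n",
		       start, order, 1UL << order);

		start += 1UL << order;
	}
}

int main(void)
{
	free_pages_memory(3, 70);	/* e.g. a small range of pfns 3..69 */
	return 0;
}

Running it on pfns 3..69 shows the range handed over as one page, then 4-, 8-, 16- and 32-page blocks, then smaller tail chunks, which is the shape of work memblock_free_all() now does where free_all_bootmem() used to walk the bootmem bitmaps.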
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 54920cbc46bf..6e1469b80cb7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2593,7 +2593,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
struct mem_cgroup *memcg;
int ret = 0;
- if (memcg_kmem_bypass())
+ if (mem_cgroup_disabled() || memcg_kmem_bypass())
return 0;
memcg = get_mem_cgroup_from_current();
diff --git a/mm/memory.c b/mm/memory.c
index 072139579d89..4ad2d293ddc2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1537,10 +1537,15 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
* in may not match the PFN we have mapped if the
* mapped PFN is a writeable COW page. In the mkwrite
* case we are creating a writable PTE for a shared
- * mapping and we expect the PFNs to match.
+ * mapping and we expect the PFNs to match. If they
+ * don't match, we are likely racing with block
+ * allocation and mapping invalidation so just skip the
+ * update.
*/
- if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
+ if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
+ WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
goto out_unlock;
+ }
entry = *pte;
goto out_mkwrite;
} else
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7e6509a53d79..2b2b3ccbbfb5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -33,7 +33,6 @@
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
-#include <linux/bootmem.h>
#include <linux/compaction.h>
#include <asm/tlbflush.h>
@@ -587,6 +586,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
for (i = 0; i < sections_to_remove; i++) {
unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+ cond_resched();
ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
altmap);
map_offset = 0;
@@ -839,7 +839,6 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
return zone;
}
-/* Must be protected by mem_hotplug_begin() or a device_lock */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
unsigned long flags;
@@ -851,6 +850,8 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
struct memory_notify arg;
struct memory_block *mem;
+ mem_hotplug_begin();
+
/*
* We can't use pfn_to_nid() because nid might be stored in struct page
* which is not yet initialized. Instead, we find nid from memory block.
@@ -915,6 +916,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
if (onlined_pages)
memory_notify(MEM_ONLINE, &arg);
+ mem_hotplug_done();
return 0;
failed_addition:
@@ -922,6 +924,7 @@ failed_addition:
(unsigned long long) pfn << PAGE_SHIFT,
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
+ mem_hotplug_done();
return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
@@ -1069,7 +1072,12 @@ static int online_memory_block(struct memory_block *mem, void *arg)
return device_online(&mem->dev);
}
-/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+/*
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations (triggered e.g. by sysfs).
+ *
+ * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
+ */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
u64 start, size;
@@ -1121,26 +1129,26 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
/* create new memmap entry */
firmware_map_add_hotplug(start, start + size, "System RAM");
+ /* device_online() will take the lock when calling online_pages() */
+ mem_hotplug_done();
+
/* online pages if requested */
if (online)
walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
NULL, online_memory_block);
- goto out;
-
+ return ret;
error:
/* rollback pgdat allocation and others */
if (new_node)
rollback_node_hotadd(nid);
memblock_remove(start, size);
-
-out:
mem_hotplug_done();
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory_resource);
-int __ref add_memory(int nid, u64 start, u64 size)
+/* requires device_hotplug_lock, see add_memory_resource() */
+int __ref __add_memory(int nid, u64 start, u64 size)
{
struct resource *res;
int ret;
@@ -1154,6 +1162,17 @@ int __ref add_memory(int nid, u64 start, u64 size)
release_memory_resource(res);
return ret;
}
+
+int add_memory(int nid, u64 start, u64 size)
+{
+ int rc;
+
+ lock_device_hotplug();
+ rc = __add_memory(nid, start, size);
+ unlock_device_hotplug();
+
+ return rc;
+}
EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -1540,10 +1559,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
return -EINVAL;
if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
return -EINVAL;
+
+ mem_hotplug_begin();
+
/* This makes hotplug much easier...and readable.
we assume this for now. .*/
- if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
+ &valid_end)) {
+ mem_hotplug_done();
return -EINVAL;
+ }
zone = page_zone(pfn_to_page(valid_start));
node = zone_to_nid(zone);
@@ -1552,8 +1577,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, true);
- if (ret)
+ if (ret) {
+ mem_hotplug_done();
return ret;
+ }
arg.start_pfn = start_pfn;
arg.nr_pages = nr_pages;
@@ -1624,6 +1651,7 @@ repeat:
writeback_set_ratelimit();
memory_notify(MEM_OFFLINE, &arg);
+ mem_hotplug_done();
return 0;
failed_removal:
@@ -1633,10 +1661,10 @@ failed_removal:
memory_notify(MEM_CANCEL_OFFLINE, &arg);
/* pushback to free area */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ mem_hotplug_done();
return ret;
}
-/* Must be protected by mem_hotplug_begin() or a device_lock */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
return __offline_pages(start_pfn, start_pfn + nr_pages);
@@ -1807,7 +1835,7 @@ EXPORT_SYMBOL(try_offline_node);
* and online/offline operations before this call, as required by
* try_offline_node().
*/
-void __ref remove_memory(int nid, u64 start, u64 size)
+void __ref __remove_memory(int nid, u64 start, u64 size)
{
int ret;
@@ -1836,5 +1864,12 @@ void __ref remove_memory(int nid, u64 start, u64 size)
mem_hotplug_done();
}
+
+void remove_memory(int nid, u64 start, u64 size)
+{
+ lock_device_hotplug();
+ __remove_memory(nid, start, size);
+ unlock_device_hotplug();
+}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
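Taken together, the memory_hotplug.c changes move the locking into the core: online_pages()/offline_pages() now take the hotplug lock via mem_hotplug_begin()/mem_hotplug_done() themselves, and add_memory()/remove_memory() become wrappers that take device_hotplug_lock around __add_memory()/__remove_memory(). A minimal caller-side sketch of the resulting convention (the nid/start/size values are made up for illustration):

	/* Sketch only -- hypothetical node and range. */
	int nid = 0;
	u64 start = 0x100000000ULL;
	u64 size = 128ULL << 20;
	int rc;

	/* Plain wrappers take and drop device_hotplug_lock internally. */
	rc = add_memory(nid, start, size);
	if (rc)
		return rc;

	/* Code already serialized by lock_device_hotplug() (e.g. the
	 * sysfs-triggered paths mentioned above) calls the
	 * double-underscore variants directly.
	 */
	lock_device_hotplug();
	__remove_memory(nid, start, size);
	unlock_device_hotplug();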
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfd26d7e61a1..5837a067124d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1116,8 +1116,8 @@ static struct page *new_page(struct page *page, unsigned long start)
} else if (PageTransHuge(page)) {
struct page *thp;
- thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
- HPAGE_PMD_ORDER);
+ thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
+ address, numa_node_id());
if (!thp)
return NULL;
prep_transhuge_page(thp);
@@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
-static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2011,7 +2011,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
* @node: Which node to prefer for allocation (modulo policy).
- * @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
@@ -2022,7 +2021,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, int node, bool hugepage)
+ unsigned long addr, int node)
{
struct mempolicy *pol;
struct page *page;
@@ -2040,32 +2039,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
goto out;
}
- if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
- int hpage_node = node;
-
- /*
- * For hugepage allocation and non-interleave policy which
- * allows the current node (or other explicitly preferred
- * node) we only try to allocate from the current/preferred
- * node and don't fall back to other nodes, as the cost of
- * remote accesses would likely offset THP benefits.
- *
- * If the policy is interleave, or does not allow the current
- * node in its nodemask, we allocate the standard way.
- */
- if (pol->mode == MPOL_PREFERRED &&
- !(pol->flags & MPOL_F_LOCAL))
- hpage_node = pol->v.preferred_node;
-
- nmask = policy_nodemask(gfp, pol);
- if (!nmask || node_isset(hpage_node, *nmask)) {
- mpol_cond_put(pol);
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, order);
- goto out;
- }
- }
-
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
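For reference, the alloc_pages_vma() interface change driving the hunks above and the shmem.c hunk further down, as a before/after sketch (gfp flags, vma and addr stand for whatever the caller already has):

	/* Before: THP callers passed a hugepage hint as the last argument. */
	page = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
			       addr, numa_node_id(), true);

	/* After: the hint is gone; the preferred node is the final argument
	 * and the __GFP_THISNODE special-casing removed above goes with it.
	 */
	page = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
			       addr, numa_node_id());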
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
deleted file mode 100644
index 439af3b765a7..000000000000
--- a/mm/nobootmem.c
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bootmem - A boot-time physical memory allocator and configurator
- *
- * Copyright (C) 1999 Ingo Molnar
- * 1999 Kanoj Sarcar, SGI
- * 2008 Johannes Weiner
- *
- * Access to this subsystem has to be serialized externally (which is true
- * for the boot process anyway).
- */
-#include <linux/init.h>
-#include <linux/pfn.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/kmemleak.h>
-#include <linux/range.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-
-#include <asm/bug.h>
-#include <asm/io.h>
-
-#include "internal.h"
-
-#ifndef CONFIG_HAVE_MEMBLOCK
-#error CONFIG_HAVE_MEMBLOCK not defined
-#endif
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data;
-EXPORT_SYMBOL(contig_page_data);
-#endif
-
-unsigned long max_low_pfn;
-unsigned long min_low_pfn;
-unsigned long max_pfn;
-unsigned long long max_possible_pfn;
-
-static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit)
-{
- void *ptr;
- u64 addr;
- enum memblock_flags flags = choose_memblock_flags();
-
- if (limit > memblock.current_limit)
- limit = memblock.current_limit;
-
-again:
- addr = memblock_find_in_range_node(size, align, goal, limit, nid,
- flags);
- if (!addr && (flags & MEMBLOCK_MIRROR)) {
- flags &= ~MEMBLOCK_MIRROR;
- pr_warn("Could not allocate %pap bytes of mirrored memory\n",
- &size);
- goto again;
- }
- if (!addr)
- return NULL;
-
- if (memblock_reserve(addr, size))
- return NULL;
-
- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- /*
- * The min_count is set to 0 so that bootmem allocated blocks
- * are never reported as leaks.
- */
- kmemleak_alloc(ptr, size, 0, 0);
- return ptr;
-}
-
-/**
- * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
- * @size: size of the range in bytes
- *
- * This is only useful when the bootmem allocator has already been torn
- * down, but we are still initializing the system. Pages are given directly
- * to the page allocator, no bootmem metadata is updated because it is gone.
- */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
-{
- unsigned long cursor, end;
-
- kmemleak_free_part_phys(addr, size);
-
- cursor = PFN_UP(addr);
- end = PFN_DOWN(addr + size);
-
- for (; cursor < end; cursor++) {
- __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
- totalram_pages++;
- }
-}
-
-static void __init __free_pages_memory(unsigned long start, unsigned long end)
-{
- int order;
-
- while (start < end) {
- order = min(MAX_ORDER - 1UL, __ffs(start));
-
- while (start + (1UL << order) > end)
- order--;
-
- __free_pages_bootmem(pfn_to_page(start), start, order);
-
- start += (1UL << order);
- }
-}
-
-static unsigned long __init __free_memory_core(phys_addr_t start,
- phys_addr_t end)
-{
- unsigned long start_pfn = PFN_UP(start);
- unsigned long end_pfn = min_t(unsigned long,
- PFN_DOWN(end), max_low_pfn);
-
- if (start_pfn >= end_pfn)
- return 0;
-
- __free_pages_memory(start_pfn, end_pfn);
-
- return end_pfn - start_pfn;
-}
-
-static unsigned long __init free_low_memory_core_early(void)
-{
- unsigned long count = 0;
- phys_addr_t start, end;
- u64 i;
-
- memblock_clear_hotplug(0, -1);
-
- for_each_reserved_mem_region(i, &start, &end)
- reserve_bootmem_region(start, end);
-
- /*
- * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
- * because in some case like Node0 doesn't have RAM installed
- * low ram will be on Node1
- */
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
- NULL)
- count += __free_memory_core(start, end);
-
- return count;
-}
-
-static int reset_managed_pages_done __initdata;
-
-void reset_node_managed_pages(pg_data_t *pgdat)
-{
- struct zone *z;
-
- for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
- z->managed_pages = 0;
-}
-
-void __init reset_all_zones_managed_pages(void)
-{
- struct pglist_data *pgdat;
-
- if (reset_managed_pages_done)
- return;
-
- for_each_online_pgdat(pgdat)
- reset_node_managed_pages(pgdat);
-
- reset_managed_pages_done = 1;
-}
-
-/**
- * free_all_bootmem - release free pages to the buddy allocator
- *
- * Return: the number of pages actually released.
- */
-unsigned long __init free_all_bootmem(void)
-{
- unsigned long pages;
-
- reset_all_zones_managed_pages();
-
- pages = free_low_memory_core_early();
- totalram_pages += pages;
-
- return pages;
-}
-
-/**
- * free_bootmem_node - mark a page range as usable
- * @pgdat: node the range resides on
- * @physaddr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must reside completely on the specified node.
- */
-void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size)
-{
- memblock_free(physaddr, size);
-}
-
-/**
- * free_bootmem - mark a page range as usable
- * @addr: starting physical address of the range
- * @size: size of the range in bytes
- *
- * Partial pages will be considered reserved and left as they are.
- *
- * The range must be contiguous but may span node boundaries.
- */
-void __init free_bootmem(unsigned long addr, unsigned long size)
-{
- memblock_free(addr, size);
-}
-
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
-restart:
-
- ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
-
- if (ptr)
- return ptr;
-
- if (goal != 0) {
- goal = 0;
- goto restart;
- }
-
- return NULL;
-}
-
-/**
- * __alloc_bootmem_nopanic - allocate boot memory without panicking
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * Return: address of the allocated region or %NULL on failure.
- */
-void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = -1UL;
-
- return ___alloc_bootmem_nopanic(size, align, goal, limit);
-}
-
-static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
-
- if (mem)
- return mem;
- /*
- * Whoops, we cannot satisfy the allocation request.
- */
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-/**
- * __alloc_bootmem - allocate boot memory
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- unsigned long limit = -1UL;
-
- return ___alloc_bootmem(size, align, goal, limit);
-}
-
-void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
-again:
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, limit);
- if (ptr)
- return ptr;
-
- ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
- goal, limit);
- if (ptr)
- return ptr;
-
- if (goal) {
- goal = 0;
- goto again;
- }
-
- return NULL;
-}
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
-}
-
-static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal,
- unsigned long limit)
-{
- void *ptr;
-
- ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
- if (ptr)
- return ptr;
-
- pr_alert("bootmem alloc of %lu bytes failed!\n", size);
- panic("Out of memory");
- return NULL;
-}
-
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
-}
-
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- return __alloc_bootmem_node(pgdat, size, align, goal);
-}
-
-
-/**
- * __alloc_bootmem_low - allocate low boot memory
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may happen on any node in the system.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
-}
-
-void * __init __alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal)
-{
- return ___alloc_bootmem_nopanic(size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
-
-/**
- * __alloc_bootmem_low_node - allocate low boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- *
- * Return: address of the allocated region.
- */
-void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- return ___alloc_bootmem_node(pgdat, size, align, goal,
- ARCH_LOW_ADDRESS_LIMIT);
-}
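Deleting mm/nobootmem.c removes the bootmem compatibility layer entirely; boot-time allocations go through memblock directly, which is what the follow-on hunks in page_alloc.c, percpu.c, sparse.c and friends convert to. A minimal before/after sketch (size is whatever the caller needs):

	/* Before: bootmem wrappers, with alignment 0 meaning "default". */
	ptr = __alloc_bootmem(size, SMP_CACHE_BYTES, 0 /* goal */);
	buf = memblock_virt_alloc(size, 0);

	/* After: memblock is used directly and the alignment is explicit. */
	ptr = memblock_alloc(size, SMP_CACHE_BYTES);
	buf = memblock_alloc(size, SMP_CACHE_BYTES);

	/* Boot pages reach the buddy allocator via memblock_free_all()
	 * rather than free_all_bootmem().
	 */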
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 863d46da6586..a919ba5cb3c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
@@ -1339,7 +1338,7 @@ meminit_pfn_in_nid(unsigned long pfn, int node,
#endif
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __init memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
{
if (early_page_uninitialised(pfn))
@@ -5476,7 +5475,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
/*
* Initially all pages are reserved - free ones are freed
- * up by free_all_bootmem() once the early boot process is
+ * up by memblock_free_all() once the early boot process is
* done. Non-atomic initialization, single-pass.
*/
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
@@ -6209,7 +6208,7 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
zone->pageblock_flags = NULL;
if (usemapsize)
zone->pageblock_flags =
- memblock_virt_alloc_node_nopanic(usemapsize,
+ memblock_alloc_node_nopanic(usemapsize,
pgdat->node_id);
}
#else
@@ -6439,7 +6438,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
end = pgdat_end_pfn(pgdat);
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
- map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
+ map = memblock_alloc_node_nopanic(size, pgdat->node_id);
pgdat->node_mem_map = map + offset;
}
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
@@ -6508,8 +6507,7 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat);
}
-#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
-
+#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
* Zero all valid struct pages in range [spfn, epfn), return number of struct
* pages zeroed
@@ -6569,7 +6567,7 @@ void __init zero_resv_unavail(void)
if (pgcnt)
pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
}
-#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
+#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -7712,9 +7710,11 @@ void *__init alloc_large_system_hash(const char *tablename,
size = bucketsize << log2qty;
if (flags & HASH_EARLY) {
if (flags & HASH_ZERO)
- table = memblock_virt_alloc_nopanic(size, 0);
+ table = memblock_alloc_nopanic(size,
+ SMP_CACHE_BYTES);
else
- table = memblock_virt_alloc_raw(size, 0);
+ table = memblock_alloc_raw(size,
+ SMP_CACHE_BYTES);
} else if (hashdist) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
} else {
diff --git a/mm/page_ext.c b/mm/page_ext.c
index a9826da84ccb..ae44f7adbe07 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
@@ -161,9 +161,9 @@ static int __init alloc_node_page_ext(int nid)
table_size = get_entry_size() * nr_pages;
- base = memblock_virt_alloc_try_nid_nopanic(
+ base = memblock_alloc_try_nid_nopanic(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
if (!base)
return -ENOMEM;
NODE_DATA(nid)->node_page_ext = base;
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 6302bc62c27d..b9e4b42b33ab 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
diff --git a/mm/page_io.c b/mm/page_io.c
index a451ffa9491c..d4d1c89bcddd 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -294,7 +294,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
};
struct iov_iter from;
- iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
+ iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
init_sync_kiocb(&kiocb, swap_file);
kiocb.ki_pos = page_file_offset(page);
@@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
goto out;
}
bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
- bio_associate_blkg_from_page(bio, page);
+ bio_associate_blkcg_from_page(bio, page);
count_swpout_vm_event(page);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index d80adfe702d3..87bc0dfdb52b 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
diff --git a/mm/page_poison.c b/mm/page_poison.c
index aa2b3d34e8ea..f0c15e9017c0 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -17,11 +17,16 @@ static int __init early_page_poison_param(char *buf)
}
early_param("page_poison", early_page_poison_param);
+/**
+ * page_poisoning_enabled - check if page poisoning is enabled
+ *
+ * Return: true if page poisoning is enabled, false otherwise.
+ */
bool page_poisoning_enabled(void)
{
/*
* Assumes that debug_pagealloc_enabled is set before
- * free_all_bootmem.
+ * memblock_free_all.
* Page poisoning is debug page alloc for some arches. If
* either of those options are enabled, enable poisoning.
*/
@@ -29,6 +34,7 @@ bool page_poisoning_enabled(void)
(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
debug_pagealloc_enabled()));
}
+EXPORT_SYMBOL_GPL(page_poisoning_enabled);
static void poison_page(struct page *page)
{
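With the EXPORT_SYMBOL_GPL added above, page_poisoning_enabled() can now be consulted from GPL modules as well; a trivial, hypothetical example (not part of the patch):

	/* Hypothetical module code. */
	if (page_poisoning_enabled())
		pr_info("page poisoning is active; freed pages are scrubbed\n");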
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ae3c2a35d61b..11df03e71288 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -21,7 +21,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
if (!is_swap_pte(*pvmw->pte))
return false;
} else {
- if (!pte_present(*pvmw->pte))
+ /*
+ * We get here when we are trying to unmap a private
+ * device page from the process address space. Such a
+ * page is not CPU accessible and is therefore mapped as
+ * a special swap entry; nonetheless it still counts as
+ * a valid regular mapping for the page (and is accounted
+ * as such in the page map counts).
+ *
+ * So handle this special case as if it were a normal
+ * page mapping, i.e. lock the CPU page table and return
+ * true.
+ *
+ * For more details on device private memory see HMM
+ * (include/linux/hmm.h or mm/hmm.c).
+ */
+ if (is_swap_pte(*pvmw->pte)) {
+ swp_entry_t entry;
+
+ /* Handle un-addressable ZONE_DEVICE memory */
+ entry = pte_to_swp_entry(*pvmw->pte);
+ if (!is_device_private_entry(entry))
+ return false;
+ } else if (!pte_present(*pvmw->pte))
return false;
}
}
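Restating the new map_pte() classification in isolation (a sketch, not a literal excerpt): a non-present PTE is rejected only if its swap entry is not a device-private one.

	if (pte_present(*pvmw->pte)) {
		/* normal mapping -- handled as before */
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry = pte_to_swp_entry(*pvmw->pte);

		if (is_device_private_entry(entry)) {
			/* un-addressable ZONE_DEVICE page: still a valid
			 * mapping, so take the PTE lock and report true
			 */
		} else {
			return false;	/* real swap entry */
		}
	} else {
		return false;		/* none/other */
	}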
diff --git a/mm/percpu.c b/mm/percpu.c
index 4b90682623e9..db86282fd024 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -65,7 +65,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
@@ -1101,9 +1101,9 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
region_size = ALIGN(start_offset + map_size, lcm_align);
/* allocate chunk */
- chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
- BITS_TO_LONGS(region_size >> PAGE_SHIFT),
- 0);
+ chunk = memblock_alloc(sizeof(struct pcpu_chunk) +
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT),
+ SMP_CACHE_BYTES);
INIT_LIST_HEAD(&chunk->list);
@@ -1114,12 +1114,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
chunk->nr_pages = region_size >> PAGE_SHIFT;
region_bits = pcpu_chunk_map_bits(chunk);
- chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
- sizeof(chunk->alloc_map[0]), 0);
- chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
- sizeof(chunk->bound_map[0]), 0);
- chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
- sizeof(chunk->md_blocks[0]), 0);
+ chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]),
+ SMP_CACHE_BYTES);
+ chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]),
+ SMP_CACHE_BYTES);
+ chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]),
+ SMP_CACHE_BYTES);
pcpu_init_md_blocks(chunk);
/* manage populated page bitmap */
@@ -1888,7 +1888,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
- ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
+ ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
if (!ptr)
return NULL;
ai = ptr;
@@ -2075,12 +2075,14 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
/* process group information and build config tables accordingly */
- group_offsets = memblock_virt_alloc(ai->nr_groups *
- sizeof(group_offsets[0]), 0);
- group_sizes = memblock_virt_alloc(ai->nr_groups *
- sizeof(group_sizes[0]), 0);
- unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
- unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
+ group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]),
+ SMP_CACHE_BYTES);
+ group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]),
+ SMP_CACHE_BYTES);
+ unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]),
+ SMP_CACHE_BYTES);
+ unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]),
+ SMP_CACHE_BYTES);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
@@ -2144,8 +2146,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* empty chunks.
*/
pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
- pcpu_slot = memblock_virt_alloc(
- pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
+ pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
+ SMP_CACHE_BYTES);
for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]);
@@ -2458,7 +2460,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
- areas = memblock_virt_alloc_nopanic(areas_size, 0);
+ areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
if (!areas) {
rc = -ENOMEM;
goto out_free;
@@ -2589,7 +2591,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
BUG_ON(ai->nr_groups != 1);
upa = ai->alloc_size/ai->unit_size;
nr_g0_units = roundup(num_possible_cpus(), upa);
- if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
+ if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
pcpu_free_alloc_info(ai);
return -EINVAL;
}
@@ -2599,7 +2601,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
/* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0]));
- pages = memblock_virt_alloc(pages_size, 0);
+ pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
/* allocate pages */
j = 0;
@@ -2688,7 +2690,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
size_t align)
{
- return memblock_virt_alloc_from_nopanic(
+ return memblock_alloc_from_nopanic(
size, align, __pa(MAX_DMA_ADDRESS));
}
@@ -2737,7 +2739,7 @@ void __init setup_per_cpu_areas(void)
void *fc;
ai = pcpu_alloc_alloc_info(1, 1);
- fc = memblock_virt_alloc_from_nopanic(unit_size,
+ fc = memblock_alloc_from_nopanic(unit_size,
PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
if (!ai || !fc)
diff --git a/mm/shmem.c b/mm/shmem.c
index 56bf122e0bb4..ea26d7a0342d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1435,7 +1435,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_init(&pvma, info, hindex);
page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
- HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
+ HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
shmem_pseudo_vma_destroy(&pvma);
if (page)
prep_transhuge_page(page);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 8301293331a2..7fec05796796 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -20,7 +20,7 @@
*/
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
@@ -42,8 +42,8 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return memblock_virt_alloc_try_nid_raw(size, align, goal,
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ return memblock_alloc_try_nid_raw(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
diff --git a/mm/sparse.c b/mm/sparse.c
index 67ad061f7fb8..33307fc05c4d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
@@ -68,7 +68,8 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
if (slab_is_available())
section = kzalloc_node(array_size, GFP_KERNEL, nid);
else
- section = memblock_virt_alloc_node(array_size, nid);
+ section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
+ nid);
return section;
}
@@ -216,7 +217,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
align = 1 << (INTERNODE_CACHE_SHIFT);
- mem_section = memblock_virt_alloc(size, align);
+ mem_section = memblock_alloc(size, align);
}
#endif
@@ -306,7 +307,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
limit = goal + (1UL << PA_SECTION_SHIFT);
nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
- p = memblock_virt_alloc_try_nid_nopanic(size,
+ p = memblock_alloc_try_nid_nopanic(size,
SMP_CACHE_BYTES, goal, limit,
nid);
if (!p && limit) {
@@ -362,7 +363,7 @@ static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
unsigned long size)
{
- return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
+ return memblock_alloc_node_nopanic(size, pgdat->node_id);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -391,9 +392,9 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
if (map)
return map;
- map = memblock_virt_alloc_try_nid(size,
+ map = memblock_alloc_try_nid(size,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -405,9 +406,9 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
{
WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
sparsemap_buf =
- memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE,
+ memblock_alloc_try_nid_raw(size, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
sparsemap_buf_end = sparsemap_buf + size;
}
diff --git a/net/9p/client.c b/net/9p/client.c
index 5f23e18eecc0..2c9a17b9b46b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2066,7 +2066,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
struct kvec kv = {.iov_base = data, .iov_len = count};
struct iov_iter to;
- iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count);
+ iov_iter_kvec(&to, READ, &kv, 1, count);
p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
fid->fid, (unsigned long long) offset, count);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index eb596c2ed546..b1d39cabf125 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -329,7 +329,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
if (!iov_iter_count(data))
return 0;
- if (!(data->type & ITER_KVEC)) {
+ if (!iov_iter_is_kvec(data)) {
int n;
/*
* We allow only p9_max_pages pinned. We wait for the
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 4e2576fc0c59..828e87fe8027 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -467,7 +467,7 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
iv.iov_len = skb->len;
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
err = l2cap_chan_send(chan, &msg, skb->len);
if (err > 0) {
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 51c2cf2d8923..58fc6333d412 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -63,7 +63,7 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *dat
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, total_len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len);
l2cap_chan_send(chan, &msg, total_len);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index a1c1b7e8a45c..c822e626761b 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -622,7 +622,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iv, 2, 1 + len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len);
l2cap_chan_send(chan, &msg, 1 + len);
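The iov_iter conversions here and in the 9p, ceph and page_io hunks follow one pattern: the ITER_KVEC/ITER_BVEC type flag is no longer OR-ed into the direction argument, and code that used to inspect the iterator type calls iov_iter_is_kvec() instead. A minimal sketch (buf, len and msg are the caller's own):

	struct kvec iv = { .iov_base = buf, .iov_len = len };

	/* Before: */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, len);

	/* After: the iterator type is implied by the constructor. */
	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, len);

	if (iov_iter_is_kvec(&msg.msg_iter))
		/* replaces open-coded (type & ITER_KVEC) checks */;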
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0a187196aeed..57fcc6b4bf6e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -156,7 +156,6 @@ static bool con_flag_test_and_set(struct ceph_connection *con,
/* Slab caches for frequently-allocated structures */
static struct kmem_cache *ceph_msg_cache;
-static struct kmem_cache *ceph_msg_data_cache;
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
@@ -235,23 +234,11 @@ static int ceph_msgr_slab_init(void)
if (!ceph_msg_cache)
return -ENOMEM;
- BUG_ON(ceph_msg_data_cache);
- ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
- if (ceph_msg_data_cache)
- return 0;
-
- kmem_cache_destroy(ceph_msg_cache);
- ceph_msg_cache = NULL;
-
- return -ENOMEM;
+ return 0;
}
static void ceph_msgr_slab_exit(void)
{
- BUG_ON(!ceph_msg_data_cache);
- kmem_cache_destroy(ceph_msg_data_cache);
- ceph_msg_data_cache = NULL;
-
BUG_ON(!ceph_msg_cache);
kmem_cache_destroy(ceph_msg_cache);
ceph_msg_cache = NULL;
@@ -526,7 +513,7 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
if (!buf)
msg.msg_flags |= MSG_TRUNC;
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
r = sock_recvmsg(sock, &msg, msg.msg_flags);
if (r == -EAGAIN)
r = 0;
@@ -545,7 +532,7 @@ static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
int r;
BUG_ON(page_offset + length > PAGE_SIZE);
- iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
+ iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
r = sock_recvmsg(sock, &msg, msg.msg_flags);
if (r == -EAGAIN)
r = 0;
@@ -607,7 +594,7 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
else
msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
- iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
+ iov_iter_bvec(&msg.msg_iter, WRITE, &bvec, 1, size);
ret = sock_sendmsg(sock, &msg);
if (ret == -EAGAIN)
ret = 0;
@@ -1141,16 +1128,13 @@ static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
struct ceph_msg_data_cursor *cursor = &msg->cursor;
- struct ceph_msg_data *data;
BUG_ON(!length);
BUG_ON(length > msg->data_length);
- BUG_ON(list_empty(&msg->data));
+ BUG_ON(!msg->num_data_items);
- cursor->data_head = &msg->data;
cursor->total_resid = length;
- data = list_first_entry(&msg->data, struct ceph_msg_data, links);
- cursor->data = data;
+ cursor->data = msg->data;
__ceph_msg_data_cursor_init(cursor);
}
@@ -1231,8 +1215,7 @@ static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
if (!cursor->resid && cursor->total_resid) {
WARN_ON(!cursor->last_piece);
- BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
- cursor->data = list_next_entry(cursor->data, links);
+ cursor->data++;
__ceph_msg_data_cursor_init(cursor);
new_piece = true;
}
@@ -1248,9 +1231,6 @@ static size_t sizeof_footer(struct ceph_connection *con)
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
- BUG_ON(!msg);
- BUG_ON(!data_len);
-
/* Initialize data cursor */
ceph_msg_data_cursor_init(msg, (size_t)data_len);
@@ -1590,7 +1570,7 @@ static int write_partial_message_data(struct ceph_connection *con)
dout("%s %p msg %p\n", __func__, con, msg);
- if (list_empty(&msg->data))
+ if (!msg->num_data_items)
return -EINVAL;
/*
@@ -2347,8 +2327,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
u32 crc = 0;
int ret;
- BUG_ON(!msg);
- if (list_empty(&msg->data))
+ if (!msg->num_data_items)
return -EIO;
if (do_datacrc)
@@ -3256,32 +3235,16 @@ bool ceph_con_keepalive_expired(struct ceph_connection *con,
return false;
}
-static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
+static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
{
- struct ceph_msg_data *data;
-
- if (WARN_ON(!ceph_msg_data_type_valid(type)))
- return NULL;
-
- data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
- if (!data)
- return NULL;
-
- data->type = type;
- INIT_LIST_HEAD(&data->links);
-
- return data;
+ BUG_ON(msg->num_data_items >= msg->max_data_items);
+ return &msg->data[msg->num_data_items++];
}
static void ceph_msg_data_destroy(struct ceph_msg_data *data)
{
- if (!data)
- return;
-
- WARN_ON(!list_empty(&data->links));
if (data->type == CEPH_MSG_DATA_PAGELIST)
ceph_pagelist_release(data->pagelist);
- kmem_cache_free(ceph_msg_data_cache, data);
}
void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
@@ -3292,13 +3255,12 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
BUG_ON(!pages);
BUG_ON(!length);
- data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
- BUG_ON(!data);
+ data = ceph_msg_data_add(msg);
+ data->type = CEPH_MSG_DATA_PAGES;
data->pages = pages;
data->length = length;
data->alignment = alignment & ~PAGE_MASK;
- list_add_tail(&data->links, &msg->data);
msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pages);
@@ -3311,11 +3273,11 @@ void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
BUG_ON(!pagelist);
BUG_ON(!pagelist->length);
- data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
- BUG_ON(!data);
+ data = ceph_msg_data_add(msg);
+ data->type = CEPH_MSG_DATA_PAGELIST;
+ refcount_inc(&pagelist->refcnt);
data->pagelist = pagelist;
- list_add_tail(&data->links, &msg->data);
msg->data_length += pagelist->length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
@@ -3326,12 +3288,11 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
{
struct ceph_msg_data *data;
- data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
- BUG_ON(!data);
+ data = ceph_msg_data_add(msg);
+ data->type = CEPH_MSG_DATA_BIO;
data->bio_pos = *bio_pos;
data->bio_length = length;
- list_add_tail(&data->links, &msg->data);
msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_bio);
@@ -3342,11 +3303,10 @@ void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
{
struct ceph_msg_data *data;
- data = ceph_msg_data_create(CEPH_MSG_DATA_BVECS);
- BUG_ON(!data);
+ data = ceph_msg_data_add(msg);
+ data->type = CEPH_MSG_DATA_BVECS;
data->bvec_pos = *bvec_pos;
- list_add_tail(&data->links, &msg->data);
msg->data_length += bvec_pos->iter.bi_size;
}
EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
@@ -3355,8 +3315,8 @@ EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
* construct a new message with given type, size
* the new msg has a ref count of 1.
*/
-struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
- bool can_fail)
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+ gfp_t flags, bool can_fail)
{
struct ceph_msg *m;
@@ -3370,7 +3330,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
INIT_LIST_HEAD(&m->list_head);
kref_init(&m->kref);
- INIT_LIST_HEAD(&m->data);
/* front */
if (front_len) {
@@ -3385,6 +3344,15 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
}
m->front_alloc_len = m->front.iov_len = front_len;
+ if (max_data_items) {
+ m->data = kmalloc_array(max_data_items, sizeof(*m->data),
+ flags);
+ if (!m->data)
+ goto out2;
+
+ m->max_data_items = max_data_items;
+ }
+
dout("ceph_msg_new %p front %d\n", m, front_len);
return m;
@@ -3401,6 +3369,13 @@ out:
}
return NULL;
}
+EXPORT_SYMBOL(ceph_msg_new2);
+
+struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
+ bool can_fail)
+{
+ return ceph_msg_new2(type, front_len, 0, flags, can_fail);
+}
EXPORT_SYMBOL(ceph_msg_new);
/*
@@ -3496,13 +3471,14 @@ static void ceph_msg_free(struct ceph_msg *m)
{
dout("%s %p\n", __func__, m);
kvfree(m->front.iov_base);
+ kfree(m->data);
kmem_cache_free(ceph_msg_cache, m);
}
static void ceph_msg_release(struct kref *kref)
{
struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
- struct ceph_msg_data *data, *next;
+ int i;
dout("%s %p\n", __func__, m);
WARN_ON(!list_empty(&m->list_head));
@@ -3515,11 +3491,8 @@ static void ceph_msg_release(struct kref *kref)
m->middle = NULL;
}
- list_for_each_entry_safe(data, next, &m->data, links) {
- list_del_init(&data->links);
- ceph_msg_data_destroy(data);
- }
- m->data_length = 0;
+ for (i = 0; i < m->num_data_items; i++)
+ ceph_msg_data_destroy(&m->data[i]);
if (m->pool)
ceph_msgpool_put(m->pool, m);
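The messenger now carries its data items in a fixed array sized at allocation time instead of a dynamically grown list, and ceph_msg_data_add_pagelist() takes its own reference on the pagelist. A usage sketch under those assumptions (front_len and pagelist are the caller's):

	/* Room for exactly one data item. */
	msg = ceph_msg_new2(CEPH_MSG_OSD_OP, front_len, 1, GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	/* The message grabs its own ref (the refcount_inc above) and drops
	 * it in ceph_msg_data_destroy(), so the caller's ref remains the
	 * caller's responsibility.
	 */
	ceph_msg_data_add_pagelist(msg, pagelist);
	ceph_pagelist_release(pagelist);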
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 72571535883f..e3ecb80cd182 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -14,7 +14,8 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
struct ceph_msgpool *pool = arg;
struct ceph_msg *msg;
- msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
+ msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
+ gfp_mask, true);
if (!msg) {
dout("msgpool_alloc %s failed\n", pool->name);
} else {
@@ -35,11 +36,13 @@ static void msgpool_free(void *element, void *arg)
}
int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
- int front_len, int size, bool blocking, const char *name)
+ int front_len, int max_data_items, int size,
+ const char *name)
{
dout("msgpool %s init\n", name);
pool->type = type;
pool->front_len = front_len;
+ pool->max_data_items = max_data_items;
pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
if (!pool->pool)
return -ENOMEM;
@@ -53,18 +56,21 @@ void ceph_msgpool_destroy(struct ceph_msgpool *pool)
mempool_destroy(pool->pool);
}
-struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
- int front_len)
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+ int max_data_items)
{
struct ceph_msg *msg;
- if (front_len > pool->front_len) {
- dout("msgpool_get %s need front %d, pool size is %d\n",
- pool->name, front_len, pool->front_len);
- WARN_ON(1);
+ if (front_len > pool->front_len ||
+ max_data_items > pool->max_data_items) {
+ pr_warn_ratelimited("%s need %d/%d, pool %s has %d/%d\n",
+ __func__, front_len, max_data_items, pool->name,
+ pool->front_len, pool->max_data_items);
+ WARN_ON_ONCE(1);
/* try to alloc a fresh message */
- return ceph_msg_new(pool->type, front_len, GFP_NOFS, false);
+ return ceph_msg_new2(pool->type, front_len, max_data_items,
+ GFP_NOFS, false);
}
msg = mempool_alloc(pool->pool, GFP_NOFS);
@@ -80,6 +86,9 @@ void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
msg->front.iov_len = pool->front_len;
msg->hdr.front_len = cpu_to_le32(pool->front_len);
+ msg->data_length = 0;
+ msg->num_data_items = 0;
+
kref_init(&msg->kref); /* retake single ref */
mempool_free(msg, pool->pool);
}
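Correspondingly, msgpools are now sized for both the front buffer and the data item array; a sketch of the updated init/get pair (type, sizes, counts and name are illustrative):

	struct ceph_msgpool pool;

	/* front_len = 4096 bytes, up to 2 data items, 10 preallocated msgs */
	ret = ceph_msgpool_init(&pool, CEPH_MSG_OSD_OPREPLY, 4096, 2, 10,
				"example-pool");
	if (ret)
		return ret;

	/* Requests exceeding either limit fall back to a fresh allocation
	 * (with a ratelimited warning), as implemented above.
	 */
	msg = ceph_msgpool_get(&pool, 4096, 2);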
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 60934bd8796c..d23a9f81f3d7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -126,6 +126,9 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}
+/*
+ * Consumes @pages if @own_pages is true.
+ */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
struct page **pages, u64 length, u32 alignment,
bool pages_from_pool, bool own_pages)
@@ -138,6 +141,9 @@ static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
osd_data->own_pages = own_pages;
}
+/*
+ * Consumes a ref on @pagelist.
+ */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
struct ceph_pagelist *pagelist)
{
@@ -362,6 +368,8 @@ static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
num_pages = calc_pages_for((u64)osd_data->alignment,
(u64)osd_data->length);
ceph_release_page_vector(osd_data->pages, num_pages);
+ } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
+ ceph_pagelist_release(osd_data->pagelist);
}
ceph_osd_data_init(osd_data);
}
@@ -402,6 +410,9 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
case CEPH_OSD_OP_LIST_WATCHERS:
ceph_osd_data_release(&op->list_watchers.response_data);
break;
+ case CEPH_OSD_OP_COPY_FROM:
+ ceph_osd_data_release(&op->copy_from.osd_data);
+ break;
default:
break;
}
@@ -606,12 +617,15 @@ static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
-int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
+static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
+ int num_request_data_items,
+ int num_reply_data_items)
{
struct ceph_osd_client *osdc = req->r_osdc;
struct ceph_msg *msg;
int msg_size;
+ WARN_ON(req->r_request || req->r_reply);
WARN_ON(ceph_oid_empty(&req->r_base_oid));
WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
@@ -633,9 +647,11 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
msg_size += 4 + 8; /* retry_attempt, features */
if (req->r_mempool)
- msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
+ msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
+ num_request_data_items);
else
- msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
+ msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
+ num_request_data_items, gfp, true);
if (!msg)
return -ENOMEM;
@@ -648,9 +664,11 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
if (req->r_mempool)
- msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
+ msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
+ num_reply_data_items);
else
- msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
+ msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
+ num_reply_data_items, gfp, true);
if (!msg)
return -ENOMEM;
@@ -658,7 +676,6 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
return 0;
}
-EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
@@ -671,6 +688,65 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
}
}
+static void get_num_data_items(struct ceph_osd_request *req,
+ int *num_request_data_items,
+ int *num_reply_data_items)
+{
+ struct ceph_osd_req_op *op;
+
+ *num_request_data_items = 0;
+ *num_reply_data_items = 0;
+
+ for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
+ switch (op->op) {
+ /* request */
+ case CEPH_OSD_OP_WRITE:
+ case CEPH_OSD_OP_WRITEFULL:
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ case CEPH_OSD_OP_COPY_FROM:
+ *num_request_data_items += 1;
+ break;
+
+ /* reply */
+ case CEPH_OSD_OP_STAT:
+ case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_LIST_WATCHERS:
+ *num_reply_data_items += 1;
+ break;
+
+ /* both */
+ case CEPH_OSD_OP_NOTIFY:
+ *num_request_data_items += 1;
+ *num_reply_data_items += 1;
+ break;
+ case CEPH_OSD_OP_CALL:
+ *num_request_data_items += 2;
+ *num_reply_data_items += 1;
+ break;
+
+ default:
+ WARN_ON(!osd_req_opcode_valid(op->op));
+ break;
+ }
+ }
+}
+
+/*
+ * oid, oloc and OSD op opcode(s) must be filled in before this function
+ * is called.
+ */
+int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
+{
+ int num_request_data_items, num_reply_data_items;
+
+ get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
+ return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
+ num_reply_data_items);
+}
+EXPORT_SYMBOL(ceph_osdc_alloc_messages);
+
/*
* This is an osd op init function for opcodes that have no data or
* other information associated with them. It also serves as a
@@ -767,22 +843,19 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
- u16 opcode, const char *class, const char *method)
+ const char *class, const char *method)
{
- struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
- opcode, 0);
+ struct ceph_osd_req_op *op;
struct ceph_pagelist *pagelist;
size_t payload_len = 0;
size_t size;
- BUG_ON(opcode != CEPH_OSD_OP_CALL);
+ op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
- pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
+ pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
return -ENOMEM;
- ceph_pagelist_init(pagelist);
-
op->cls.class_name = class;
size = strlen(class);
BUG_ON(size > (size_t) U8_MAX);
@@ -815,12 +888,10 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
- pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+ pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
return -ENOMEM;
- ceph_pagelist_init(pagelist);
-
payload_len = strlen(name);
op->xattr.name_len = payload_len;
ceph_pagelist_append(pagelist, name, payload_len);
@@ -900,12 +971,6 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
const struct ceph_osd_req_op *src)
{
- if (WARN_ON(!osd_req_opcode_valid(src->op))) {
- pr_err("unrecognized osd opcode %d\n", src->op);
-
- return 0;
- }
-
switch (src->op) {
case CEPH_OSD_OP_STAT:
break;
@@ -955,6 +1020,14 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
case CEPH_OSD_OP_CREATE:
case CEPH_OSD_OP_DELETE:
break;
+ case CEPH_OSD_OP_COPY_FROM:
+ dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
+ dst->copy_from.src_version =
+ cpu_to_le64(src->copy_from.src_version);
+ dst->copy_from.flags = src->copy_from.flags;
+ dst->copy_from.src_fadvise_flags =
+ cpu_to_le32(src->copy_from.src_fadvise_flags);
+ break;
default:
pr_err("unsupported osd opcode %s\n",
ceph_osd_op_name(src->op));
@@ -1038,7 +1111,15 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
if (flags & CEPH_OSD_FLAG_WRITE)
req->r_data_offset = off;
- r = ceph_osdc_alloc_messages(req, GFP_NOFS);
+ if (num_ops > 1)
+ /*
+ * This is a special case for ceph_writepages_start(), but it
+ * also covers ceph_uninline_data(). If more multi-op request
+ * use cases emerge, we will need a separate helper.
+ */
+ r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
+ else
+ r = ceph_osdc_alloc_messages(req, GFP_NOFS);
if (r)
goto fail;
@@ -1845,48 +1926,55 @@ static bool should_plug_request(struct ceph_osd_request *req)
return true;
}
-static void setup_request_data(struct ceph_osd_request *req,
- struct ceph_msg *msg)
+/*
+ * Keep get_num_data_items() in sync with this function.
+ */
+static void setup_request_data(struct ceph_osd_request *req)
{
- u32 data_len = 0;
- int i;
+ struct ceph_msg *request_msg = req->r_request;
+ struct ceph_msg *reply_msg = req->r_reply;
+ struct ceph_osd_req_op *op;
- if (!list_empty(&msg->data))
+ if (req->r_request->num_data_items || req->r_reply->num_data_items)
return;
- WARN_ON(msg->data_length);
- for (i = 0; i < req->r_num_ops; i++) {
- struct ceph_osd_req_op *op = &req->r_ops[i];
-
+ WARN_ON(request_msg->data_length || reply_msg->data_length);
+ for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
switch (op->op) {
/* request */
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
WARN_ON(op->indata_len != op->extent.length);
- ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
+ ceph_osdc_msg_data_add(request_msg,
+ &op->extent.osd_data);
break;
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
WARN_ON(op->indata_len != op->xattr.name_len +
op->xattr.value_len);
- ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
+ ceph_osdc_msg_data_add(request_msg,
+ &op->xattr.osd_data);
break;
case CEPH_OSD_OP_NOTIFY_ACK:
- ceph_osdc_msg_data_add(msg,
+ ceph_osdc_msg_data_add(request_msg,
&op->notify_ack.request_data);
break;
+ case CEPH_OSD_OP_COPY_FROM:
+ ceph_osdc_msg_data_add(request_msg,
+ &op->copy_from.osd_data);
+ break;
/* reply */
case CEPH_OSD_OP_STAT:
- ceph_osdc_msg_data_add(req->r_reply,
+ ceph_osdc_msg_data_add(reply_msg,
&op->raw_data_in);
break;
case CEPH_OSD_OP_READ:
- ceph_osdc_msg_data_add(req->r_reply,
+ ceph_osdc_msg_data_add(reply_msg,
&op->extent.osd_data);
break;
case CEPH_OSD_OP_LIST_WATCHERS:
- ceph_osdc_msg_data_add(req->r_reply,
+ ceph_osdc_msg_data_add(reply_msg,
&op->list_watchers.response_data);
break;
@@ -1895,25 +1983,23 @@ static void setup_request_data(struct ceph_osd_request *req,
WARN_ON(op->indata_len != op->cls.class_len +
op->cls.method_len +
op->cls.indata_len);
- ceph_osdc_msg_data_add(msg, &op->cls.request_info);
+ ceph_osdc_msg_data_add(request_msg,
+ &op->cls.request_info);
/* optional, can be NONE */
- ceph_osdc_msg_data_add(msg, &op->cls.request_data);
+ ceph_osdc_msg_data_add(request_msg,
+ &op->cls.request_data);
/* optional, can be NONE */
- ceph_osdc_msg_data_add(req->r_reply,
+ ceph_osdc_msg_data_add(reply_msg,
&op->cls.response_data);
break;
case CEPH_OSD_OP_NOTIFY:
- ceph_osdc_msg_data_add(msg,
+ ceph_osdc_msg_data_add(request_msg,
&op->notify.request_data);
- ceph_osdc_msg_data_add(req->r_reply,
+ ceph_osdc_msg_data_add(reply_msg,
&op->notify.response_data);
break;
}
-
- data_len += op->indata_len;
}
-
- WARN_ON(data_len != msg->data_length);
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
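The "keep get_num_data_items() in sync" rule above means any op type that gains a data item needs matching entries in both switches. Purely for illustration (CEPH_OSD_OP_FOO and op->foo are hypothetical, not real opcodes or fields):

	/* in get_num_data_items(): */
	case CEPH_OSD_OP_FOO:			/* hypothetical */
		*num_request_data_items += 1;
		break;

	/* in setup_request_data(): */
	case CEPH_OSD_OP_FOO:			/* hypothetical */
		ceph_osdc_msg_data_add(request_msg, &op->foo.osd_data);
		break;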
@@ -1961,7 +2047,7 @@ static void encode_request_partial(struct ceph_osd_request *req,
req->r_data_offset || req->r_snapc);
}
- setup_request_data(req, msg);
+ setup_request_data(req);
encode_spgid(&p, &req->r_t.spgid); /* actual spg */
ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
@@ -3001,11 +3087,21 @@ static void linger_submit(struct ceph_osd_linger_request *lreq)
struct ceph_osd_client *osdc = lreq->osdc;
struct ceph_osd *osd;
+ down_write(&osdc->lock);
+ linger_register(lreq);
+ if (lreq->is_watch) {
+ lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
+ lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
+ } else {
+ lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
+ }
+
calc_target(osdc, &lreq->t, NULL, false);
osd = lookup_create_osd(osdc, lreq->t.osd, true);
link_linger(osd, lreq);
send_linger(lreq);
+ up_write(&osdc->lock);
}
static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
@@ -4318,9 +4414,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
lreq->notify_id, notify_id);
} else if (!completion_done(&lreq->notify_finish_wait)) {
struct ceph_msg_data *data =
- list_first_entry_or_null(&msg->data,
- struct ceph_msg_data,
- links);
+ msg->num_data_items ? &msg->data[0] : NULL;
if (data) {
if (lreq->preply_pages) {
@@ -4476,6 +4570,23 @@ alloc_linger_request(struct ceph_osd_linger_request *lreq)
ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+ return req;
+}
+
+static struct ceph_osd_request *
+alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
+{
+ struct ceph_osd_request *req;
+
+ req = alloc_linger_request(lreq);
+ if (!req)
+ return NULL;
+
+ /*
+ * Pass 0 for cookie because we don't know it yet; it will be
+ * filled in by linger_submit().
+ */
+ osd_req_op_watch_init(req, 0, 0, watch_opcode);
if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
ceph_osdc_put_request(req);
@@ -4514,27 +4625,19 @@ ceph_osdc_watch(struct ceph_osd_client *osdc,
lreq->t.flags = CEPH_OSD_FLAG_WRITE;
ktime_get_real_ts64(&lreq->mtime);
- lreq->reg_req = alloc_linger_request(lreq);
+ lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
if (!lreq->reg_req) {
ret = -ENOMEM;
goto err_put_lreq;
}
- lreq->ping_req = alloc_linger_request(lreq);
+ lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
if (!lreq->ping_req) {
ret = -ENOMEM;
goto err_put_lreq;
}
- down_write(&osdc->lock);
- linger_register(lreq); /* before osd_req_op_* */
- osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
- CEPH_OSD_WATCH_OP_WATCH);
- osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
- CEPH_OSD_WATCH_OP_PING);
linger_submit(lreq);
- up_write(&osdc->lock);
-
ret = linger_reg_commit_wait(lreq);
if (ret) {
linger_cancel(lreq);
@@ -4599,11 +4702,10 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
- pl = kmalloc(sizeof(*pl), GFP_NOIO);
+ pl = ceph_pagelist_alloc(GFP_NOIO);
if (!pl)
return -ENOMEM;
- ceph_pagelist_init(pl);
ret = ceph_pagelist_encode_64(pl, notify_id);
ret |= ceph_pagelist_encode_64(pl, cookie);
if (payload) {
@@ -4641,12 +4743,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
- ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
+ payload_len);
if (ret)
goto out_put_req;
- ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
- payload_len);
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;
@@ -4670,11 +4772,10 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
op->notify.cookie = cookie;
- pl = kmalloc(sizeof(*pl), GFP_NOIO);
+ pl = ceph_pagelist_alloc(GFP_NOIO);
if (!pl)
return -ENOMEM;
- ceph_pagelist_init(pl);
ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
ret |= ceph_pagelist_encode_32(pl, timeout);
ret |= ceph_pagelist_encode_32(pl, payload_len);
@@ -4733,29 +4834,30 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
goto out_put_lreq;
}
+ /*
+ * Pass 0 for cookie because we don't know it yet, it will be
+ * filled in by linger_submit().
+ */
+ ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
+ payload, payload_len);
+ if (ret)
+ goto out_put_lreq;
+
/* for notify_id */
pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto out_put_lreq;
}
-
- down_write(&osdc->lock);
- linger_register(lreq); /* before osd_req_op_* */
- ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
- timeout, payload, payload_len);
- if (ret) {
- linger_unregister(lreq);
- up_write(&osdc->lock);
- ceph_release_page_vector(pages, 1);
- goto out_put_lreq;
- }
ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
response_data),
pages, PAGE_SIZE, 0, false, true);
- linger_submit(lreq);
- up_write(&osdc->lock);
+ ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
+ if (ret)
+ goto out_put_lreq;
+
+ linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (!ret)
ret = linger_notify_finish_wait(lreq);
@@ -4881,10 +4983,6 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;
- ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
- if (ret)
- goto out_put_req;
-
pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
@@ -4896,6 +4994,10 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
response_data),
pages, PAGE_SIZE, 0, false, true);
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ if (ret)
+ goto out_put_req;
+
ceph_osdc_start_request(osdc, req, false);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
@@ -4958,11 +5060,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = flags;
- ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
- if (ret)
- goto out_put_req;
-
- ret = osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
+ ret = osd_req_op_cls_init(req, 0, class, method);
if (ret)
goto out_put_req;
@@ -4973,6 +5071,10 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
osd_req_op_cls_response_data_pages(req, 0, &resp_page,
*resp_len, 0, false, false);
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ if (ret)
+ goto out_put_req;
+
ceph_osdc_start_request(osdc, req, false);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
@@ -5021,11 +5123,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
goto out_map;
err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
- PAGE_SIZE, 10, true, "osd_op");
+ PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
if (err < 0)
goto out_mempool;
err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
- PAGE_SIZE, 10, true, "osd_op_reply");
+ PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
+ "osd_op_reply");
if (err < 0)
goto out_msgpool;
@@ -5168,6 +5271,80 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
}
EXPORT_SYMBOL(ceph_osdc_writepages);
+static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u8 copy_from_flags)
+{
+ struct ceph_osd_req_op *op;
+ struct page **pages;
+ void *p, *end;
+
+ pages = ceph_alloc_page_vector(1, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
+ op->copy_from.snapid = src_snapid;
+ op->copy_from.src_version = src_version;
+ op->copy_from.flags = copy_from_flags;
+ op->copy_from.src_fadvise_flags = src_fadvise_flags;
+
+ p = page_address(pages[0]);
+ end = p + PAGE_SIZE;
+ ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
+ encode_oloc(&p, end, src_oloc);
+ op->indata_len = PAGE_SIZE - (end - p);
+
+ ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
+ op->indata_len, 0, false, true);
+ return 0;
+}
+
+int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ struct ceph_object_id *dst_oid,
+ struct ceph_object_locator *dst_oloc,
+ u32 dst_fadvise_flags,
+ u8 copy_from_flags)
+{
+ struct ceph_osd_request *req;
+ int ret;
+
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->r_flags = CEPH_OSD_FLAG_WRITE;
+
+ ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
+ ceph_oid_copy(&req->r_t.base_oid, dst_oid);
+
+ ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
+ src_oloc, src_fadvise_flags,
+ dst_fadvise_flags, copy_from_flags);
+ if (ret)
+ goto out;
+
+ ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ ceph_osdc_start_request(osdc, req, false);
+ ret = ceph_osdc_wait_request(osdc, req);
+
+out:
+ ceph_osdc_put_request(req);
+ return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_copy_from);
+
int __init ceph_osdc_setup(void)
{
size_t size = sizeof(struct ceph_osd_request) +
@@ -5295,7 +5472,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
u32 front_len = le32_to_cpu(hdr->front_len);
u32 data_len = le32_to_cpu(hdr->data_len);
- m = ceph_msg_new(type, front_len, GFP_NOIO, false);
+ m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
if (!m)
return NULL;
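
As an aside, a minimal caller sketch for the ceph_osdc_copy_from() export added above (hypothetical helper: it assumes the object ids and locators are already populated elsewhere, uses CEPH_NOSNAP to address the head object, and passes 0 for the version check, fadvise hints and copy-from flags):

static int copy_whole_object(struct ceph_osd_client *osdc,
			     struct ceph_object_id *src_oid,
			     struct ceph_object_locator *src_oloc,
			     struct ceph_object_id *dst_oid,
			     struct ceph_object_locator *dst_oloc)
{
	/* snapid = CEPH_NOSNAP, src_version = 0 (no check), all flags 0 */
	return ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0,
				   src_oid, src_oloc, 0,
				   dst_oid, dst_oloc, 0, 0);
}
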
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 2ea0564771d2..65e34f78b05d 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -6,6 +6,26 @@
#include <linux/highmem.h>
#include <linux/ceph/pagelist.h>
+struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags)
+{
+ struct ceph_pagelist *pl;
+
+ pl = kmalloc(sizeof(*pl), gfp_flags);
+ if (!pl)
+ return NULL;
+
+ INIT_LIST_HEAD(&pl->head);
+ pl->mapped_tail = NULL;
+ pl->length = 0;
+ pl->room = 0;
+ INIT_LIST_HEAD(&pl->free_list);
+ pl->num_pages_free = 0;
+ refcount_set(&pl->refcnt, 1);
+
+ return pl;
+}
+EXPORT_SYMBOL(ceph_pagelist_alloc);
+
static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
{
if (pl->mapped_tail) {
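
The new helper replaces the open-coded kmalloc() + ceph_pagelist_init() pairs converted in the osd_client hunks above; a hedged usage sketch (the cookie value is hypothetical):

	struct ceph_pagelist *pl;
	int ret;

	pl = ceph_pagelist_alloc(GFP_NOIO);	/* allocate and init in one step */
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_64(pl, cookie);
	if (ret) {
		ceph_pagelist_release(pl);	/* drops the initial refcount */
		return ret;
	}
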
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f679c7a7d761..e01274bd5e3e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3600,6 +3600,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
+ if (dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
+ return -EINVAL;
+ }
+
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
@@ -3704,6 +3709,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
+ if (dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
+ return -EINVAL;
+ }
+
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 4da39446da2d..765b2b32c4a4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -111,13 +111,10 @@
#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */
-#define IGMP_V1_ROUTER_PRESENT_TIMEOUT (400*HZ)
-#define IGMP_V2_ROUTER_PRESENT_TIMEOUT (400*HZ)
#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ)
#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ)
+#define IGMP_QUERY_INTERVAL (125*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ)
-#define IGMP_QUERY_ROBUSTNESS_VARIABLE 2
-
#define IGMP_INITIAL_REPORT_DELAY (1)
@@ -935,13 +932,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
in_dev->mr_v1_seen = jiffies +
- IGMP_V1_ROUTER_PRESENT_TIMEOUT;
+ (in_dev->mr_qrv * in_dev->mr_qi) +
+ in_dev->mr_qri;
group = 0;
} else {
/* v2 router present */
max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
in_dev->mr_v2_seen = jiffies +
- IGMP_V2_ROUTER_PRESENT_TIMEOUT;
+ (in_dev->mr_qrv * in_dev->mr_qi) +
+ in_dev->mr_qri;
}
/* cancel the interface change timer */
in_dev->mr_ifc_count = 0;
@@ -981,8 +980,21 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
in_dev->mr_maxdelay = max_delay;
- if (ih3->qrv)
- in_dev->mr_qrv = ih3->qrv;
+
+ /* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently
+ * received value was zero, use the default or statically
+ * configured value.
+ */
+ in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
+ in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
+
+ /* RFC3376, 8.3. Query Response Interval:
+ * The number of seconds represented by the [Query Response
+ * Interval] must be less than the [Query Interval].
+ */
+ if (in_dev->mr_qri >= in_dev->mr_qi)
+ in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;
+
if (!group) { /* general query */
if (ih3->nsrcs)
return true; /* no sources allowed */
@@ -1723,18 +1735,30 @@ void ip_mc_down(struct in_device *in_dev)
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
-void ip_mc_init_dev(struct in_device *in_dev)
-{
#ifdef CONFIG_IP_MULTICAST
+static void ip_mc_reset(struct in_device *in_dev)
+{
struct net *net = dev_net(in_dev->dev);
+
+ in_dev->mr_qi = IGMP_QUERY_INTERVAL;
+ in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
+ in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
+}
+#else
+static void ip_mc_reset(struct in_device *in_dev)
+{
+}
#endif
+
+void ip_mc_init_dev(struct in_device *in_dev)
+{
ASSERT_RTNL();
#ifdef CONFIG_IP_MULTICAST
timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
- in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
#endif
+ ip_mc_reset(in_dev);
spin_lock_init(&in_dev->mc_tomb_lock);
}
@@ -1744,15 +1768,10 @@ void ip_mc_init_dev(struct in_device *in_dev)
void ip_mc_up(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
-#ifdef CONFIG_IP_MULTICAST
- struct net *net = dev_net(in_dev->dev);
-#endif
ASSERT_RTNL();
-#ifdef CONFIG_IP_MULTICAST
- in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
-#endif
+ ip_mc_reset(in_dev);
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for_each_pmc_rtnl(in_dev, pmc) {
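
The net effect of the querier-timeout changes above follows RFC 3376, section 8.12: Older Version Querier Present Timeout = Robustness Variable x Query Interval + Query Response Interval. Expressed with the fields introduced here:

	/* With the defaults visible above (the removed define set the
	 * robustness variable to 2, QI = 125s, QRI = 10s) this works out
	 * to 2 * 125s + 10s = 260s, replacing the fixed 400s
	 * IGMP_V1/V2_ROUTER_PRESENT_TIMEOUT values that were removed.
	 */
	timeout = in_dev->mr_qrv * in_dev->mr_qi + in_dev->mr_qri;	/* jiffies */
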
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index f5c9ef2586de..411dd7a90046 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1834818ed07b..9e6bc4d6daa7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -262,7 +262,7 @@
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index b7918d4caa30..3b45fe530f91 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -145,6 +145,7 @@ msg_bytes_ready:
ret = err;
goto out;
}
+ copied = -EAGAIN;
}
ret = copied;
out:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ca3ed931f2a9..1976fddb9e00 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -81,7 +81,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index d4020c5e831d..2526be6b3d90 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1616,7 +1616,7 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
EnterFunction(7);
/* Receive a packet */
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen);
len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index a70097ecf33c..865ecef68196 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -3030,7 +3030,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
* is already present */
if (mac_proto != MAC_PROTO_NONE)
return -EINVAL;
- mac_proto = MAC_PROTO_NONE;
+ mac_proto = MAC_PROTO_ETHERNET;
break;
case OVS_ACTION_ATTR_POP_ETH:
@@ -3038,7 +3038,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
if (vlan_tci & htons(VLAN_TAG_PRESENT))
return -EINVAL;
- mac_proto = MAC_PROTO_ETHERNET;
+ mac_proto = MAC_PROTO_NONE;
break;
case OVS_ACTION_ATTR_PUSH_NSH:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a827a1f562bf..6a28b96e779e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -499,8 +499,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer)
{
- struct list_head *pos;
- struct sctp_transport *transport;
+ struct sctp_transport *transport;
+ struct list_head *pos;
+ struct sctp_chunk *ch;
pr_debug("%s: association:%p addr:%pISpc\n",
__func__, asoc, &peer->ipaddr.sa);
@@ -564,7 +565,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
*/
if (!list_empty(&peer->transmitted)) {
struct sctp_transport *active = asoc->peer.active_path;
- struct sctp_chunk *ch;
/* Reset the transport of each chunk on this list */
list_for_each_entry(ch, &peer->transmitted,
@@ -586,6 +586,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_transport_hold(active);
}
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
+ if (ch->transport == peer)
+ ch->transport = NULL;
+
asoc->peer.transport_count--;
sctp_transport_free(peer);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e948db29ab53..9b277bd36d1a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -46,7 +46,7 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fc0386e8ff23..739f3e50120d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7083,14 +7083,15 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
+ ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc)
goto out;
- if (policy & SCTP_PR_SCTP_ALL) {
+ if (policy == SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7142,7 +7143,8 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
+ ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 52241d679cc9..89c3a8c7859a 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -286,7 +286,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
*/
krflags = MSG_PEEK | MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
sizeof(struct smc_clc_msg_hdr));
len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (signal_pending(current)) {
@@ -325,7 +325,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
/* receive the complete CLC message */
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
krflags = MSG_WAITALL;
len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
diff --git a/net/socket.c b/net/socket.c
index 99c96851469f..593826e11a53 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -635,7 +635,7 @@ EXPORT_SYMBOL(sock_sendmsg);
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
- iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
return sock_sendmsg(sock, msg);
}
EXPORT_SYMBOL(kernel_sendmsg);
@@ -648,7 +648,7 @@ int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
if (!sock->ops->sendmsg_locked)
return sock_no_sendmsg_locked(sk, msg, size);
- iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg));
}
@@ -823,7 +823,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
mm_segment_t oldfs = get_fs();
int result;
- iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, READ, vec, num, size);
set_fs(KERNEL_DS);
result = sock_recvmsg(sock, msg, flags);
set_fs(oldfs);
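
These kernel_sendmsg()/kernel_recvmsg() hunks follow the same pattern as the rest of the series: iov_iter_kvec() and iov_iter_bvec() now take only the data direction, the iterator type being implied by the constructor, and callers that used to peek at msg_iter.type switch to helpers such as iov_iter_is_kvec() (see the tls changes further down). A minimal sketch of the new convention, with hypothetical buffer names:

	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	int ret;

	/* direction only; ITER_KVEC is implied by iov_iter_kvec() itself */
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len);
	ret = sock_recvmsg(sock, &msg, msg.msg_flags);
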
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 860f2a1bbb67..1ece4bc3eb8d 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -76,6 +76,7 @@ struct rsi {
struct xdr_netobj in_handle, in_token;
struct xdr_netobj out_handle, out_token;
int major_status, minor_status;
+ struct rcu_head rcu_head;
};
static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
@@ -89,13 +90,21 @@ static void rsi_free(struct rsi *rsii)
kfree(rsii->out_token.data);
}
-static void rsi_put(struct kref *ref)
+static void rsi_free_rcu(struct rcu_head *head)
{
- struct rsi *rsii = container_of(ref, struct rsi, h.ref);
+ struct rsi *rsii = container_of(head, struct rsi, rcu_head);
+
rsi_free(rsii);
kfree(rsii);
}
+static void rsi_put(struct kref *ref)
+{
+ struct rsi *rsii = container_of(ref, struct rsi, h.ref);
+
+ call_rcu(&rsii->rcu_head, rsi_free_rcu);
+}
+
static inline int rsi_hash(struct rsi *item)
{
return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
@@ -282,7 +291,7 @@ static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
struct cache_head *ch;
int hash = rsi_hash(item);
- ch = sunrpc_cache_lookup(cd, &item->h, hash);
+ ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct rsi, h);
else
@@ -330,6 +339,7 @@ struct rsc {
struct svc_cred cred;
struct gss_svc_seq_data seqdata;
struct gss_ctx *mechctx;
+ struct rcu_head rcu_head;
};
static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
@@ -343,12 +353,22 @@ static void rsc_free(struct rsc *rsci)
free_svc_cred(&rsci->cred);
}
+static void rsc_free_rcu(struct rcu_head *head)
+{
+ struct rsc *rsci = container_of(head, struct rsc, rcu_head);
+
+ kfree(rsci->handle.data);
+ kfree(rsci);
+}
+
static void rsc_put(struct kref *ref)
{
struct rsc *rsci = container_of(ref, struct rsc, h.ref);
- rsc_free(rsci);
- kfree(rsci);
+ if (rsci->mechctx)
+ gss_delete_sec_context(&rsci->mechctx);
+ free_svc_cred(&rsci->cred);
+ call_rcu(&rsci->rcu_head, rsc_free_rcu);
}
static inline int
@@ -542,7 +562,7 @@ static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
struct cache_head *ch;
int hash = rsc_hash(item);
- ch = sunrpc_cache_lookup(cd, &item->h, hash);
+ ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct rsc, h);
else
@@ -1764,14 +1784,21 @@ out_err:
}
static void
-svcauth_gss_domain_release(struct auth_domain *dom)
+svcauth_gss_domain_release_rcu(struct rcu_head *head)
{
+ struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
struct gss_domain *gd = container_of(dom, struct gss_domain, h);
kfree(dom->name);
kfree(gd);
}
+static void
+svcauth_gss_domain_release(struct auth_domain *dom)
+{
+ call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
+}
+
static struct auth_ops svcauthops_gss = {
.name = "rpcsec_gss",
.owner = THIS_MODULE,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 109fbe591e7b..f96345b1180e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,28 +54,33 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
-struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
- struct cache_head *key, int hash)
+static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
+ struct cache_head *key,
+ int hash)
{
- struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
- struct hlist_head *head;
-
- head = &detail->hash_table[hash];
-
- read_lock(&detail->hash_lock);
+ struct hlist_head *head = &detail->hash_table[hash];
+ struct cache_head *tmp;
- hlist_for_each_entry(tmp, head, cache_list) {
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, head, cache_list) {
if (detail->match(tmp, key)) {
if (cache_is_expired(detail, tmp))
- /* This entry is expired, we will discard it. */
- break;
- cache_get(tmp);
- read_unlock(&detail->hash_lock);
+ continue;
+ tmp = cache_get_rcu(tmp);
+ rcu_read_unlock();
return tmp;
}
}
- read_unlock(&detail->hash_lock);
- /* Didn't find anything, insert an empty entry */
+ rcu_read_unlock();
+ return NULL;
+}
+
+static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
+ struct cache_head *key,
+ int hash)
+{
+ struct cache_head *new, *tmp, *freeme = NULL;
+ struct hlist_head *head = &detail->hash_table[hash];
new = detail->alloc();
if (!new)
@@ -87,35 +92,46 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
cache_init(new, detail);
detail->init(new, key);
- write_lock(&detail->hash_lock);
+ spin_lock(&detail->hash_lock);
/* check if entry appeared while we slept */
- hlist_for_each_entry(tmp, head, cache_list) {
+ hlist_for_each_entry_rcu(tmp, head, cache_list) {
if (detail->match(tmp, key)) {
if (cache_is_expired(detail, tmp)) {
- hlist_del_init(&tmp->cache_list);
+ hlist_del_init_rcu(&tmp->cache_list);
detail->entries --;
freeme = tmp;
break;
}
cache_get(tmp);
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
cache_put(new, detail);
return tmp;
}
}
- hlist_add_head(&new->cache_list, head);
+ hlist_add_head_rcu(&new->cache_list, head);
detail->entries++;
cache_get(new);
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
if (freeme)
cache_put(freeme, detail);
return new;
}
-EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
+struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
+ struct cache_head *key, int hash)
+{
+ struct cache_head *ret;
+
+ ret = sunrpc_cache_find_rcu(detail, key, hash);
+ if (ret)
+ return ret;
+ /* Didn't find anything, insert an empty entry */
+ return sunrpc_cache_add_entry(detail, key, hash);
+}
+EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
@@ -151,18 +167,18 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *tmp;
if (!test_bit(CACHE_VALID, &old->flags)) {
- write_lock(&detail->hash_lock);
+ spin_lock(&detail->hash_lock);
if (!test_bit(CACHE_VALID, &old->flags)) {
if (test_bit(CACHE_NEGATIVE, &new->flags))
set_bit(CACHE_NEGATIVE, &old->flags);
else
detail->update(old, new);
cache_fresh_locked(old, new->expiry_time, detail);
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(old, detail);
return old;
}
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
}
/* We need to insert a new entry */
tmp = detail->alloc();
@@ -173,7 +189,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
cache_init(tmp, detail);
detail->init(tmp, old);
- write_lock(&detail->hash_lock);
+ spin_lock(&detail->hash_lock);
if (test_bit(CACHE_NEGATIVE, &new->flags))
set_bit(CACHE_NEGATIVE, &tmp->flags);
else
@@ -183,7 +199,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
cache_get(tmp);
cache_fresh_locked(tmp, new->expiry_time, detail);
cache_fresh_locked(old, 0, detail);
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(tmp, detail);
cache_fresh_unlocked(old, detail);
cache_put(old, detail);
@@ -223,7 +239,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
{
int rv;
- write_lock(&detail->hash_lock);
+ spin_lock(&detail->hash_lock);
rv = cache_is_valid(h);
if (rv == -EAGAIN) {
set_bit(CACHE_NEGATIVE, &h->flags);
@@ -231,7 +247,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
detail);
rv = -ENOENT;
}
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(h, detail);
return rv;
}
@@ -341,7 +357,7 @@ static struct delayed_work cache_cleaner;
void sunrpc_init_cache_detail(struct cache_detail *cd)
{
- rwlock_init(&cd->hash_lock);
+ spin_lock_init(&cd->hash_lock);
INIT_LIST_HEAD(&cd->queue);
spin_lock(&cache_list_lock);
cd->nextcheck = 0;
@@ -361,11 +377,11 @@ void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
cache_purge(cd);
spin_lock(&cache_list_lock);
- write_lock(&cd->hash_lock);
+ spin_lock(&cd->hash_lock);
if (current_detail == cd)
current_detail = NULL;
list_del_init(&cd->others);
- write_unlock(&cd->hash_lock);
+ spin_unlock(&cd->hash_lock);
spin_unlock(&cache_list_lock);
if (list_empty(&cache_list)) {
/* module must be being unloaded so its safe to kill the worker */
@@ -422,7 +438,7 @@ static int cache_clean(void)
struct hlist_head *head;
struct hlist_node *tmp;
- write_lock(&current_detail->hash_lock);
+ spin_lock(&current_detail->hash_lock);
/* Ok, now to clean this strand */
@@ -433,13 +449,13 @@ static int cache_clean(void)
if (!cache_is_expired(current_detail, ch))
continue;
- hlist_del_init(&ch->cache_list);
+ hlist_del_init_rcu(&ch->cache_list);
current_detail->entries--;
rv = 1;
break;
}
- write_unlock(&current_detail->hash_lock);
+ spin_unlock(&current_detail->hash_lock);
d = current_detail;
if (!ch)
current_index ++;
@@ -494,9 +510,9 @@ void cache_purge(struct cache_detail *detail)
struct hlist_node *tmp = NULL;
int i = 0;
- write_lock(&detail->hash_lock);
+ spin_lock(&detail->hash_lock);
if (!detail->entries) {
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
return;
}
@@ -504,17 +520,17 @@ void cache_purge(struct cache_detail *detail)
for (i = 0; i < detail->hash_size; i++) {
head = &detail->hash_table[i];
hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
- hlist_del_init(&ch->cache_list);
+ hlist_del_init_rcu(&ch->cache_list);
detail->entries--;
set_bit(CACHE_CLEANED, &ch->flags);
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(ch, detail);
cache_put(ch, detail);
- write_lock(&detail->hash_lock);
+ spin_lock(&detail->hash_lock);
}
}
- write_unlock(&detail->hash_lock);
+ spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);
@@ -1289,21 +1305,19 @@ EXPORT_SYMBOL_GPL(qword_get);
* get a header, then pass each real item in the cache
*/
-void *cache_seq_start(struct seq_file *m, loff_t *pos)
- __acquires(cd->hash_lock)
+static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
unsigned int hash, entry;
struct cache_head *ch;
struct cache_detail *cd = m->private;
- read_lock(&cd->hash_lock);
if (!n--)
return SEQ_START_TOKEN;
hash = n >> 32;
entry = n & ((1LL<<32) - 1);
- hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
+ hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
if (!entry--)
return ch;
n &= ~((1LL<<32) - 1);
@@ -1315,12 +1329,12 @@ void *cache_seq_start(struct seq_file *m, loff_t *pos)
if (hash >= cd->hash_size)
return NULL;
*pos = n+1;
- return hlist_entry_safe(cd->hash_table[hash].first,
+ return hlist_entry_safe(rcu_dereference_raw(
+ hlist_first_rcu(&cd->hash_table[hash])),
struct cache_head, cache_list);
}
-EXPORT_SYMBOL_GPL(cache_seq_start);
-void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
+static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
struct cache_head *ch = p;
int hash = (*pos >> 32);
@@ -1333,7 +1347,8 @@ void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
*pos += 1LL<<32;
} else {
++*pos;
- return hlist_entry_safe(ch->cache_list.next,
+ return hlist_entry_safe(rcu_dereference_raw(
+ hlist_next_rcu(&ch->cache_list)),
struct cache_head, cache_list);
}
*pos &= ~((1LL<<32) - 1);
@@ -1345,18 +1360,32 @@ void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
if (hash >= cd->hash_size)
return NULL;
++*pos;
- return hlist_entry_safe(cd->hash_table[hash].first,
+ return hlist_entry_safe(rcu_dereference_raw(
+ hlist_first_rcu(&cd->hash_table[hash])),
struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);
-void cache_seq_stop(struct seq_file *m, void *p)
- __releases(cd->hash_lock)
+void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
+ __acquires(RCU)
{
- struct cache_detail *cd = m->private;
- read_unlock(&cd->hash_lock);
+ rcu_read_lock();
+ return __cache_seq_start(m, pos);
+}
+EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
+
+void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
+{
+ return cache_seq_next(file, p, pos);
+}
+EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
+
+void cache_seq_stop_rcu(struct seq_file *m, void *p)
+ __releases(RCU)
+{
+ rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(cache_seq_stop);
+EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
static int c_show(struct seq_file *m, void *p)
{
@@ -1384,9 +1413,9 @@ static int c_show(struct seq_file *m, void *p)
}
static const struct seq_operations cache_content_op = {
- .start = cache_seq_start,
- .next = cache_seq_next,
- .stop = cache_seq_stop,
+ .start = cache_seq_start_rcu,
+ .next = cache_seq_next_rcu,
+ .stop = cache_seq_stop_rcu,
.show = c_show,
};
@@ -1844,13 +1873,13 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
- write_lock(&cd->hash_lock);
+ spin_lock(&cd->hash_lock);
if (!hlist_unhashed(&h->cache_list)){
- hlist_del_init(&h->cache_list);
+ hlist_del_init_rcu(&h->cache_list);
cd->entries--;
- write_unlock(&cd->hash_lock);
+ spin_unlock(&cd->hash_lock);
cache_put(h, cd);
} else
- write_unlock(&cd->hash_lock);
+ spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
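
Because sunrpc_cache_lookup_rcu() walks the hash chains under rcu_read_lock(), every converted cache (rsi and rsc above, ip_map and unix_gid below) must defer freeing its entries past a grace period. A hedged sketch of that pattern, with hypothetical names:

struct my_cache_entry {
	struct cache_head	h;
	struct rcu_head		rcu;
	/* ... payload ... */
};

static void my_entry_put(struct kref *kref)
{
	struct cache_head *ch = container_of(kref, struct cache_head, ref);
	struct my_cache_entry *e = container_of(ch, struct my_cache_entry, h);

	/* an RCU reader may still be looking at the entry; defer the free */
	kfree_rcu(e, rcu);
}
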
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 87533fbb96cf..51d36230b6e3 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -987,7 +987,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
spin_lock(&xprt->xpt_lock);
while (!list_empty(&xprt->xpt_users)) {
u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
- list_del(&u->list);
+ list_del_init(&u->list);
u->callback(u);
}
spin_unlock(&xprt->xpt_lock);
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index bb8db3cb8032..775b8c94265b 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -27,12 +27,32 @@
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
-static DEFINE_SPINLOCK(authtab_lock);
-static struct auth_ops *authtab[RPC_AUTH_MAXFLAVOR] = {
- [0] = &svcauth_null,
- [1] = &svcauth_unix,
+static struct auth_ops __rcu *authtab[RPC_AUTH_MAXFLAVOR] = {
+ [RPC_AUTH_NULL] = (struct auth_ops __force __rcu *)&svcauth_null,
+ [RPC_AUTH_UNIX] = (struct auth_ops __force __rcu *)&svcauth_unix,
};
+static struct auth_ops *
+svc_get_auth_ops(rpc_authflavor_t flavor)
+{
+ struct auth_ops *aops;
+
+ if (flavor >= RPC_AUTH_MAXFLAVOR)
+ return NULL;
+ rcu_read_lock();
+ aops = rcu_dereference(authtab[flavor]);
+ if (aops != NULL && !try_module_get(aops->owner))
+ aops = NULL;
+ rcu_read_unlock();
+ return aops;
+}
+
+static void
+svc_put_auth_ops(struct auth_ops *aops)
+{
+ module_put(aops->owner);
+}
+
int
svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
{
@@ -45,14 +65,11 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
dprintk("svc: svc_authenticate (%d)\n", flavor);
- spin_lock(&authtab_lock);
- if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor]) ||
- !try_module_get(aops->owner)) {
- spin_unlock(&authtab_lock);
+ aops = svc_get_auth_ops(flavor);
+ if (aops == NULL) {
*authp = rpc_autherr_badcred;
return SVC_DENIED;
}
- spin_unlock(&authtab_lock);
rqstp->rq_auth_slack = 0;
init_svc_cred(&rqstp->rq_cred);
@@ -82,7 +99,7 @@ int svc_authorise(struct svc_rqst *rqstp)
if (aops) {
rv = aops->release(rqstp);
- module_put(aops->owner);
+ svc_put_auth_ops(aops);
}
return rv;
}
@@ -90,13 +107,14 @@ int svc_authorise(struct svc_rqst *rqstp)
int
svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops)
{
+ struct auth_ops *old;
int rv = -EINVAL;
- spin_lock(&authtab_lock);
- if (flavor < RPC_AUTH_MAXFLAVOR && authtab[flavor] == NULL) {
- authtab[flavor] = aops;
- rv = 0;
+
+ if (flavor < RPC_AUTH_MAXFLAVOR) {
+ old = cmpxchg((struct auth_ops ** __force)&authtab[flavor], NULL, aops);
+ if (old == NULL || old == aops)
+ rv = 0;
}
- spin_unlock(&authtab_lock);
return rv;
}
EXPORT_SYMBOL_GPL(svc_auth_register);
@@ -104,10 +122,8 @@ EXPORT_SYMBOL_GPL(svc_auth_register);
void
svc_auth_unregister(rpc_authflavor_t flavor)
{
- spin_lock(&authtab_lock);
if (flavor < RPC_AUTH_MAXFLAVOR)
- authtab[flavor] = NULL;
- spin_unlock(&authtab_lock);
+ rcu_assign_pointer(authtab[flavor], NULL);
}
EXPORT_SYMBOL_GPL(svc_auth_unregister);
@@ -127,10 +143,11 @@ static struct hlist_head auth_domain_table[DN_HASHMAX];
static DEFINE_SPINLOCK(auth_domain_lock);
static void auth_domain_release(struct kref *kref)
+ __releases(&auth_domain_lock)
{
struct auth_domain *dom = container_of(kref, struct auth_domain, ref);
- hlist_del(&dom->hash);
+ hlist_del_rcu(&dom->hash);
dom->flavour->domain_release(dom);
spin_unlock(&auth_domain_lock);
}
@@ -159,7 +176,7 @@ auth_domain_lookup(char *name, struct auth_domain *new)
}
}
if (new)
- hlist_add_head(&new->hash, head);
+ hlist_add_head_rcu(&new->hash, head);
spin_unlock(&auth_domain_lock);
return new;
}
@@ -167,6 +184,21 @@ EXPORT_SYMBOL_GPL(auth_domain_lookup);
struct auth_domain *auth_domain_find(char *name)
{
- return auth_domain_lookup(name, NULL);
+ struct auth_domain *hp;
+ struct hlist_head *head;
+
+ head = &auth_domain_table[hash_str(name, DN_HASHBITS)];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(hp, head, hash) {
+ if (strcmp(hp->name, name)==0) {
+ if (!kref_get_unless_zero(&hp->ref))
+ hp = NULL;
+ rcu_read_unlock();
+ return hp;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
}
EXPORT_SYMBOL_GPL(auth_domain_find);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index af7f28fb8102..fb9041b92f72 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -37,20 +37,26 @@ struct unix_domain {
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
-static void svcauth_unix_domain_release(struct auth_domain *dom)
+static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
+ struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
struct unix_domain *ud = container_of(dom, struct unix_domain, h);
kfree(dom->name);
kfree(ud);
}
+static void svcauth_unix_domain_release(struct auth_domain *dom)
+{
+ call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
+}
+
struct auth_domain *unix_domain_find(char *name)
{
struct auth_domain *rv;
struct unix_domain *new = NULL;
- rv = auth_domain_lookup(name, NULL);
+ rv = auth_domain_find(name);
while(1) {
if (rv) {
if (new && rv != &new->h)
@@ -91,6 +97,7 @@ struct ip_map {
char m_class[8]; /* e.g. "nfsd" */
struct in6_addr m_addr;
struct unix_domain *m_client;
+ struct rcu_head m_rcu;
};
static void ip_map_put(struct kref *kref)
@@ -101,7 +108,7 @@ static void ip_map_put(struct kref *kref)
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
auth_domain_put(&im->m_client->h);
- kfree(im);
+ kfree_rcu(im, m_rcu);
}
static inline int hash_ip6(const struct in6_addr *ip)
@@ -280,9 +287,9 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
strcpy(ip.m_class, class);
ip.m_addr = *addr;
- ch = sunrpc_cache_lookup(cd, &ip.h,
- hash_str(class, IP_HASHBITS) ^
- hash_ip6(addr));
+ ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
+ hash_str(class, IP_HASHBITS) ^
+ hash_ip6(addr));
if (ch)
return container_of(ch, struct ip_map, h);
@@ -412,6 +419,7 @@ struct unix_gid {
struct cache_head h;
kuid_t uid;
struct group_info *gi;
+ struct rcu_head rcu;
};
static int unix_gid_hash(kuid_t uid)
@@ -426,7 +434,7 @@ static void unix_gid_put(struct kref *kref)
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
put_group_info(ug->gi);
- kfree(ug);
+ kfree_rcu(ug, rcu);
}
static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
@@ -619,7 +627,7 @@ static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
struct cache_head *ch;
ug.uid = uid;
- ch = sunrpc_cache_lookup(cd, &ug.h, unix_gid_hash(uid));
+ ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
if (ch)
return container_of(ch, struct unix_gid, h);
else
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index db8bb6b3a2b0..986f3ed7d1a2 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -325,59 +325,34 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
/*
* Generic recvfrom routine.
*/
-static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
- int buflen)
+static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
+ unsigned int nr, size_t buflen, unsigned int base)
{
struct svc_sock *svsk =
container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
- struct msghdr msg = {
- .msg_flags = MSG_DONTWAIT,
- };
- int len;
+ struct msghdr msg = { NULL };
+ ssize_t len;
rqstp->rq_xprt_hlen = 0;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen);
- len = sock_recvmsg(svsk->sk_sock, &msg, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ, iov, nr, buflen);
+ if (base != 0) {
+ iov_iter_advance(&msg.msg_iter, base);
+ buflen -= base;
+ }
+ len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
/* If we read a full record, then assume there may be more
* data to read (stream based sockets only!)
*/
if (len == buflen)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- dprintk("svc: socket %p recvfrom(%p, %zu) = %d\n",
+ dprintk("svc: socket %p recvfrom(%p, %zu) = %zd\n",
svsk, iov[0].iov_base, iov[0].iov_len, len);
return len;
}
-static int svc_partial_recvfrom(struct svc_rqst *rqstp,
- struct kvec *iov, int nr,
- int buflen, unsigned int base)
-{
- size_t save_iovlen;
- void *save_iovbase;
- unsigned int i;
- int ret;
-
- if (base == 0)
- return svc_recvfrom(rqstp, iov, nr, buflen);
-
- for (i = 0; i < nr; i++) {
- if (iov[i].iov_len > base)
- break;
- base -= iov[i].iov_len;
- }
- save_iovlen = iov[i].iov_len;
- save_iovbase = iov[i].iov_base;
- iov[i].iov_len -= base;
- iov[i].iov_base += base;
- ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen);
- iov[i].iov_len = save_iovlen;
- iov[i].iov_base = save_iovbase;
- return ret;
-}
-
/*
* Set socket snd and rcv buffer lengths
*/
@@ -962,7 +937,8 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
- if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
+ len = svc_recvfrom(rqstp, &iov, 1, want, 0);
+ if (len < 0)
goto error;
svsk->sk_tcplen += len;
@@ -1088,14 +1064,13 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
vec = rqstp->rq_vec;
- pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
- svsk->sk_datalen + want);
+ pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], base + want);
rqstp->rq_respages = &rqstp->rq_pages[pnum];
rqstp->rq_next_page = rqstp->rq_respages + 1;
/* Now receive data */
- len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
+ len = svc_recvfrom(rqstp, vec, pnum, base + want, base);
if (len >= 0) {
svsk->sk_tcplen += len;
svsk->sk_datalen += len;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index d3a1a237cee6..f3c147d70286 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -5,8 +5,6 @@
* Support for backward direction RPCs on RPC/RDMA (server-side).
*/
-#include <linux/module.h>
-
#include <linux/sunrpc/svc_rdma.h>
#include "xprt_rdma.h"
@@ -32,7 +30,6 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct kvec *dst, *src = &rcvbuf->head[0];
struct rpc_rqst *req;
- unsigned long cwnd;
u32 credits;
size_t len;
__be32 xid;
@@ -66,6 +63,8 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
if (dst->iov_len < len)
goto out_unlock;
memcpy(dst->iov_base, p, len);
+ xprt_pin_rqst(req);
+ spin_unlock(&xprt->queue_lock);
credits = be32_to_cpup(rdma_resp + 2);
if (credits == 0)
@@ -74,15 +73,13 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
credits = r_xprt->rx_buf.rb_bc_max_requests;
spin_lock_bh(&xprt->transport_lock);
- cwnd = xprt->cwnd;
xprt->cwnd = credits << RPC_CWNDSHIFT;
- if (xprt->cwnd > cwnd)
- xprt_release_rqst_cong(req->rq_task);
spin_unlock_bh(&xprt->transport_lock);
-
+ spin_lock(&xprt->queue_lock);
ret = 0;
xprt_complete_rqst(req->rq_task, rcvbuf->len);
+ xprt_unpin_rqst(req);
rcvbuf->len = 0;
out_unlock:
@@ -251,7 +248,6 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
xprt_free(xprt);
- module_put(THIS_MODULE);
}
static const struct rpc_xprt_ops xprt_rdma_bc_procs = {
@@ -323,20 +319,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
args->bc_xprt->xpt_bc_xprt = xprt;
xprt->bc_xprt = args->bc_xprt;
- if (!try_module_get(THIS_MODULE))
- goto out_fail;
-
/* Final put for backchannel xprt is in __svc_rdma_free */
xprt_get(xprt);
return xprt;
-
-out_fail:
- xprt_rdma_free_addresses(xprt);
- args->bc_xprt->xpt_bc_xprt = NULL;
- args->bc_xprt->xpt_bc_xps = NULL;
- xprt_put(xprt);
- xprt_free(xprt);
- return ERR_PTR(-EINVAL);
}
struct xprt_class xprt_rdma_bc = {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 2848cafd4a17..2f7ec8912f49 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -475,10 +475,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
/* Qualify the transport resource defaults with the
* capabilities of this particular device */
- newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
- /* transport hdr, head iovec, one page list entry, tail iovec */
- if (newxprt->sc_max_send_sges < 4) {
- pr_err("svcrdma: too few Send SGEs available (%d)\n",
+ /* Transport header, head iovec, tail iovec */
+ newxprt->sc_max_send_sges = 3;
+ /* Add one SGE per page list entry */
+ newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
+ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
+ pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
newxprt->sc_max_send_sges);
goto errout;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1b51e04d3566..ae77c71c1f64 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -361,7 +361,7 @@ static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
struct kvec *kvec, size_t count, size_t seek)
{
- iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, kvec, 1, count);
+ iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
return xs_sock_recvmsg(sock, msg, flags, seek);
}
@@ -370,7 +370,7 @@ xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
struct bio_vec *bvec, unsigned long nr, size_t count,
size_t seek)
{
- iov_iter_bvec(&msg->msg_iter, READ | ITER_BVEC, bvec, nr, count);
+ iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
return xs_sock_recvmsg(sock, msg, flags, seek);
}
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 4bdea0057171..efb16f69bd2c 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -394,7 +394,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
iov.iov_base = &s;
iov.iov_len = sizeof(s);
msg.msg_name = NULL;
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 276edbc04f38..d753e362d2d9 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -489,7 +489,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
iov.iov_base = kaddr + offset;
iov.iov_len = size;
- iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+ iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
rc = tls_push_data(sk, &msg_iter, size,
flags, TLS_RECORD_TYPE_DATA);
kunmap(page);
@@ -538,7 +538,7 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
{
struct iov_iter msg_iter;
- iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
+ iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 5cd88ba8acd1..7b1af8b59cd2 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -799,7 +799,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
unsigned char record_type = TLS_RECORD_TYPE_DATA;
- bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+ bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool eor = !(msg->msg_flags & MSG_MORE);
size_t try_to_copy, copied = 0;
struct sk_msg *msg_pl, *msg_en;
@@ -1457,7 +1457,7 @@ int tls_sw_recvmsg(struct sock *sk,
bool cmsg = false;
int target, err = 0;
long timeo;
- bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+ bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
int num_async = 0;
flags |= nonblock;
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 4a9ee2d83158..140270a13d54 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -8,7 +8,6 @@ config XFRM
config XFRM_OFFLOAD
bool
- depends on XFRM
config XFRM_ALGO
tristate
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index 2ad33ce1ea17..eca8d84d99bf 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index 2535c3677c7b..ca7960adf5a3 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -71,11 +71,19 @@
#define MBOCHS_NAME "mbochs"
#define MBOCHS_CLASS_NAME "mbochs"
+#define MBOCHS_EDID_REGION_INDEX VFIO_PCI_NUM_REGIONS
+#define MBOCHS_NUM_REGIONS (MBOCHS_EDID_REGION_INDEX+1)
+
#define MBOCHS_CONFIG_SPACE_SIZE 0xff
#define MBOCHS_MMIO_BAR_OFFSET PAGE_SIZE
#define MBOCHS_MMIO_BAR_SIZE PAGE_SIZE
-#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \
+#define MBOCHS_EDID_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \
MBOCHS_MMIO_BAR_SIZE)
+#define MBOCHS_EDID_SIZE PAGE_SIZE
+#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_EDID_OFFSET + \
+ MBOCHS_EDID_SIZE)
+
+#define MBOCHS_EDID_BLOB_OFFSET (MBOCHS_EDID_SIZE/2)
#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)
@@ -95,16 +103,24 @@ MODULE_PARM_DESC(mem, "megabytes available to " MBOCHS_NAME " devices");
static const struct mbochs_type {
const char *name;
u32 mbytes;
+ u32 max_x;
+ u32 max_y;
} mbochs_types[] = {
{
.name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1,
.mbytes = 4,
+ .max_x = 800,
+ .max_y = 600,
}, {
.name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2,
.mbytes = 16,
+ .max_x = 1920,
+ .max_y = 1440,
}, {
.name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3,
.mbytes = 64,
+ .max_x = 0,
+ .max_y = 0,
},
};
@@ -115,6 +131,11 @@ static struct cdev mbochs_cdev;
static struct device mbochs_dev;
static int mbochs_used_mbytes;
+struct vfio_region_info_ext {
+ struct vfio_region_info base;
+ struct vfio_region_info_cap_type type;
+};
+
struct mbochs_mode {
u32 drm_format;
u32 bytepp;
@@ -144,13 +165,14 @@ struct mdev_state {
u32 memory_bar_mask;
struct mutex ops_lock;
struct mdev_device *mdev;
- struct vfio_device_info dev_info;
const struct mbochs_type *type;
u16 vbe[VBE_DISPI_INDEX_COUNT];
u64 memsize;
struct page **pages;
pgoff_t pagecount;
+ struct vfio_region_gfx_edid edid_regs;
+ u8 edid_blob[0x400];
struct list_head dmabufs;
u32 active_id;
@@ -342,10 +364,20 @@ static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count)
{
struct device *dev = mdev_dev(mdev_state->mdev);
+ struct vfio_region_gfx_edid *edid;
u16 reg16 = 0;
int index;
switch (offset) {
+ case 0x000 ... 0x3ff: /* edid block */
+ edid = &mdev_state->edid_regs;
+ if (edid->link_state != VFIO_DEVICE_GFX_LINK_STATE_UP ||
+ offset >= edid->edid_size) {
+ memset(buf, 0, count);
+ break;
+ }
+ memcpy(buf, mdev_state->edid_blob + offset, count);
+ break;
case 0x500 ... 0x515: /* bochs dispi interface */
if (count != 2)
goto unhandled;
@@ -365,6 +397,44 @@ unhandled:
}
}
+static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count, bool is_write)
+{
+ char *regs = (void *)&mdev_state->edid_regs;
+
+ if (offset + count > sizeof(mdev_state->edid_regs))
+ return;
+ if (count != 4)
+ return;
+ if (offset % 4)
+ return;
+
+ if (is_write) {
+ switch (offset) {
+ case offsetof(struct vfio_region_gfx_edid, link_state):
+ case offsetof(struct vfio_region_gfx_edid, edid_size):
+ memcpy(regs + offset, buf, count);
+ break;
+ default:
+ /* read-only regs */
+ break;
+ }
+ } else {
+ memcpy(buf, regs + offset, count);
+ }
+}
+
+static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count, bool is_write)
+{
+ if (offset + count > mdev_state->edid_regs.edid_max_size)
+ return;
+ if (is_write)
+ memcpy(mdev_state->edid_blob + offset, buf, count);
+ else
+ memcpy(buf, mdev_state->edid_blob + offset, count);
+}
+
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
loff_t pos, bool is_write)
{
@@ -384,13 +454,25 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
memcpy(buf, (mdev_state->vconfig + pos), count);
} else if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
- pos + count <= MBOCHS_MEMORY_BAR_OFFSET) {
+ pos + count <= (MBOCHS_MMIO_BAR_OFFSET +
+ MBOCHS_MMIO_BAR_SIZE)) {
pos -= MBOCHS_MMIO_BAR_OFFSET;
if (is_write)
handle_mmio_write(mdev_state, pos, buf, count);
else
handle_mmio_read(mdev_state, pos, buf, count);
+ } else if (pos >= MBOCHS_EDID_OFFSET &&
+ pos + count <= (MBOCHS_EDID_OFFSET +
+ MBOCHS_EDID_SIZE)) {
+ pos -= MBOCHS_EDID_OFFSET;
+ if (pos < MBOCHS_EDID_BLOB_OFFSET) {
+ handle_edid_regs(mdev_state, pos, buf, count, is_write);
+ } else {
+ pos -= MBOCHS_EDID_BLOB_OFFSET;
+ handle_edid_blob(mdev_state, pos, buf, count, is_write);
+ }
+
} else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
pos + count <=
MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
@@ -471,6 +553,10 @@ static int mbochs_create(struct kobject *kobj, struct mdev_device *mdev)
mdev_state->next_id = 1;
mdev_state->type = type;
+ mdev_state->edid_regs.max_xres = type->max_x;
+ mdev_state->edid_regs.max_yres = type->max_y;
+ mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
+ mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
mbochs_create_config_space(mdev_state);
mbochs_reset(mdev);
@@ -932,16 +1018,16 @@ static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
}
static int mbochs_get_region_info(struct mdev_device *mdev,
- struct vfio_region_info *region_info,
- u16 *cap_type_id, void **cap_type)
+ struct vfio_region_info_ext *ext)
{
+ struct vfio_region_info *region_info = &ext->base;
struct mdev_state *mdev_state;
mdev_state = mdev_get_drvdata(mdev);
if (!mdev_state)
return -EINVAL;
- if (region_info->index >= VFIO_PCI_NUM_REGIONS)
+ if (region_info->index >= MBOCHS_NUM_REGIONS)
return -EINVAL;
switch (region_info->index) {
@@ -964,6 +1050,20 @@ static int mbochs_get_region_info(struct mdev_device *mdev,
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE);
break;
+ case MBOCHS_EDID_REGION_INDEX:
+ ext->base.argsz = sizeof(*ext);
+ ext->base.offset = MBOCHS_EDID_OFFSET;
+ ext->base.size = MBOCHS_EDID_SIZE;
+ ext->base.flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_CAPS);
+ ext->base.cap_offset = offsetof(typeof(*ext), type);
+ ext->type.header.id = VFIO_REGION_INFO_CAP_TYPE;
+ ext->type.header.version = 1;
+ ext->type.header.next = 0;
+ ext->type.type = VFIO_REGION_TYPE_GFX;
+ ext->type.subtype = VFIO_REGION_SUBTYPE_GFX_EDID;
+ break;
default:
region_info->size = 0;
region_info->offset = 0;
@@ -984,7 +1084,7 @@ static int mbochs_get_device_info(struct mdev_device *mdev,
struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
- dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
+ dev_info->num_regions = MBOCHS_NUM_REGIONS;
dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
return 0;
}
@@ -1084,7 +1184,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg)
{
int ret = 0;
- unsigned long minsz;
+ unsigned long minsz, outsz;
struct mdev_state *mdev_state;
mdev_state = mdev_get_drvdata(mdev);
@@ -1106,8 +1206,6 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (ret)
return ret;
- memcpy(&mdev_state->dev_info, &info, sizeof(info));
-
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
@@ -1115,24 +1213,24 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
case VFIO_DEVICE_GET_REGION_INFO:
{
- struct vfio_region_info info;
- u16 cap_type_id = 0;
- void *cap_type = NULL;
+ struct vfio_region_info_ext info;
- minsz = offsetofend(struct vfio_region_info, offset);
+ minsz = offsetofend(typeof(info), base.offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
- if (info.argsz < minsz)
+ outsz = info.base.argsz;
+ if (outsz < minsz)
+ return -EINVAL;
+ if (outsz > sizeof(info))
return -EINVAL;
- ret = mbochs_get_region_info(mdev, &info, &cap_type_id,
- &cap_type);
+ ret = mbochs_get_region_info(mdev, &info);
if (ret)
return ret;
- if (copy_to_user((void __user *)arg, &info, minsz))
+ if (copy_to_user((void __user *)arg, &info, outsz))
return -EFAULT;
return 0;
@@ -1148,7 +1246,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
return -EFAULT;
if ((info.argsz < minsz) ||
- (info.index >= mdev_state->dev_info.num_irqs))
+ (info.index >= VFIO_PCI_NUM_IRQS))
return -EINVAL;
ret = mbochs_get_irq_info(mdev, &info);
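As an aside, a user-space VFIO client would discover the EDID region added above by walking the region indices reported in num_regions and checking the type capability. A minimal sketch in C, assuming an open VFIO device fd and the UAPI definitions from <linux/vfio.h>; error handling and a full capability-chain walk are omitted:

/*
 * Sketch only: mbochs places the single capability right after the base
 * struct (cap_offset == sizeof(struct vfio_region_info)); a generic client
 * should follow cap_offset/header.next instead of assuming that layout.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int region_is_edid(int device_fd, unsigned int index)
{
	struct {
		struct vfio_region_info info;
		struct vfio_region_info_cap_type type;
	} buf;

	memset(&buf, 0, sizeof(buf));
	buf.info.argsz = sizeof(buf);
	buf.info.index = index;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &buf.info) < 0)
		return 0;
	if (!(buf.info.flags & VFIO_REGION_INFO_FLAG_CAPS))
		return 0;

	return buf.type.header.id == VFIO_REGION_INFO_CAP_TYPE &&
	       buf.type.type == VFIO_REGION_TYPE_GFX &&
	       buf.type.subtype == VFIO_REGION_SUBTYPE_GFX_EDID;
}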
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index ca21a35fa244..bb015551c2d9 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -140,17 +140,9 @@ cc-option-yn = $(call try-run,\
cc-disable-warning = $(call try-run,\
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
-# cc-name
-# Expands to either gcc or clang
-cc-name = $(shell $(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
-
# cc-version
cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-# cc-fullversion
-cc-fullversion = $(shell $(CONFIG_SHELL) \
- $(srctree)/scripts/gcc-version.sh -p $(CC))
-
# cc-ifversion
# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 24b2fb1d1297..768306add591 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -29,6 +29,7 @@ warning-1 += $(call cc-option, -Wmissing-include-dirs)
warning-1 += $(call cc-option, -Wunused-but-set-variable)
warning-1 += $(call cc-option, -Wunused-const-variable)
warning-1 += $(call cc-option, -Wpacked-not-aligned)
+warning-1 += $(call cc-option, -Wstringop-truncation)
warning-1 += $(call cc-disable-warning, missing-field-initializers)
warning-1 += $(call cc-disable-warning, sign-compare)
@@ -64,7 +65,7 @@ endif
KBUILD_CFLAGS += $(warning)
else
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
KBUILD_CFLAGS += $(call cc-disable-warning, format)
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 0a482f341576..46c5c6809806 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -26,6 +26,16 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE) \
+= -fplugin-arg-randomize_layout_plugin-performance-mode
+gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak_plugin.so
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+ += -DSTACKLEAK_PLUGIN
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+ += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE)
+ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable
+endif
+export DISABLE_STACKLEAK_PLUGIN
+
# All the plugin CFLAGS are collected here in case a build target needs to
# filter them out of the KBUILD_CFLAGS.
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 161b0224d6ae..c883ec55654f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -4934,17 +4934,6 @@ sub process {
while ($line =~ m{($Constant|$Lval)}g) {
my $var = $1;
-#gcc binary extension
- if ($var =~ /^$Binary$/) {
- if (WARN("GCC_BINARY_CONSTANT",
- "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr) &&
- $fix) {
- my $hexval = sprintf("0x%x", oct($var));
- $fixed[$fixlinenr] =~
- s/\b$var\b/$hexval/;
- }
- }
-
#CamelCase
if ($var !~ /^$Constant$/ &&
$var =~ /[A-Z][a-z]|[a-z][A-Z]/ &&
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index cb0c889e13aa..0d5c799688f0 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -139,4 +139,55 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
in structures. This reduces the performance hit of RANDSTRUCT
at the cost of weakened randomization.
+config GCC_PLUGIN_STACKLEAK
+ bool "Erase the kernel stack before returning from syscalls"
+ depends on GCC_PLUGINS
+ depends on HAVE_ARCH_STACKLEAK
+ help
+ This option makes the kernel erase the kernel stack before
+ returning from system calls. That reduces the information which
+ kernel stack leak bugs can reveal and blocks some uninitialized
+ stack variable attacks.
+
+ The tradeoff is the performance impact: on a single-CPU system, kernel
+ compilation sees a 1% slowdown; other systems and workloads may vary,
+ so you are advised to test this feature on your expected workload
+ before deploying it.
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
+config STACKLEAK_TRACK_MIN_SIZE
+ int "Minimum stack frame size of functions tracked by STACKLEAK"
+ default 100
+ range 0 4096
+ depends on GCC_PLUGIN_STACKLEAK
+ help
+ The STACKLEAK gcc plugin instruments the kernel code for tracking
+ the lowest border of the kernel stack (and for some other purposes).
+ It inserts the stackleak_track_stack() call for the functions with
+ a stack frame size greater than or equal to this parameter.
+ If unsure, leave the default value 100.
+
+config STACKLEAK_METRICS
+ bool "Show STACKLEAK metrics in the /proc file system"
+ depends on GCC_PLUGIN_STACKLEAK
+ depends on PROC_FS
+ help
+ If this is set, STACKLEAK metrics for every task are available in
+ the /proc file system. In particular, /proc/<pid>/stack_depth
+ shows the maximum kernel stack consumption for the current and
+ previous syscalls. Although this information is not precise, it
+ can be useful for estimating the STACKLEAK performance impact for
+ your workloads.
+
+config STACKLEAK_RUNTIME_DISABLE
+ bool "Allow runtime disabling of kernel stack erasing"
+ depends on GCC_PLUGIN_STACKLEAK
+ help
+ This option provides the 'stack_erasing' sysctl, which can be used at
+ runtime to control kernel stack erasing for kernels built with
+ CONFIG_GCC_PLUGIN_STACKLEAK.
+
endif
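The two run-time interfaces described in the help texts above can be exercised from user space. A rough sketch, assuming the conventional paths /proc/<pid>/stack_depth (STACKLEAK_METRICS) and /proc/sys/kernel/stack_erasing (STACKLEAK_RUNTIME_DISABLE); neither path is guaranteed by this hunk itself:

/* Sketch: read the per-task stack-depth metric, then disable erasing. */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f;

	f = fopen("/proc/self/stack_depth", "r");	/* needs STACKLEAK_METRICS */
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("stack_depth: %s", line);
		fclose(f);
	}

	f = fopen("/proc/sys/kernel/stack_erasing", "w"); /* needs RUNTIME_DISABLE */
	if (f) {
		fputs("0\n", f);	/* 0 disables kernel stack erasing */
		fclose(f);
	}
	return 0;
}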
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
new file mode 100644
index 000000000000..2f48da98b5d4
--- /dev/null
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2011-2017 by the PaX Team <pageexec@freemail.hu>
+ * Modified by Alexander Popov <alex.popov@linux.com>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ * but for the kernel it doesn't matter since it doesn't link against
+ * any of the gcc libraries
+ *
+ * This gcc plugin is needed for tracking the lowest border of the kernel stack.
+ * It instruments the kernel code by inserting stackleak_track_stack() calls:
+ * - after alloca();
+ * - for the functions with a stack frame size greater than or equal
+ * to the "track-min-size" plugin parameter.
+ *
+ * This plugin is ported from grsecurity/PaX. For more information see:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+ *
+ * Debugging:
+ * - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(),
+ * print_rtl() and print_simple_rtl();
+ * - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in
+ * Makefile.gcc-plugins to see the verbose dumps of the gcc passes;
+ * - use gcc -E to understand the preprocessing shenanigans;
+ * - use gcc with enabled CFG/GIMPLE/SSA verification (--enable-checking).
+ */
+
+#include "gcc-common.h"
+
+__visible int plugin_is_GPL_compatible;
+
+static int track_frame_size = -1;
+static const char track_function[] = "stackleak_track_stack";
+
+/*
+ * Mark these global variables (roots) for gcc garbage collector since
+ * they point to the garbage-collected memory.
+ */
+static GTY(()) tree track_function_decl;
+
+static struct plugin_info stackleak_plugin_info = {
+ .version = "201707101337",
+ .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n"
+ "disable\t\tdo not activate the plugin\n"
+};
+
+static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
+{
+ gimple stmt;
+ gcall *stackleak_track_stack;
+ cgraph_node_ptr node;
+ int frequency;
+ basic_block bb;
+
+ /* Insert call to void stackleak_track_stack(void) */
+ stmt = gimple_build_call(track_function_decl, 0);
+ stackleak_track_stack = as_a_gcall(stmt);
+ if (after) {
+ gsi_insert_after(gsi, stackleak_track_stack,
+ GSI_CONTINUE_LINKING);
+ } else {
+ gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT);
+ }
+
+ /* Update the cgraph */
+ bb = gimple_bb(stackleak_track_stack);
+ node = cgraph_get_create_node(track_function_decl);
+ gcc_assert(node);
+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
+ cgraph_create_edge(cgraph_get_node(current_function_decl), node,
+ stackleak_track_stack, bb->count, frequency);
+}
+
+static bool is_alloca(gimple stmt)
+{
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
+ return true;
+
+#if BUILDING_GCC_VERSION >= 4007
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
+ return true;
+#endif
+
+ return false;
+}
+
+/*
+ * Work with the GIMPLE representation of the code. Insert the
+ * stackleak_track_stack() call after alloca() and into the beginning
+ * of the function if it is not instrumented.
+ */
+static unsigned int stackleak_instrument_execute(void)
+{
+ basic_block bb, entry_bb;
+ bool prologue_instrumented = false, is_leaf = true;
+ gimple_stmt_iterator gsi;
+
+ /*
+ * ENTRY_BLOCK_PTR is a basic block which represents possible entry
+ * point of a function. This block does not contain any code and
+ * has a CFG edge to its successor.
+ */
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ entry_bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+
+ /*
+ * Loop through the GIMPLE statements in each of cfun basic blocks.
+ * cfun is a global variable which represents the function that is
+ * currently processed.
+ */
+ FOR_EACH_BB_FN(bb, cfun) {
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gimple stmt;
+
+ stmt = gsi_stmt(gsi);
+
+ /* Leaf function is a function which makes no calls */
+ if (is_gimple_call(stmt))
+ is_leaf = false;
+
+ if (!is_alloca(stmt))
+ continue;
+
+ /* Insert stackleak_track_stack() call after alloca() */
+ stackleak_add_track_stack(&gsi, true);
+ if (bb == entry_bb)
+ prologue_instrumented = true;
+ }
+ }
+
+ if (prologue_instrumented)
+ return 0;
+
+ /*
+ * Special cases to skip the instrumentation.
+ *
+ * Taking the address of static inline functions materializes them,
+ * but we mustn't instrument some of them as the resulting stack
+ * alignment required by the function call ABI will break other
+ * assumptions regarding the expected (but not otherwise enforced)
+ * register clobbering ABI.
+ *
+ * Case in point: native_save_fl on amd64, when optimized for size,
+ * would clobber rdx if it were instrumented here.
+ *
+ * TODO: any more special cases?
+ */
+ if (is_leaf &&
+ !TREE_PUBLIC(current_function_decl) &&
+ DECL_DECLARED_INLINE_P(current_function_decl)) {
+ return 0;
+ }
+
+ if (is_leaf &&
+ !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)),
+ "_paravirt_", 10)) {
+ return 0;
+ }
+
+ /* Insert stackleak_track_stack() call at the function beginning */
+ bb = entry_bb;
+ if (!single_pred_p(bb)) {
+ /* gcc_assert(bb_loop_depth(bb) ||
+ (bb->flags & BB_IRREDUCIBLE_LOOP)); */
+ split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+ }
+ gsi = gsi_after_labels(bb);
+ stackleak_add_track_stack(&gsi, false);
+
+ return 0;
+}
+
+static bool large_stack_frame(void)
+{
+#if BUILDING_GCC_VERSION >= 8000
+ return maybe_ge(get_frame_size(), track_frame_size);
+#else
+ return (get_frame_size() >= track_frame_size);
+#endif
+}
+
+/*
+ * Work with the RTL representation of the code.
+ * Remove the unneeded stackleak_track_stack() calls from the functions
+ * which don't call alloca() and don't have a large enough stack frame size.
+ */
+static unsigned int stackleak_cleanup_execute(void)
+{
+ rtx_insn *insn, *next;
+
+ if (cfun->calls_alloca)
+ return 0;
+
+ if (large_stack_frame())
+ return 0;
+
+ /*
+ * Find stackleak_track_stack() calls. Loop through the chain of insns,
+ * which is an RTL representation of the code for a function.
+ *
+ * The example of a matching insn:
+ * (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack")
+ * [flags 0x41] <function_decl 0x7f7cd3302a80 stackleak_track_stack>)
+ * [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list
+ * (symbol_ref ("stackleak_track_stack") [flags 0x41] <function_decl
+ * 0x7f7cd3302a80 stackleak_track_stack>) (expr_list (0) (nil))) (nil))
+ */
+ for (insn = get_insns(); insn; insn = next) {
+ rtx body;
+
+ next = NEXT_INSN(insn);
+
+ /* Check the expression code of the insn */
+ if (!CALL_P(insn))
+ continue;
+
+ /*
+ * Check the expression code of the insn body, which is an RTL
+ * Expression (RTX) describing the side effect performed by
+ * that insn.
+ */
+ body = PATTERN(insn);
+
+ if (GET_CODE(body) == PARALLEL)
+ body = XVECEXP(body, 0, 0);
+
+ if (GET_CODE(body) != CALL)
+ continue;
+
+ /*
+ * Check the first operand of the call expression. It should
+ * be a mem RTX describing the needed subroutine with a
+ * symbol_ref RTX.
+ */
+ body = XEXP(body, 0);
+ if (GET_CODE(body) != MEM)
+ continue;
+
+ body = XEXP(body, 0);
+ if (GET_CODE(body) != SYMBOL_REF)
+ continue;
+
+ if (SYMBOL_REF_DECL(body) != track_function_decl)
+ continue;
+
+ /* Delete the stackleak_track_stack() call */
+ delete_insn_and_edges(insn);
+#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION < 8000
+ if (GET_CODE(next) == NOTE &&
+ NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
+ insn = next;
+ next = NEXT_INSN(insn);
+ delete_insn_and_edges(insn);
+ }
+#endif
+ }
+
+ return 0;
+}
+
+static bool stackleak_gate(void)
+{
+ tree section;
+
+ section = lookup_attribute("section",
+ DECL_ATTRIBUTES(current_function_decl));
+ if (section && TREE_VALUE(section)) {
+ section = TREE_VALUE(TREE_VALUE(section));
+
+ if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10))
+ return false;
+ if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13))
+ return false;
+ if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13))
+ return false;
+ if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13))
+ return false;
+ }
+
+ return track_frame_size >= 0;
+}
+
+/* Build the function declaration for stackleak_track_stack() */
+static void stackleak_start_unit(void *gcc_data __unused,
+ void *user_data __unused)
+{
+ tree fntype;
+
+ /* void stackleak_track_stack(void) */
+ fntype = build_function_type_list(void_type_node, NULL_TREE);
+ track_function_decl = build_fn_decl(track_function, fntype);
+ DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */
+ TREE_PUBLIC(track_function_decl) = 1;
+ TREE_USED(track_function_decl) = 1;
+ DECL_EXTERNAL(track_function_decl) = 1;
+ DECL_ARTIFICIAL(track_function_decl) = 1;
+ DECL_PRESERVE_P(track_function_decl) = 1;
+}
+
+/*
+ * A pass gate function is a predicate that gets executed before the
+ * corresponding pass. If it returns 'true', the pass gets executed;
+ * otherwise it is skipped.
+ */
+static bool stackleak_instrument_gate(void)
+{
+ return stackleak_gate();
+}
+
+#define PASS_NAME stackleak_instrument
+#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg
+#define TODO_FLAGS_START TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \
+ | TODO_update_ssa | TODO_rebuild_cgraph_edges
+#include "gcc-generate-gimple-pass.h"
+
+static bool stackleak_cleanup_gate(void)
+{
+ return stackleak_gate();
+}
+
+#define PASS_NAME stackleak_cleanup
+#define TODO_FLAGS_FINISH TODO_dump_func
+#include "gcc-generate-rtl-pass.h"
+
+/*
+ * Every gcc plugin exports a plugin_init() function that is called right
+ * after the plugin is loaded. This function is responsible for registering
+ * the plugin callbacks and doing other required initialization.
+ */
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
+{
+ const char * const plugin_name = plugin_info->base_name;
+ const int argc = plugin_info->argc;
+ const struct plugin_argument * const argv = plugin_info->argv;
+ int i = 0;
+
+ /* Extra GGC root tables describing our GTY-ed data */
+ static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
+ {
+ .base = &track_function_decl,
+ .nelt = 1,
+ .stride = sizeof(track_function_decl),
+ .cb = &gt_ggc_mx_tree_node,
+ .pchw = &gt_pch_nx_tree_node
+ },
+ LAST_GGC_ROOT_TAB
+ };
+
+ /*
+ * The stackleak_instrument pass should be executed before the
+ * "optimized" pass, which is the control flow graph cleanup that is
+ * performed just before expanding gcc trees to the RTL. In former
+ * versions of the plugin this new pass was inserted before the
+ * "tree_profile" pass, which is currently called "profile".
+ */
+ PASS_INFO(stackleak_instrument, "optimized", 1,
+ PASS_POS_INSERT_BEFORE);
+
+ /*
+ * The stackleak_cleanup pass should be executed after the
+ * "reload" pass, when the stack frame size is final.
+ */
+ PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER);
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ /* Parse the plugin arguments */
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i].key, "disable"))
+ return 0;
+
+ if (!strcmp(argv[i].key, "track-min-size")) {
+ if (!argv[i].value) {
+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"),
+ plugin_name, argv[i].key);
+ return 1;
+ }
+
+ track_frame_size = atoi(argv[i].value);
+ if (track_frame_size < 0) {
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"),
+ plugin_name, argv[i].key, argv[i].value);
+ return 1;
+ }
+ } else {
+ error(G_("unknown option '-fplugin-arg-%s-%s'"),
+ plugin_name, argv[i].key);
+ return 1;
+ }
+ }
+
+ /* Give the information about the plugin */
+ register_callback(plugin_name, PLUGIN_INFO, NULL,
+ &stackleak_plugin_info);
+
+ /* Register to be called before processing a translation unit */
+ register_callback(plugin_name, PLUGIN_START_UNIT,
+ &stackleak_start_unit, NULL);
+
+ /* Register an extra GCC garbage collector (GGC) root table */
+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL,
+ (void *)&gt_ggc_r_gt_stackleak);
+
+ /*
+ * Hook into the Pass Manager to register new gcc passes.
+ *
+ * The stack frame size info is available only at the last RTL pass,
+ * when it's too late to insert complex code like a function call.
+ * So we register two gcc passes to instrument every function at first
+ * and remove the unneeded instrumentation later.
+ */
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &stackleak_instrument_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &stackleak_cleanup_pass_info);
+
+ return 0;
+}
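Taken together, the instrumentation pass and the RTL cleanup pass have roughly the following net effect on a compiled function. This is only an illustration written as C source; the plugin actually operates on the GIMPLE and RTL forms, never on the source text:

void stackleak_track_stack(void);	/* tracking helper provided by the kernel */

/* Frame size >= track-min-size: the entry call survives the cleanup pass. */
void large_frame(void)
{
	char buf[512];

	stackleak_track_stack();	/* inserted at the function entry */
	/* ... use buf ... */
	(void)buf;
}

/* alloca() user: an extra call is kept right after each alloca(). */
void alloca_user(int n)
{
	char *p = __builtin_alloca(n);
	stackleak_track_stack();

	(void)p;
}

/* Small frame, no alloca(): the RTL pass removes the inserted call again. */
void small_leaf(void)
{
}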
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 67ed9f6ccdf8..63b609243d03 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -68,21 +68,7 @@ PHONY += $(simple-targets)
$(simple-targets): $(obj)/conf
$< $(silent) --$@ $(Kconfig)
-PHONY += oldnoconfig silentoldconfig savedefconfig defconfig
-
-# oldnoconfig is an alias of olddefconfig, because people already are dependent
-# on its behavior (sets new symbols to their default value but not 'n') with the
-# counter-intuitive name.
-oldnoconfig: olddefconfig
- @echo " WARNING: \"oldnoconfig\" target will be removed after Linux 4.19"
- @echo " Please use \"olddefconfig\" instead, which is an alias."
-
-# We do not expect manual invocation of "silentoldconfig" (or "syncconfig").
-silentoldconfig: syncconfig
- @echo " WARNING: \"silentoldconfig\" has been renamed to \"syncconfig\""
- @echo " and is now an internal implementation detail."
- @echo " What you want is probably \"oldconfig\"."
- @echo " \"silentoldconfig\" will be removed after Linux 4.19"
+PHONY += savedefconfig defconfig
savedefconfig: $(obj)/conf
$< $(silent) --$@=defconfig $(Kconfig)
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 7b2b37260669..98e0c7a34699 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -460,12 +460,6 @@ static struct option long_opts[] = {
{"randconfig", no_argument, NULL, randconfig},
{"listnewconfig", no_argument, NULL, listnewconfig},
{"olddefconfig", no_argument, NULL, olddefconfig},
- /*
- * oldnoconfig is an alias of olddefconfig, because people already
- * are dependent on its behavior(sets new symbols to their default
- * value but not 'n') with the counter-intuitive name.
- */
- {"oldnoconfig", no_argument, NULL, olddefconfig},
{NULL, 0, NULL, 0}
};
@@ -480,7 +474,6 @@ static void conf_usage(const char *progname)
printf(" --syncconfig Similar to oldconfig but generates configuration in\n"
" include/{generated/,config/}\n");
printf(" --olddefconfig Same as oldconfig but sets new symbols to their default value\n");
- printf(" --oldnoconfig An alias of olddefconfig\n");
printf(" --defconfig <file> New config with default defined in <file>\n");
printf(" --savedefconfig <file> Save the minimal current configuration to <file>\n");
printf(" --allnoconfig New config where all options are answered with no\n");
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 67d131447631..da66e7742282 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -33,12 +33,15 @@ usage() {
echo " -n use allnoconfig instead of alldefconfig"
echo " -r list redundant entries when merging fragments"
echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead."
+ echo
+ echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable."
}
RUNMAKE=true
ALLTARGET=alldefconfig
WARNREDUN=false
OUTPUT=.
+CONFIG_PREFIX=${CONFIG_-CONFIG_}
while true; do
case $1 in
@@ -99,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then
fi
MERGE_LIST=$*
-SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p"
+SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p"
+
TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
echo "Using $INITFILE as base"
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index e09fe4d7307c..8963203319ea 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -1742,7 +1742,7 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
if (error)
return error;
- parent = aa_get_ns(dir->i_private);
+ parent = aa_get_ns(dir->i_private);
/* rmdir calls the generic securityfs functions to remove files
* from the apparmor dir. It is up to the apparmor ns locking
* to avoid races.
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 4285943f7260..d0afed9ebd0e 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -496,7 +496,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
/* update caching of label on file_ctx */
spin_lock(&fctx->lock);
old = rcu_dereference_protected(fctx->label,
- spin_is_locked(&fctx->lock));
+ lockdep_is_held(&fctx->lock));
l = aa_label_merge(old, label, GFP_ATOMIC);
if (l) {
if (l != old) {
diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h
index e287b7d0d4be..265ae6641a06 100644
--- a/security/apparmor/include/cred.h
+++ b/security/apparmor/include/cred.h
@@ -151,6 +151,8 @@ static inline struct aa_label *begin_current_label_crit_section(void)
{
struct aa_label *label = aa_current_raw_label();
+ might_sleep();
+
if (label_is_stale(label)) {
label = aa_get_newest_label(label);
if (aa_replace_current_label(label) == 0)
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
index ec7228e857a9..7334ac966d01 100644
--- a/security/apparmor/include/net.h
+++ b/security/apparmor/include/net.h
@@ -83,6 +83,13 @@ struct aa_sk_ctx {
__e; \
})
+struct aa_secmark {
+ u8 audit;
+ u8 deny;
+ u32 secid;
+ char *label;
+};
+
extern struct aa_sfs_entry aa_sfs_entry_network[];
void audit_net_cb(struct audit_buffer *ab, void *va);
@@ -103,4 +110,7 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk);
int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
struct socket *sock);
+int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+ u32 secid, struct sock *sk);
+
#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index ab64c6b5db5a..8e6707c837be 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -155,6 +155,9 @@ struct aa_profile {
struct aa_rlimit rlimits;
+ int secmark_count;
+ struct aa_secmark *secmark;
+
struct aa_loaddata *rawdata;
unsigned char *hash;
char *dirname;
diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h
index dee6fa3b6081..fa2062711b63 100644
--- a/security/apparmor/include/secid.h
+++ b/security/apparmor/include/secid.h
@@ -22,6 +22,9 @@ struct aa_label;
/* secid value that will not be allocated */
#define AA_SECID_INVALID 0
+/* secid value that matches any other secid */
+#define AA_SECID_WILDCARD 1
+
struct aa_label *aa_secid_to_label(u32 secid);
int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 974affe50531..76491e7f4177 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -90,10 +90,12 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
const char *end = fqname + n;
const char *name = skipn_spaces(fqname, n);
- if (!name)
- return NULL;
*ns_name = NULL;
*ns_len = 0;
+
+ if (!name)
+ return NULL;
+
if (name[0] == ':') {
char *split = strnchr(&name[1], end - &name[1], ':');
*ns_name = skipn_spaces(&name[1], end - &name[1]);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index aa35939443c4..42446a216f3b 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -23,6 +23,8 @@
#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
#include <net/sock.h>
#include "include/apparmor.h"
@@ -114,13 +116,13 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
struct aa_label *tracer, *tracee;
int error;
- tracer = begin_current_label_crit_section();
+ tracer = __begin_current_label_crit_section();
tracee = aa_get_task_label(child);
error = aa_may_ptrace(tracer, tracee,
(mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
: AA_PTRACE_TRACE);
aa_put_label(tracee);
- end_current_label_crit_section(tracer);
+ __end_current_label_crit_section(tracer);
return error;
}
@@ -130,11 +132,11 @@ static int apparmor_ptrace_traceme(struct task_struct *parent)
struct aa_label *tracer, *tracee;
int error;
- tracee = begin_current_label_crit_section();
+ tracee = __begin_current_label_crit_section();
tracer = aa_get_task_label(parent);
error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
aa_put_label(tracer);
- end_current_label_crit_section(tracee);
+ __end_current_label_crit_section(tracee);
return error;
}
@@ -1020,6 +1022,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
}
+#ifdef CONFIG_NETWORK_SECMARK
/**
* apparmor_socket_sock_recv_skb - check perms before associating skb to sk
*
@@ -1030,8 +1033,15 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
*/
static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- return 0;
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (!skb->secmark)
+ return 0;
+
+ return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
+ skb->secmark, sk);
}
+#endif
static struct aa_label *sk_peer_label(struct sock *sk)
@@ -1126,6 +1136,20 @@ static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
ctx->label = aa_get_current_label();
}
+#ifdef CONFIG_NETWORK_SECMARK
+static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (!skb->secmark)
+ return 0;
+
+ return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT,
+ skb->secmark, sk);
+}
+#endif
+
static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1177,12 +1201,17 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
+#ifdef CONFIG_NETWORK_SECMARK
LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
+#endif
LSM_HOOK_INIT(socket_getpeersec_stream,
apparmor_socket_getpeersec_stream),
LSM_HOOK_INIT(socket_getpeersec_dgram,
apparmor_socket_getpeersec_dgram),
LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
+#ifdef CONFIG_NETWORK_SECMARK
+ LSM_HOOK_INIT(inet_conn_request, apparmor_inet_conn_request),
+#endif
LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
LSM_HOOK_INIT(cred_free, apparmor_cred_free),
@@ -1538,6 +1567,97 @@ static inline int apparmor_init_sysctl(void)
}
#endif /* CONFIG_SYSCTL */
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK)
+static unsigned int apparmor_ip_postroute(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct aa_sk_ctx *ctx;
+ struct sock *sk;
+
+ if (!skb->secmark)
+ return NF_ACCEPT;
+
+ sk = skb_to_full_sk(skb);
+ if (sk == NULL)
+ return NF_ACCEPT;
+
+ ctx = SK_CTX(sk);
+ if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND,
+ skb->secmark, sk))
+ return NF_ACCEPT;
+
+ return NF_DROP_ERR(-ECONNREFUSED);
+
+}
+
+static unsigned int apparmor_ipv4_postroute(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return apparmor_ip_postroute(priv, skb, state);
+}
+
+static unsigned int apparmor_ipv6_postroute(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return apparmor_ip_postroute(priv, skb, state);
+}
+
+static const struct nf_hook_ops apparmor_nf_ops[] = {
+ {
+ .hook = apparmor_ipv4_postroute,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_SELINUX_FIRST,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .hook = apparmor_ipv6_postroute,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_SELINUX_FIRST,
+ },
+#endif
+};
+
+static int __net_init apparmor_nf_register(struct net *net)
+{
+ int ret;
+
+ ret = nf_register_net_hooks(net, apparmor_nf_ops,
+ ARRAY_SIZE(apparmor_nf_ops));
+ return ret;
+}
+
+static void __net_exit apparmor_nf_unregister(struct net *net)
+{
+ nf_unregister_net_hooks(net, apparmor_nf_ops,
+ ARRAY_SIZE(apparmor_nf_ops));
+}
+
+static struct pernet_operations apparmor_net_ops = {
+ .init = apparmor_nf_register,
+ .exit = apparmor_nf_unregister,
+};
+
+static int __init apparmor_nf_ip_init(void)
+{
+ int err;
+
+ if (!apparmor_enabled)
+ return 0;
+
+ err = register_pernet_subsys(&apparmor_net_ops);
+ if (err)
+ panic("Apparmor: register_pernet_subsys: error %d\n", err);
+
+ return 0;
+}
+__initcall(apparmor_nf_ip_init);
+#endif
+
static int __init apparmor_init(void)
{
int error;
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
index bb24cfa0a164..c07fde444792 100644
--- a/security/apparmor/net.c
+++ b/security/apparmor/net.c
@@ -18,6 +18,7 @@
#include "include/label.h"
#include "include/net.h"
#include "include/policy.h"
+#include "include/secid.h"
#include "net_names.h"
@@ -146,17 +147,20 @@ int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
struct sock *sk)
{
- struct aa_profile *profile;
- DEFINE_AUDIT_SK(sa, op, sk);
+ int error = 0;
AA_BUG(!label);
AA_BUG(!sk);
- if (unconfined(label))
- return 0;
+ if (!unconfined(label)) {
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(sa, op, sk);
- return fn_for_each_confined(label, profile,
- aa_profile_af_sk_perm(profile, &sa, request, sk));
+ error = fn_for_each_confined(label, profile,
+ aa_profile_af_sk_perm(profile, &sa, request, sk));
+ }
+
+ return error;
}
int aa_sk_perm(const char *op, u32 request, struct sock *sk)
@@ -185,3 +189,70 @@ int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
return aa_label_sk_perm(label, op, request, sock->sk);
}
+
+#ifdef CONFIG_NETWORK_SECMARK
+static int apparmor_secmark_init(struct aa_secmark *secmark)
+{
+ struct aa_label *label;
+
+ if (secmark->label[0] == '*') {
+ secmark->secid = AA_SECID_WILDCARD;
+ return 0;
+ }
+
+ label = aa_label_strn_parse(&root_ns->unconfined->label,
+ secmark->label, strlen(secmark->label),
+ GFP_ATOMIC, false, false);
+
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
+ secmark->secid = label->secid;
+
+ return 0;
+}
+
+static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+ struct common_audit_data *sa, struct sock *sk)
+{
+ int i, ret;
+ struct aa_perms perms = { };
+
+ if (profile->secmark_count == 0)
+ return 0;
+
+ for (i = 0; i < profile->secmark_count; i++) {
+ if (!profile->secmark[i].secid) {
+ ret = apparmor_secmark_init(&profile->secmark[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (profile->secmark[i].secid == secid ||
+ profile->secmark[i].secid == AA_SECID_WILDCARD) {
+ if (profile->secmark[i].deny)
+ perms.deny = ALL_PERMS_MASK;
+ else
+ perms.allow = ALL_PERMS_MASK;
+
+ if (profile->secmark[i].audit)
+ perms.audit = ALL_PERMS_MASK;
+ }
+ }
+
+ aa_apply_modes_to_perms(profile, &perms);
+
+ return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+}
+
+int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+ u32 secid, struct sock *sk)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(sa, op, sk);
+
+ return fn_for_each_confined(label, profile,
+ aa_secmark_perm(profile, request, secid,
+ &sa, sk));
+}
+#endif
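For orientation: skb->secmark is normally assigned by the netfilter SECMARK/CONNSECMARK targets, and each matching secmark rule in the profile contributes allow or deny (plus optional audit) for the packet's secid, with '*' acting as a wildcard. A stand-alone sketch of that matching rule, simplified from aa_secmark_perm() above (enforce-mode view only; complain mode and auditing are ignored):

#include <stdbool.h>
#include <stdint.h>

#define SECID_WILDCARD 1	/* mirrors AA_SECID_WILDCARD above */

struct rule {
	bool deny;
	uint32_t secid;
};

static bool secmark_allowed(const struct rule *rules, int n, uint32_t pkt_secid)
{
	bool allow = false, deny = false;
	int i;

	for (i = 0; i < n; i++) {
		if (rules[i].secid != pkt_secid &&
		    rules[i].secid != SECID_WILDCARD)
			continue;
		if (rules[i].deny)
			deny = true;
		else
			allow = true;
	}
	/* no matching rule, or any matching deny rule, blocks the packet */
	return allow && !deny;
}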
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 1590e2de4e84..df9c5890a878 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -231,6 +231,9 @@ void aa_free_profile(struct aa_profile *profile)
for (i = 0; i < profile->xattr_count; i++)
kzfree(profile->xattrs[i]);
kzfree(profile->xattrs);
+ for (i = 0; i < profile->secmark_count; i++)
+ kzfree(profile->secmark[i].label);
+ kzfree(profile->secmark);
kzfree(profile->dirname);
aa_put_dfa(profile->xmatch);
aa_put_dfa(profile->policy.dfa);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 21cb384d712a..379682e2a8d5 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -292,6 +292,19 @@ fail:
return 0;
}
+static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
+{
+ if (unpack_nameX(e, AA_U8, name)) {
+ if (!inbounds(e, sizeof(u8)))
+ return 0;
+ if (data)
+ *data = get_unaligned((u8 *)e->pos);
+ e->pos += sizeof(u8);
+ return 1;
+ }
+ return 0;
+}
+
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
if (unpack_nameX(e, AA_U32, name)) {
@@ -529,6 +542,49 @@ fail:
return 0;
}
+static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+ int i, size;
+
+ if (unpack_nameX(e, AA_STRUCT, "secmark")) {
+ size = unpack_array(e, NULL);
+
+ profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
+ GFP_KERNEL);
+ if (!profile->secmark)
+ goto fail;
+
+ profile->secmark_count = size;
+
+ for (i = 0; i < size; i++) {
+ if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
+ goto fail;
+ if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
+ goto fail;
+ if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
+ goto fail;
+ }
+ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ return 1;
+
+fail:
+ if (profile->secmark) {
+ for (i = 0; i < size; i++)
+ kfree(profile->secmark[i].label);
+ kfree(profile->secmark);
+ profile->secmark_count = 0;
+ }
+
+ e->pos = pos;
+ return 0;
+}
+
static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
{
void *pos = e->pos;
@@ -727,6 +783,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
+ if (!unpack_secmark(e, profile)) {
+ info = "failed to unpack profile secmark rules";
+ goto fail;
+ }
+
if (unpack_nameX(e, AA_STRUCT, "policydb")) {
/* generic policy dfa - optional and may be NULL */
info = "failed to unpack policydb";
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index 4ccec1bcf6f5..05373d9a3d6a 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -32,8 +32,7 @@
* secids - do not pin labels with a refcount. They rely on the label
* properly updating/freeing them
*/
-
-#define AA_FIRST_SECID 1
+#define AA_FIRST_SECID 2
static DEFINE_IDR(aa_secids);
static DEFINE_SPINLOCK(secid_lock);
diff --git a/security/keys/Makefile b/security/keys/Makefile
index ef1581b337a3..9cef54064f60 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o
+obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
#
# Key types
diff --git a/security/keys/compat.c b/security/keys/compat.c
index e87c89c0177c..9482df601dc3 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -141,6 +141,24 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
return keyctl_restrict_keyring(arg2, compat_ptr(arg3),
compat_ptr(arg4));
+ case KEYCTL_PKEY_QUERY:
+ if (arg3 != 0)
+ return -EINVAL;
+ return keyctl_pkey_query(arg2,
+ compat_ptr(arg4),
+ compat_ptr(arg5));
+
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ case KEYCTL_PKEY_SIGN:
+ return keyctl_pkey_e_d_s(option,
+ compat_ptr(arg2), compat_ptr(arg3),
+ compat_ptr(arg4), compat_ptr(arg5));
+
+ case KEYCTL_PKEY_VERIFY:
+ return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3),
+ compat_ptr(arg4), compat_ptr(arg5));
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 9f8208dc0e55..74cb0ff42fed 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -298,6 +298,45 @@ static inline long compat_keyctl_dh_compute(
#endif
#endif
+#ifdef CONFIG_ASYMMETRIC_KEY_TYPE
+extern long keyctl_pkey_query(key_serial_t,
+ const char __user *,
+ struct keyctl_pkey_query __user *);
+
+extern long keyctl_pkey_verify(const struct keyctl_pkey_params __user *,
+ const char __user *,
+ const void __user *, const void __user *);
+
+extern long keyctl_pkey_e_d_s(int,
+ const struct keyctl_pkey_params __user *,
+ const char __user *,
+ const void __user *, void __user *);
+#else
+static inline long keyctl_pkey_query(key_serial_t id,
+ const char __user *_info,
+ struct keyctl_pkey_query __user *_res)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_verify(const struct keyctl_pkey_params __user *params,
+ const char __user *_info,
+ const void __user *_in,
+ const void __user *_in2)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_e_d_s(int op,
+ const struct keyctl_pkey_params __user *params,
+ const char __user *_info,
+ const void __user *_in,
+ void __user *_out)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/*
* Debugging key validation
*/
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 1ffe60bb2845..18619690ce77 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1747,6 +1747,30 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
(const char __user *) arg3,
(const char __user *) arg4);
+ case KEYCTL_PKEY_QUERY:
+ if (arg3 != 0)
+ return -EINVAL;
+ return keyctl_pkey_query((key_serial_t)arg2,
+ (const char __user *)arg4,
+ (struct keyctl_pkey_query *)arg5);
+
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ case KEYCTL_PKEY_SIGN:
+ return keyctl_pkey_e_d_s(
+ option,
+ (const struct keyctl_pkey_params __user *)arg2,
+ (const char __user *)arg3,
+ (const void __user *)arg4,
+ (void __user *)arg5);
+
+ case KEYCTL_PKEY_VERIFY:
+ return keyctl_pkey_verify(
+ (const struct keyctl_pkey_params __user *)arg2,
+ (const char __user *)arg3,
+ (const void __user *)arg4,
+ (const void __user *)arg5);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
new file mode 100644
index 000000000000..783978842f13
--- /dev/null
+++ b/security/keys/keyctl_pkey.c
@@ -0,0 +1,323 @@
+/* Public-key operation keyctls
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/parser.h>
+#include <linux/uaccess.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
+{
+ kfree(params->info);
+ key_put(params->key);
+}
+
+enum {
+ Opt_err = -1,
+ Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */
+ Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */
+};
+
+static const match_table_t param_keys = {
+ { Opt_enc, "enc=%s" },
+ { Opt_hash, "hash=%s" },
+ { Opt_err, NULL }
+};
+
+/*
+ * Parse the information string which consists of key=val pairs.
+ */
+static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
+{
+ unsigned long token_mask = 0;
+ substring_t args[MAX_OPT_ARGS];
+ char *c = params->info, *p, *q;
+ int token;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, param_keys, args);
+ if (__test_and_set_bit(token, &token_mask))
+ return -EINVAL;
+ q = args[0].from;
+ if (!q[0])
+ return -EINVAL;
+
+ switch (token) {
+ case Opt_enc:
+ params->encoding = q;
+ break;
+
+ case Opt_hash:
+ params->hash_algo = q;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Interpret parameters. Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get(key_serial_t id,
+ const char __user *_info,
+ struct kernel_pkey_params *params)
+{
+ key_ref_t key_ref;
+ void *p;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->encoding = "raw";
+
+ p = strndup_user(_info, PAGE_SIZE);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+ params->info = p;
+
+ ret = keyctl_pkey_params_parse(params);
+ if (ret < 0)
+ return ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ params->key = key_ref_to_ptr(key_ref);
+
+ if (!params->key->type->asym_query)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/*
+ * Get parameters from userspace. Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ int op,
+ struct kernel_pkey_params *params)
+{
+ struct keyctl_pkey_params uparams;
+ struct kernel_pkey_query info;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->encoding = "raw";
+
+ if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0)
+ return -EFAULT;
+
+ ret = keyctl_pkey_params_get(uparams.key_id, _info, params);
+ if (ret < 0)
+ return ret;
+
+ ret = params->key->type->asym_query(params, &info);
+ if (ret < 0)
+ return ret;
+
+ switch (op) {
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ if (uparams.in_len > info.max_enc_size ||
+ uparams.out_len > info.max_dec_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_SIGN:
+ case KEYCTL_PKEY_VERIFY:
+ if (uparams.in_len > info.max_sig_size ||
+ uparams.out_len > info.max_data_size)
+ return -EINVAL;
+ break;
+ default:
+ BUG();
+ }
+
+ params->in_len = uparams.in_len;
+ params->out_len = uparams.out_len;
+ return 0;
+}
+
+/*
+ * Query information about an asymmetric key.
+ */
+long keyctl_pkey_query(key_serial_t id,
+ const char __user *_info,
+ struct keyctl_pkey_query __user *_res)
+{
+ struct kernel_pkey_params params;
+ struct kernel_pkey_query res;
+ long ret;
+
+ memset(&params, 0, sizeof(params));
+
+ ret = keyctl_pkey_params_get(id, _info, &params);
+ if (ret < 0)
+ goto error;
+
+ ret = params.key->type->asym_query(&params, &res);
+ if (ret < 0)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_to_user(_res, &res, sizeof(res)) == 0 &&
+ clear_user(_res->__spare, sizeof(_res->__spare)) == 0)
+ ret = 0;
+
+error:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
+
+/*
+ * Encrypt/decrypt/sign
+ *
+ * Encrypt data, decrypt data or sign data using a public key.
+ *
+ * _info is a string of supplementary information in key=val format. For
+ * instance, it might contain:
+ *
+ * "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the encoding and hash= selects the OID to go in that
+ * particular encoding if required. If enc= isn't supplied, it's assumed that
+ * the caller is supplying raw values.
+ *
+ * If successful, the amount of data written into the output buffer is
+ * returned.
+ */
+long keyctl_pkey_e_d_s(int op,
+ const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ const void __user *_in,
+ void __user *_out)
+{
+ struct kernel_pkey_params params;
+ void *in, *out;
+ long ret;
+
+ ret = keyctl_pkey_params_get_2(_params, _info, op, &params);
+ if (ret < 0)
+ goto error_params;
+
+ ret = -EOPNOTSUPP;
+ if (!params.key->type->asym_eds_op)
+ goto error_params;
+
+ switch (op) {
+ case KEYCTL_PKEY_ENCRYPT:
+ params.op = kernel_pkey_encrypt;
+ break;
+ case KEYCTL_PKEY_DECRYPT:
+ params.op = kernel_pkey_decrypt;
+ break;
+ case KEYCTL_PKEY_SIGN:
+ params.op = kernel_pkey_sign;
+ break;
+ default:
+ BUG();
+ }
+
+ in = memdup_user(_in, params.in_len);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto error_params;
+ }
+
+ ret = -ENOMEM;
+ out = kmalloc(params.out_len, GFP_KERNEL);
+ if (!out)
+ goto error_in;
+
+ ret = params.key->type->asym_eds_op(&params, in, out);
+ if (ret < 0)
+ goto error_out;
+
+ if (copy_to_user(_out, out, ret) != 0)
+ ret = -EFAULT;
+
+error_out:
+ kfree(out);
+error_in:
+ kfree(in);
+error_params:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
+
+/*
+ * Verify a signature.
+ *
+ * Verify a public key signature using the given key, or if not given, search
+ * for a matching key.
+ *
+ * _info is a string of supplementary information in key=val format. For
+ * instance, it might contain:
+ *
+ * "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the signature blob encoding and hash= selects the OID
+ * to go in that particular encoding. If enc= isn't supplied, it's assumed
+ * that the caller is supplying raw values.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ const void __user *_in,
+ const void __user *_in2)
+{
+ struct kernel_pkey_params params;
+ void *in, *in2;
+ long ret;
+
+ ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY,
+ &params);
+ if (ret < 0)
+ goto error_params;
+
+ ret = -EOPNOTSUPP;
+ if (!params.key->type->asym_verify_signature)
+ goto error_params;
+
+ in = memdup_user(_in, params.in_len);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto error_params;
+ }
+
+ in2 = memdup_user(_in2, params.in2_len);
+ if (IS_ERR(in2)) {
+ ret = PTR_ERR(in2);
+ goto error_in;
+ }
+
+ params.op = kernel_pkey_verify;
+ ret = params.key->type->asym_verify_signature(&params, in, in2);
+
+ kfree(in2);
+error_in:
+ kfree(in);
+error_params:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
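From user space these operations are reached via keyctl(2) with the new KEYCTL_PKEY_* commands; struct keyctl_pkey_params and the command numbers come from the UAPI added alongside this code. A hypothetical signing call (the key serial, digest contents and buffer sizes are invented for illustration):

#include <linux/keyctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct keyctl_pkey_params params = {
		.key_id  = 0x12345678,	/* serial of an asymmetric key */
		.in_len  = 32,		/* SHA-256 digest size */
		.out_len = 256,		/* large enough for the signature */
	};
	uint8_t digest[32] = { 0 };
	uint8_t sig[256];
	long n;

	/* arg2..arg5 match the KEYCTL_PKEY_SIGN dispatch in keyctl.c above */
	n = syscall(__NR_keyctl, KEYCTL_PKEY_SIGN, &params,
		    "enc=pkcs1 hash=sha256", digest, sig);
	if (n < 0)
		perror("KEYCTL_PKEY_SIGN");
	else
		printf("signature: %ld bytes\n", n);
	return 0;
}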
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index b69d3b1777c2..ff6789365a12 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -30,7 +30,7 @@
#include <linux/tpm.h>
#include <linux/tpm_command.h>
-#include "trusted.h"
+#include <keys/trusted.h>
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
@@ -121,7 +121,7 @@ out:
/*
* calculate authorization info fields to send to TPM
*/
-static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
unsigned int keylen, unsigned char *h1,
unsigned char *h2, unsigned char h3, ...)
{
@@ -168,11 +168,12 @@ out:
kzfree(sdesc);
return ret;
}
+EXPORT_SYMBOL_GPL(TSS_authhmac);
/*
* verify the AUTH1_COMMAND (Seal) result from TPM
*/
-static int TSS_checkhmac1(unsigned char *buffer,
+int TSS_checkhmac1(unsigned char *buffer,
const uint32_t command,
const unsigned char *ononce,
const unsigned char *key,
@@ -249,6 +250,7 @@ out:
kzfree(sdesc);
return ret;
}
+EXPORT_SYMBOL_GPL(TSS_checkhmac1);
/*
* verify the AUTH2_COMMAND (unseal) result from TPM
@@ -355,7 +357,7 @@ out:
* For key specific tpm requests, we will generate and send our
* own TPM command packets using the drivers send function.
*/
-static int trusted_tpm_send(unsigned char *cmd, size_t buflen)
+int trusted_tpm_send(unsigned char *cmd, size_t buflen)
{
int rc;
@@ -367,6 +369,7 @@ static int trusted_tpm_send(unsigned char *cmd, size_t buflen)
rc = -EPERM;
return rc;
}
+EXPORT_SYMBOL_GPL(trusted_tpm_send);
/*
* Lock a trusted key, by extending a selected PCR.
@@ -425,7 +428,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
/*
* Create an object independent authorisation protocol (oiap) session
*/
-static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
{
int ret;
@@ -442,6 +445,7 @@ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
TPM_NONCE_SIZE);
return 0;
}
+EXPORT_SYMBOL_GPL(oiap);
struct tpm_digests {
unsigned char encauth[SHA1_DIGEST_SIZE];
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index fcd965f1d69e..9be76c808fcc 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -146,53 +146,22 @@ static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
struct snd_interval *s = hw_param_interval(params, rule->var);
const struct snd_interval *r =
hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval t = {
- .min = s->min, .max = s->max, .integer = 1,
- };
+ struct snd_interval t = {0};
+ unsigned int step = 0;
int i;
for (i = 0; i < CIP_SFC_COUNT; ++i) {
- unsigned int rate = amdtp_rate_table[i];
- unsigned int step = amdtp_syt_intervals[i];
-
- if (!snd_interval_test(r, rate))
- continue;
-
- t.min = roundup(t.min, step);
- t.max = rounddown(t.max, step);
+ if (snd_interval_test(r, amdtp_rate_table[i]))
+ step = max(step, amdtp_syt_intervals[i]);
}
- if (snd_interval_checkempty(&t))
- return -EINVAL;
+ t.min = roundup(s->min, step);
+ t.max = rounddown(s->max, step);
+ t.integer = 1;
return snd_interval_refine(s, &t);
}
-static int apply_constraint_to_rate(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- struct snd_interval *r =
- hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- const struct snd_interval *s = hw_param_interval_c(params, rule->deps[0]);
- struct snd_interval t = {
- .min = UINT_MAX, .max = 0, .integer = 1,
- };
- int i;
-
- for (i = 0; i < CIP_SFC_COUNT; ++i) {
- unsigned int step = amdtp_syt_intervals[i];
- unsigned int rate = amdtp_rate_table[i];
-
- if (s->min % step || s->max % step)
- continue;
-
- t.min = min(t.min, rate);
- t.max = max(t.max, rate);
- }
-
- return snd_interval_refine(r, &t);
-}
-
/**
* amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream, which must be initialized.
@@ -250,24 +219,16 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
*/
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
apply_constraint_to_size, NULL,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
goto end;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- apply_constraint_to_rate, NULL,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
- if (err < 0)
- goto end;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
apply_constraint_to_size, NULL,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
goto end;
- err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- apply_constraint_to_rate, NULL,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
- if (err < 0)
- goto end;
end:
return err;
}
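In effect the remaining rule snaps the period/buffer size interval to a multiple of the largest SYT interval among the still-permitted rates. A tiny worked example (the step of 32 frames is just an illustrative value):

#include <stdio.h>

#define ROUNDUP(x, s)	((((x) + (s) - 1) / (s)) * (s))
#define ROUNDDOWN(x, s)	(((x) / (s)) * (s))

int main(void)
{
	unsigned int step = 32, min = 250, max = 1000;

	/* prints "[250, 1000] -> [256, 992]" */
	printf("[%u, %u] -> [%u, %u]\n", min, max,
	       ROUNDUP(min, step), ROUNDDOWN(max, step));
	return 0;
}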
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 0f6dbcffe711..ed50b222d36e 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -240,8 +240,8 @@ static void dice_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&dice->dwork);
if (dice->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(dice->card);
+ // Block until all ALSA character devices are released.
+ snd_card_free(dice->card);
}
mutex_destroy(&dice->mutex);
diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
index 04402c14cb23..9847b669cf3c 100644
--- a/sound/pci/ca0106/ca0106.h
+++ b/sound/pci/ca0106/ca0106.h
@@ -582,7 +582,7 @@
#define SPI_PL_BIT_R_R (2<<7) /* right channel = right */
#define SPI_PL_BIT_R_C (3<<7) /* right channel = (L+R)/2 */
#define SPI_IZD_REG 2
-#define SPI_IZD_BIT (1<<4) /* infinite zero detect */
+#define SPI_IZD_BIT (0<<4) /* infinite zero detect */
#define SPI_FMT_REG 3
#define SPI_FMT_BIT_RJ (0<<0) /* right justified mode */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b607be7236d3..d6e62e90e8d4 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2084,19 +2084,19 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
prog->expected_attach_type = type;
}
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \
- { string, sizeof(string) - 1, ptype, eatype, atype }
+#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
+ { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
/* Programs that can NOT be attached. */
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL)
+#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
- BPF_PROG_SEC_IMPL(string, ptype, 0, atype)
+ BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
- BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype)
+ BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
/* Programs that can be attached but attach type can't be identified by section
* name. Kept for backward compatibility.
@@ -2108,6 +2108,7 @@ static const struct {
size_t len;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
+ int is_attachable;
enum bpf_attach_type attach_type;
} section_names[] = {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
@@ -2198,7 +2199,7 @@ int libbpf_attach_type_by_name(const char *name,
for (i = 0; i < ARRAY_SIZE(section_names); i++) {
if (strncmp(name, section_names[i].sec, section_names[i].len))
continue;
- if (section_names[i].attach_type == -EINVAL)
+ if (!section_names[i].is_attachable)
return -EINVAL;
*attach_type = section_names[i].attach_type;
return 0;
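Callers keep resolving attach types from ELF section names via libbpf_attach_type_by_name(); with the new is_attachable flag, non-attachable sections are rejected explicitly rather than through the old -EINVAL sentinel in attach_type. A short usage sketch (section names taken from the table above):

#include <stdio.h>
#include "libbpf.h"	/* tools/lib/bpf */

int main(void)
{
	enum bpf_attach_type at;

	if (!libbpf_attach_type_by_name("cgroup/skb", &at))
		printf("cgroup/skb -> attach type %d\n", at);

	if (libbpf_attach_type_by_name("socket", &at))
		printf("socket programs are not attachable\n");
	return 0;
}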
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index f119eb628dbb..e86f8be89157 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1819,6 +1819,12 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
tp->offset = strtoul(fmt2_str, NULL, 10);
}
+ if (tev->uprobes) {
+ fmt2_str = strchr(p, '(');
+ if (fmt2_str)
+ tp->ref_ctr_offset = strtoul(fmt2_str + 1, NULL, 0);
+ }
+
tev->nargs = argc - 2;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
if (tev->args == NULL) {
@@ -2012,6 +2018,22 @@ static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
return err;
}
+static int
+synthesize_uprobe_trace_def(struct probe_trace_event *tev, struct strbuf *buf)
+{
+ struct probe_trace_point *tp = &tev->point;
+ int err;
+
+ err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address);
+
+ if (err >= 0 && tp->ref_ctr_offset) {
+ if (!uprobe_ref_ctr_is_supported())
+ return -1;
+ err = strbuf_addf(buf, "(0x%lx)", tp->ref_ctr_offset);
+ }
+ return err >= 0 ? 0 : -1;
+}
+
char *synthesize_probe_trace_command(struct probe_trace_event *tev)
{
struct probe_trace_point *tp = &tev->point;
@@ -2041,15 +2063,17 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
}
/* Use the tp->address for uprobes */
- if (tev->uprobes)
- err = strbuf_addf(&buf, "%s:0x%lx", tp->module, tp->address);
- else if (!strncmp(tp->symbol, "0x", 2))
+ if (tev->uprobes) {
+ err = synthesize_uprobe_trace_def(tev, &buf);
+ } else if (!strncmp(tp->symbol, "0x", 2)) {
/* Absolute address. See try_to_find_absolute_address() */
err = strbuf_addf(&buf, "%s%s0x%lx", tp->module ?: "",
tp->module ? ":" : "", tp->address);
- else
+ } else {
err = strbuf_addf(&buf, "%s%s%s+%lu", tp->module ?: "",
tp->module ? ":" : "", tp->symbol, tp->offset);
+ }
+
if (err)
goto error;
@@ -2633,6 +2657,13 @@ static void warn_uprobe_event_compat(struct probe_trace_event *tev)
{
int i;
char *buf = synthesize_probe_trace_command(tev);
+ struct probe_trace_point *tp = &tev->point;
+
+ if (tp->ref_ctr_offset && !uprobe_ref_ctr_is_supported()) {
+		pr_warning("A semaphore is associated with %s:%s, but "
+			   "your kernel does not seem to support it.\n",
+ tev->group, tev->event);
+ }
/* Old uprobe event doesn't support memory dereference */
if (!tev->uprobes || tev->nargs == 0 || !buf)
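With the reference counter plumbed through, the definition string perf writes to uprobe_events gains an optional "(0xOFFSET)" suffix after the probe address, i.e. "p:GROUP/EVENT PATH:0xADDR(0xREF_CTR_OFFSET)". A standalone sketch of that format only; the path, address and offset below are made-up values:

#include <stdio.h>

int main(void)
{
	const char *binary = "/usr/lib64/libc.so.6";	/* hypothetical target */
	unsigned long address = 0x42345;		/* probe address */
	unsigned long ref_ctr_offset = 0x10bf60;	/* SDT semaphore offset */
	char buf[256];

	if (ref_ctr_offset)
		snprintf(buf, sizeof(buf), "p:sdt_libc/setjmp %s:0x%lx(0x%lx)",
			 binary, address, ref_ctr_offset);
	else
		snprintf(buf, sizeof(buf), "p:sdt_libc/setjmp %s:0x%lx",
			 binary, address);

	puts(buf);	/* perf echoes a line like this into uprobe_events */
	return 0;
}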
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 45b14f020558..15a98c3a2a2f 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -27,6 +27,7 @@ struct probe_trace_point {
char *symbol; /* Base symbol */
char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
+ unsigned long ref_ctr_offset; /* SDT reference counter offset */
unsigned long address; /* Actual address of the trace point */
bool retprobe; /* Return probe flag */
};
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index b76088fadf3d..aac7817d9e14 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -696,8 +696,16 @@ out_err:
#ifdef HAVE_GELF_GETNOTE_SUPPORT
static unsigned long long sdt_note__get_addr(struct sdt_note *note)
{
- return note->bit32 ? (unsigned long long)note->addr.a32[0]
- : (unsigned long long)note->addr.a64[0];
+ return note->bit32 ?
+ (unsigned long long)note->addr.a32[SDT_NOTE_IDX_LOC] :
+ (unsigned long long)note->addr.a64[SDT_NOTE_IDX_LOC];
+}
+
+static unsigned long long sdt_note__get_ref_ctr_offset(struct sdt_note *note)
+{
+ return note->bit32 ?
+ (unsigned long long)note->addr.a32[SDT_NOTE_IDX_REFCTR] :
+ (unsigned long long)note->addr.a64[SDT_NOTE_IDX_REFCTR];
}
static const char * const type_to_suffix[] = {
@@ -775,14 +783,21 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note,
{
struct strbuf buf;
char *ret = NULL, **args;
- int i, args_count;
+ int i, args_count, err;
+ unsigned long long ref_ctr_offset;
if (strbuf_init(&buf, 32) < 0)
return NULL;
- if (strbuf_addf(&buf, "p:%s/%s %s:0x%llx",
- sdtgrp, note->name, pathname,
- sdt_note__get_addr(note)) < 0)
+ err = strbuf_addf(&buf, "p:%s/%s %s:0x%llx",
+ sdtgrp, note->name, pathname,
+ sdt_note__get_addr(note));
+
+ ref_ctr_offset = sdt_note__get_ref_ctr_offset(note);
+ if (ref_ctr_offset && err >= 0)
+ err = strbuf_addf(&buf, "(0x%llx)", ref_ctr_offset);
+
+ if (err < 0)
goto error;
if (!note->args)
@@ -998,6 +1013,7 @@ int probe_cache__show_all_caches(struct strfilter *filter)
enum ftrace_readme {
FTRACE_README_PROBE_TYPE_X = 0,
FTRACE_README_KRETPROBE_OFFSET,
+ FTRACE_README_UPROBE_REF_CTR,
FTRACE_README_END,
};
@@ -1009,6 +1025,7 @@ static struct {
[idx] = {.pattern = pat, .avail = false}
DEFINE_TYPE(FTRACE_README_PROBE_TYPE_X, "*type: * x8/16/32/64,*"),
DEFINE_TYPE(FTRACE_README_KRETPROBE_OFFSET, "*place (kretprobe): *"),
+ DEFINE_TYPE(FTRACE_README_UPROBE_REF_CTR, "*ref_ctr_offset*"),
};
static bool scan_ftrace_readme(enum ftrace_readme type)
@@ -1064,3 +1081,8 @@ bool kretprobe_offset_is_supported(void)
{
return scan_ftrace_readme(FTRACE_README_KRETPROBE_OFFSET);
}
+
+bool uprobe_ref_ctr_is_supported(void)
+{
+ return scan_ftrace_readme(FTRACE_README_UPROBE_REF_CTR);
+}
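Capability detection follows the existing README probes: the kernel advertises the feature with a "ref_ctr_offset" hint in the ftrace README, which perf glob-matches for (via its own strglobmatch() helper). A rough standalone equivalent using fnmatch(), assuming the usual debugfs mount point:

#include <stdio.h>
#include <fnmatch.h>

int main(void)
{
	FILE *fp = fopen("/sys/kernel/debug/tracing/README", "r");
	char line[512];
	int supported = 0;

	if (!fp)
		return 1;
	while (fgets(line, sizeof(line), fp)) {
		if (!fnmatch("*ref_ctr_offset*", line, 0)) {
			supported = 1;
			break;
		}
	}
	fclose(fp);

	printf("uprobe ref_ctr_offset %ssupported\n", supported ? "" : "not ");
	return 0;
}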
diff --git a/tools/perf/util/probe-file.h b/tools/perf/util/probe-file.h
index 63f29b1d22c1..2a249182f2a6 100644
--- a/tools/perf/util/probe-file.h
+++ b/tools/perf/util/probe-file.h
@@ -69,6 +69,7 @@ struct probe_cache_entry *probe_cache__find_by_name(struct probe_cache *pcache,
int probe_cache__show_all_caches(struct strfilter *filter);
bool probe_type_is_available(enum probe_type type);
bool kretprobe_offset_is_supported(void);
+bool uprobe_ref_ctr_is_supported(void);
#else /* ! HAVE_LIBELF_SUPPORT */
static inline struct probe_cache *probe_cache__new(const char *tgt __maybe_unused, struct nsinfo *nsi __maybe_unused)
{
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 29770ea61768..0281d5e2cd67 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1947,6 +1947,34 @@ void kcore_extract__delete(struct kcore_extract *kce)
}
#ifdef HAVE_GELF_GETNOTE_SUPPORT
+
+static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
+{
+ if (!base_off)
+ return;
+
+ if (tmp->bit32)
+ tmp->addr.a32[SDT_NOTE_IDX_LOC] =
+ tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
+ tmp->addr.a32[SDT_NOTE_IDX_BASE];
+ else
+ tmp->addr.a64[SDT_NOTE_IDX_LOC] =
+ tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
+ tmp->addr.a64[SDT_NOTE_IDX_BASE];
+}
+
+static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
+ GElf_Addr base_off)
+{
+ if (!base_off)
+ return;
+
+ if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
+ tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
+ else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
+ tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
+}
+
/**
* populate_sdt_note : Parse raw data and identify SDT note
* @elf: elf of the opened file
@@ -1964,7 +1992,6 @@ static int populate_sdt_note(Elf **elf, const char *data, size_t len,
const char *provider, *name, *args;
struct sdt_note *tmp = NULL;
GElf_Ehdr ehdr;
- GElf_Addr base_off = 0;
GElf_Shdr shdr;
int ret = -EINVAL;
@@ -2060,17 +2087,12 @@ static int populate_sdt_note(Elf **elf, const char *data, size_t len,
	 * base address in the description of the SDT note. If it's different,
	 * adjust the note location accordingly.
*/
- if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL)) {
- base_off = shdr.sh_offset;
- if (base_off) {
- if (tmp->bit32)
- tmp->addr.a32[0] = tmp->addr.a32[0] + base_off -
- tmp->addr.a32[1];
- else
- tmp->addr.a64[0] = tmp->addr.a64[0] + base_off -
- tmp->addr.a64[1];
- }
- }
+ if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
+ sdt_adjust_loc(tmp, shdr.sh_offset);
+
+ /* Adjust reference counter offset */
+ if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
+ sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
list_add_tail(&tmp->note_list, sdt_notes);
return 0;
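The two helpers implement the usual stapsdt relocation arithmetic: the probe location is rebased onto the file offset of .stapsdt.base, and the semaphore's virtual address is converted to a file offset using the .probes section header. A worked example with hypothetical numbers (all values below are invented, not taken from a real binary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t loc    = 0x42345;	/* addr[SDT_NOTE_IDX_LOC] from the note */
	uint64_t base   = 0x41000;	/* addr[SDT_NOTE_IDX_BASE] from the note */
	uint64_t refctr = 0x601040;	/* addr[SDT_NOTE_IDX_REFCTR] from the note */

	uint64_t base_off    = 0x1080;		/* .stapsdt.base sh_offset */
	uint64_t probes_addr = 0x600fc0;	/* .probes sh_addr */
	uint64_t probes_off  = 0xfc0;		/* .probes sh_offset */

	/* sdt_adjust_loc(): rebase the probe location on the file offset. */
	loc = loc + base_off - base;
	/* sdt_adjust_refctr(): turn the semaphore address into a file offset. */
	refctr -= probes_addr - probes_off;

	printf("loc=0x%llx refctr=0x%llx\n",
	       (unsigned long long)loc, (unsigned long long)refctr);
	return 0;
}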
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f25fae4b5743..20f49779116b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -379,12 +379,19 @@ int get_sdt_note_list(struct list_head *head, const char *target);
int cleanup_sdt_note_list(struct list_head *sdt_notes);
int sdt_notes__get_count(struct list_head *start);
+#define SDT_PROBES_SCN ".probes"
#define SDT_BASE_SCN ".stapsdt.base"
#define SDT_NOTE_SCN ".note.stapsdt"
#define SDT_NOTE_TYPE 3
#define SDT_NOTE_NAME "stapsdt"
#define NR_ADDR 3
+enum {
+ SDT_NOTE_IDX_LOC = 0,
+ SDT_NOTE_IDX_BASE,
+ SDT_NOTE_IDX_REFCTR,
+};
+
struct mem_info *mem_info__new(void);
struct mem_info *mem_info__get(struct mem_info *mi);
void mem_info__put(struct mem_info *mi);
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index d3273b5b3173..ae8180b11d5f 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -11,6 +11,8 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "bpf_rlimit.h"
+
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
bool cfg_attach = true;
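flow_dissector_load creates BPF maps and programs, which charge against RLIMIT_MEMLOCK; including bpf_rlimit.h lifts that limit before main() runs. A sketch of the idea, assuming the header raises the limit from a constructor (its exact contents are not part of this diff):

#include <stdio.h>
#include <sys/resource.h>

/* Raise the locked-memory limit before main(), since BPF object creation
 * is charged against RLIMIT_MEMLOCK. */
static void __attribute__((constructor)) raise_memlock_rlimit(void)
{
	struct rlimit rlim = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		perror("setrlimit(RLIMIT_MEMLOCK)");
}

int main(void)
{
	printf("RLIMIT_MEMLOCK raised; BPF objects can now be created\n");
	return 0;
}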
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
index 42544a969abc..a9bc6f82abc1 100755
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh
@@ -10,7 +10,7 @@ wait_for_ip()
echo -n "Wait for testing link-local IP to become available "
for _i in $(seq ${MAX_PING_TRIES}); do
echo -n "."
- if ping -6 -q -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
+ if $PING6 -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
echo " OK"
return
fi
@@ -58,5 +58,6 @@ BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o"
BPF_PROG_SECTION="cgroup_id_logger"
BPF_PROG_ID=0
PROG="${DIR}/test_skb_cgroup_id_user"
+type ping6 >/dev/null 2>&1 && PING6="ping6" || PING6="ping -6"
main
diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh
index 9832a875a828..3b9fdb8094aa 100755
--- a/tools/testing/selftests/bpf/test_sock_addr.sh
+++ b/tools/testing/selftests/bpf/test_sock_addr.sh
@@ -4,7 +4,8 @@ set -eu
ping_once()
{
- ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
+ type ping${1} >/dev/null 2>&1 && PING="ping${1}" || PING="ping -${1}"
+ $PING -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
}
wait_for_ip()
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 36f3d3009d1a..6f61df62f690 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -76,7 +76,7 @@ struct bpf_test {
int fixup_percpu_cgroup_storage[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
- uint32_t retval;
+ uint32_t retval, retval_unpriv;
enum {
UNDEF,
ACCEPT,
@@ -3084,6 +3084,8 @@ static struct bpf_test tests[] = {
.fixup_prog1 = { 2 },
.result = ACCEPT,
.retval = 42,
+ /* Verifier rewrite for unpriv skips tail call here. */
+ .retval_unpriv = 2,
},
{
"stack pointer arithmetic",
@@ -6455,6 +6457,256 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
+ "map access: known scalar += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "map access: value_ptr += known scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "map access: unknown scalar += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "map access: value_ptr += unknown scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "map access: value_ptr += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 pointer += pointer prohibited",
+ },
+ {
+ "map access: known scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+ },
+ {
+ "map access: value_ptr -= known scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ },
+ {
+ "map access: value_ptr -= known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "map access: unknown scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+ },
+ {
+ "map access: value_ptr -= unknown scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is negative",
+ },
+ {
+ "map access: value_ptr -= unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "map access: value_ptr -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 pointer -= pointer prohibited",
+ },
+ {
"map lookup helper access to map",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -13899,6 +14151,33 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
}
}
+static int set_admin(bool admin)
+{
+ cap_t caps;
+ const cap_value_t cap_val = CAP_SYS_ADMIN;
+ int ret = -1;
+
+ caps = cap_get_proc();
+ if (!caps) {
+ perror("cap_get_proc");
+ return -1;
+ }
+ if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
+ admin ? CAP_SET : CAP_CLEAR)) {
+ perror("cap_set_flag");
+ goto out;
+ }
+ if (cap_set_proc(caps)) {
+ perror("cap_set_proc");
+ goto out;
+ }
+ ret = 0;
+out:
+ if (cap_free(caps))
+ perror("cap_free");
+ return ret;
+}
+
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
@@ -13907,6 +14186,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
struct bpf_insn *prog = test->insns;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
+ uint32_t expected_val;
uint32_t retval;
int i, err;
@@ -13926,6 +14206,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
+ expected_val = unpriv && test->retval_unpriv ?
+ test->retval_unpriv : test->retval;
reject_from_alignment = fd_prog < 0 &&
(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
@@ -13959,16 +14241,20 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
__u8 tmp[TEST_DATA_LEN << 2];
__u32 size_tmp = sizeof(tmp);
+ if (unpriv)
+ set_admin(true);
err = bpf_prog_test_run(fd_prog, 1, test->data,
sizeof(test->data), tmp, &size_tmp,
&retval, NULL);
+ if (unpriv)
+ set_admin(false);
if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
printf("Unexpected bpf_prog_test_run error\n");
goto fail_log;
}
- if (!err && retval != test->retval &&
- test->retval != POINTER_VALUE) {
- printf("FAIL retval %d != %d\n", retval, test->retval);
+ if (!err && retval != expected_val &&
+ expected_val != POINTER_VALUE) {
+ printf("FAIL retval %d != %d\n", retval, expected_val);
goto fail_log;
}
}
@@ -14011,33 +14297,6 @@ static bool is_admin(void)
return (sysadmin == CAP_SET);
}
-static int set_admin(bool admin)
-{
- cap_t caps;
- const cap_value_t cap_val = CAP_SYS_ADMIN;
- int ret = -1;
-
- caps = cap_get_proc();
- if (!caps) {
- perror("cap_get_proc");
- return -1;
- }
- if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
- admin ? CAP_SET : CAP_CLEAR)) {
- perror("cap_set_flag");
- goto out;
- }
- if (cap_set_proc(caps)) {
- perror("cap_set_proc");
- goto out;
- }
- ret = 0;
-out:
- if (cap_free(caps))
- perror("cap_free");
- return ret;
-}
-
static void get_unpriv_disabled()
{
char buf[2];
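Hoisting set_admin() above do_test_single() lets the harness temporarily re-acquire CAP_SYS_ADMIN around bpf_prog_test_run() when exercising the unprivileged path. For reference, the query side uses the same libcap API; a small sketch (link with -lcap), mirroring what the harness's is_admin() check does:

#include <stdio.h>
#include <sys/capability.h>

int main(void)
{
	cap_flag_value_t val = CAP_CLEAR;
	cap_t caps = cap_get_proc();

	if (!caps) {
		perror("cap_get_proc");
		return 1;
	}
	if (cap_get_flag(caps, CAP_SYS_ADMIN, CAP_EFFECTIVE, &val))
		perror("cap_get_flag");
	cap_free(caps);

	printf("CAP_SYS_ADMIN is %s\n", val == CAP_SET ? "effective" : "not effective");
	return 0;
}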
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index 0150bb2741eb..117f6f35d72f 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -25,24 +25,24 @@
# Thus we set MTU to 10K on all involved interfaces. Then both unicast and
# multicast traffic uses 8K frames.
#
-# +-----------------------+ +----------------------------------+
-# | H1 | | H2 |
-# | | | unicast --> + $h2.111 |
-# | | | traffic | 192.0.2.129/28 |
-# | multicast | | | e-qos-map 0:1 |
-# | traffic | | | |
-# | $h1 + <----- | | + $h2 |
-# +-----|-----------------+ +--------------|-------------------+
-# | |
-# +-----|-------------------------------------------------|-------------------+
-# | + $swp1 + $swp2 |
-# | | >1Gbps | >1Gbps |
-# | +---|----------------+ +----------|----------------+ |
-# | | + $swp1.1 | | + $swp2.111 | |
+# +---------------------------+ +----------------------------------+
+# | H1 | | H2 |
+# | | | unicast --> + $h2.111 |
+# | multicast | | traffic | 192.0.2.129/28 |
+# | traffic | | | e-qos-map 0:1 |
+# | $h1 + <----- | | | |
+# | 192.0.2.65/28 | | | + $h2 |
+# +---------------|-----------+ +--------------|-------------------+
+# | |
+# +---------------|---------------------------------------|-------------------+
+# | $swp1 + + $swp2 |
+# | >1Gbps | | >1Gbps |
+# | +-------------|------+ +----------|----------------+ |
+# | | $swp1.1 + | | + $swp2.111 | |
# | | BR1 | SW | BR111 | |
-# | | + $swp3.1 | | + $swp3.111 | |
-# | +---|----------------+ +----------|----------------+ |
-# | \_________________________________________________/ |
+# | | $swp3.1 + | | + $swp3.111 | |
+# | +-------------|------+ +----------|----------------+ |
+# | \_______________________________________/ |
# | | |
# | + $swp3 |
# | | 1Gbps bottleneck |
@@ -51,6 +51,7 @@
# |
# +--|-----------------+
# | + $h3 H3 |
+# | | 192.0.2.66/28 |
# | | |
# | + $h3.111 |
# | 192.0.2.130/28 |
@@ -59,6 +60,7 @@
ALL_TESTS="
ping_ipv4
test_mc_aware
+ test_uc_aware
"
lib_dir=$(dirname $0)/../../../net/forwarding
@@ -68,14 +70,14 @@ source $lib_dir/lib.sh
h1_create()
{
- simple_if_init $h1
+ simple_if_init $h1 192.0.2.65/28
mtu_set $h1 10000
}
h1_destroy()
{
mtu_restore $h1
- simple_if_fini $h1
+ simple_if_fini $h1 192.0.2.65/28
}
h2_create()
@@ -97,7 +99,7 @@ h2_destroy()
h3_create()
{
- simple_if_init $h3
+ simple_if_init $h3 192.0.2.66/28
mtu_set $h3 10000
vlan_create $h3 111 v$h3 192.0.2.130/28
@@ -108,7 +110,7 @@ h3_destroy()
vlan_destroy $h3 111
mtu_restore $h3
- simple_if_fini $h3
+ simple_if_fini $h3 192.0.2.66/28
}
switch_create()
@@ -251,7 +253,7 @@ measure_uc_rate()
# average ingress rate to somewhat mitigate this.
local min_ingress=2147483648
- mausezahn $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
+ $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
-a own -b $h3mac -t udp -q &
sleep 1
@@ -291,7 +293,7 @@ test_mc_aware()
check_err $? "Could not get high enough UC-only ingress rate"
local ucth1=${uc_rate[1]}
- mausezahn $h1 -p 8000 -c 0 -a own -b bc -t udp -q &
+ $MZ $h1 -p 8000 -c 0 -a own -b bc -t udp -q &
local d0=$(date +%s)
local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
@@ -311,7 +313,7 @@ test_mc_aware()
ret = 100 * ($ucth1 - $ucth2) / $ucth1
if (ret > 0) { ret } else { 0 }
")
- check_err $(bc <<< "$deg > 10")
+ check_err $(bc <<< "$deg > 25")
local interval=$((d1 - d0))
local mc_ir=$(rate $u0 $u1 $interval)
@@ -335,6 +337,51 @@ test_mc_aware()
echo " egress UC throughput $(humanize ${uc_rate_2[1]})"
echo " ingress MC throughput $(humanize $mc_ir)"
echo " egress MC throughput $(humanize $mc_er)"
+ echo
+}
+
+test_uc_aware()
+{
+ RET=0
+
+ $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
+ -a own -b $h3mac -t udp -q &
+
+ local d0=$(date +%s)
+ local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
+ local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1)
+ sleep 1
+
+ local attempts=50
+ local passes=0
+ local i
+
+ for ((i = 0; i < attempts; ++i)); do
+ if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then
+ ((passes++))
+ fi
+
+ sleep 0.1
+ done
+
+ local d1=$(date +%s)
+ local t1=$(ethtool_stats_get $h3 rx_octets_prio_1)
+ local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1)
+
+ local interval=$((d1 - d0))
+ local uc_ir=$(rate $u0 $u1 $interval)
+ local uc_er=$(rate $t0 $t1 $interval)
+
+ ((attempts == passes))
+ check_err $?
+
+ # Suppress noise from killing mausezahn.
+ { kill %% && wait; } 2>/dev/null
+
+	log_test "MC performance under UC overload"
+ echo " ingress UC throughput $(humanize ${uc_ir})"
+ echo " egress UC throughput $(humanize ${uc_er})"
+ echo " sent $attempts BC ARPs, got $passes responses"
}
trap cleanup EXIT
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
index 8d647fb572dd..41128219231a 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
@@ -25,18 +25,18 @@ fi
reset_trigger
-echo "Test create synthetic event with an error"
-echo 'wakeup_latency u64 lat pid_t pid char' > synthetic_events > /dev/null
+echo "Test remove synthetic event"
+echo '!wakeup_latency u64 lat pid_t pid char comm[16]' >> synthetic_events
if [ -d events/synthetic/wakeup_latency ]; then
- fail "Created wakeup_latency synthetic event with an invalid format"
+ fail "Failed to delete wakeup_latency synthetic event"
fi
reset_trigger
-echo "Test remove synthetic event"
-echo '!wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
+echo "Test create synthetic event with an error"
+echo 'wakeup_latency u64 lat pid_t pid char' > synthetic_events > /dev/null
if [ -d events/synthetic/wakeup_latency ]; then
- fail "Failed to delete wakeup_latency synthetic event"
+ fail "Created wakeup_latency synthetic event with an invalid format"
fi
exit 0
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile
index ede4d3dae750..689f6c8ebcd8 100644
--- a/tools/testing/selftests/powerpc/cache_shape/Makefile
+++ b/tools/testing/selftests/powerpc/cache_shape/Makefile
@@ -1,12 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := cache_shape
-
-all: $(TEST_PROGS)
-
-$(TEST_PROGS): ../harness.c ../utils.c
+TEST_GEN_PROGS := cache_shape
top_srcdir = ../../../../..
include ../../lib.mk
-clean:
- rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index bd5dfa509272..23f4caf48ffc 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -5,6 +5,9 @@ noarg:
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
+# Toolchains may build PIE by default which breaks the assembly
+LDFLAGS += -no-pie
+
TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
cycles_with_freeze_test pmc56_overflow_test \
ebb_vs_cpu_event_test cpu_event_vs_ebb_test \
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 9b35ca8e8f13..8d3f006c98cc 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
perf-hwbreak ptrace-syscall
@@ -7,14 +7,9 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
top_srcdir = ../../../../..
include ../../lib.mk
-all: $(TEST_PROGS)
-
CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie
-ptrace-pkey core-pkey: child.h
-ptrace-pkey core-pkey: LDLIBS += -pthread
-
-$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
+$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h
+$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
-clean:
- rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
index 327fa943c7f3..dbdffa2e2c82 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -67,8 +67,8 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
- [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
- [flt_2] "r" (&b), [flt_4] "r" (&d)
+ [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
+ [flt_4] "b" (&d)
: "memory", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
index 44690f1bb26a..85861c46b445 100644
--- a/tools/testing/selftests/powerpc/security/Makefile
+++ b/tools/testing/selftests/powerpc/security/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0+
TEST_GEN_PROGS := rfi_flush
+top_srcdir = ../../../../..
CFLAGS += -I../../../../../usr/include
diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
index 564ed45bbf73..0a7d0afb26b8 100644
--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
+++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
@@ -49,6 +49,7 @@ int rfi_flush_test(void)
struct perf_event_read v;
__u64 l1d_misses_total = 0;
unsigned long iterations = 100000, zero_size = 24 * 1024;
+ unsigned long l1d_misses_expected;
int rfi_flush_org, rfi_flush;
SKIP_IF(geteuid() != 0);
@@ -71,6 +72,12 @@ int rfi_flush_test(void)
iter = repetitions;
+ /*
+	 * We expect to see an L1D miss for each cacheline access when
+	 * rfi_flush is set. Allow a small variation on this.
+ */
+ l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
+
again:
FAIL_IF(perf_event_reset(fd));
@@ -78,10 +85,9 @@ again:
FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
- /* Expect at least zero_size/CACHELINE_SIZE misses per iteration */
- if (v.l1d_misses >= (iterations * zero_size / CACHELINE_SIZE) && rfi_flush)
+ if (rfi_flush && v.l1d_misses >= l1d_misses_expected)
passes++;
- else if (v.l1d_misses < iterations && !rfi_flush)
+ else if (!rfi_flush && v.l1d_misses < (l1d_misses_expected / 2))
passes++;
l1d_misses_total += v.l1d_misses;
@@ -92,13 +98,15 @@ again:
if (passes < repetitions) {
printf("FAIL (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d failures]\n",
rfi_flush, l1d_misses_total, rfi_flush ? '<' : '>',
- rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+ rfi_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
repetitions - passes, repetitions);
rc = 1;
} else
printf("PASS (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d pass]\n",
rfi_flush, l1d_misses_total, rfi_flush ? '>' : '<',
- rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+ rfi_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
passes, repetitions);
if (rfi_flush == rfi_flush_org) {
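With the defaults in this test the new threshold works out as follows, assuming the 128-byte cache line size used by the powerpc selftests: zero_size / CACHELINE_SIZE = 24 * 1024 / 128 = 192 lines, minus the 2-line slack gives 190, so with iterations = 100000 roughly 19,000,000 L1D misses are expected per repetition when the flush is enabled, and fewer than half that when it is disabled. The arithmetic in one place:

#include <stdio.h>

int main(void)
{
	unsigned long iterations = 100000, zero_size = 24 * 1024;
	unsigned long cacheline_size = 128;	/* assumed CACHELINE_SIZE */

	printf("expected L1D misses: %lu\n",
	       iterations * (zero_size / cacheline_size - 2));
	return 0;
}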
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index 1fca25c6ace0..209a958dca12 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -1,15 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := signal signal_tm
-
-all: $(TEST_PROGS)
-
-$(TEST_PROGS): ../harness.c ../utils.c signal.S
+TEST_GEN_PROGS := signal signal_tm
CFLAGS += -maltivec
-signal_tm: CFLAGS += -mhtm
+$(OUTPUT)/signal_tm: CFLAGS += -mhtm
top_srcdir = ../../../../..
include ../../lib.mk
-clean:
- rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c signal.S
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile
index fcd2dcb8972b..bdc081afedb0 100644
--- a/tools/testing/selftests/powerpc/switch_endian/Makefile
+++ b/tools/testing/selftests/powerpc/switch_endian/Makefile
@@ -8,6 +8,7 @@ EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
top_srcdir = ../../../../..
include ../../lib.mk
+$(OUTPUT)/switch_endian_test: ASFLAGS += -I $(OUTPUT)
$(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
$(OUTPUT)/check-reversed.o: $(OUTPUT)/check.o
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index 43c342845be0..ed62f4153d3e 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -25,7 +25,6 @@
#include "utils.h"
static char auxv[4096];
-extern unsigned int dscr_insn[];
int read_auxv(char *buf, ssize_t buf_size)
{
@@ -247,7 +246,8 @@ static void sigill_handler(int signr, siginfo_t *info, void *unused)
ucontext_t *ctx = (ucontext_t *)unused;
unsigned long *pc = &UCONTEXT_NIA(ctx);
- if (*pc == (unsigned long)&dscr_insn) {
+ /* mtspr 3,RS to check for move to DSCR below */
+ if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
if (!warned++)
printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
*pc += 4;
@@ -271,5 +271,5 @@ void set_dscr(unsigned long val)
init = 1;
}
- asm volatile("dscr_insn: mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+ asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
}
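The magic numbers encode the check "is this an mtspr to SPR 3 (the DSCR number used here) from any GPR": 0x7c0303a6 is the encoding of mtspr 3,r0, and the 0xfc1fffff mask clears the RS field (bits 6-10 in IBM bit numbering) so the source register is ignored. A small self-check of that decoding, using two hypothetical instruction words:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical encodings of "mtspr 3,r5" and "mtspr 3,r14";
	 * only the RS field (bits 6-10) differs between them. */
	uint32_t mtspr_3_r5  = 0x7ca303a6;
	uint32_t mtspr_3_r14 = 0x7dc303a6;
	uint32_t mask = 0xfc1fffff, match = 0x7c0303a6;

	printf("r5 matches: %d, r14 matches: %d\n",
	       (mtspr_3_r5 & mask) == match,
	       (mtspr_3_r14 & mask) == match);
	return 0;
}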