-rw-r--r--Documentation/Changes2
-rw-r--r--Documentation/devicetree/bindings/arm/gic-v3.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bma180.txt8
-rw-r--r--Documentation/devicetree/bindings/input/cypress,cyapa.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt20
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt3
-rw-r--r--Documentation/devicetree/bindings/regulator/pbias-regulator.txt7
-rw-r--r--Documentation/devicetree/bindings/spi/spi-mt65xx.txt16
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal.txt27
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/input/multi-touch-protocol.txt2
-rw-r--r--Documentation/networking/vrf.txt96
-rw-r--r--Documentation/power/pci.txt51
-rw-r--r--Documentation/ptp/testptp.c1
-rw-r--r--Documentation/sysctl/net.txt16
-rw-r--r--Documentation/thermal/power_allocator.txt2
-rw-r--r--MAINTAINERS42
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/pci.c7
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arm/boot/dts/am335x-phycore-som.dtsi4
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts46
-rw-r--r--arch/arm/boot/dts/dm8148-evm.dts4
-rw-r--r--arch/arm/boot/dts/dm8148-t410.dts6
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi8
-rw-r--r--arch/arm/boot/dts/dra7.dtsi5
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-common.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3.dtsi25
-rw-r--r--arch/arm/boot/dts/omap4.dtsi3
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts4
-rw-r--r--arch/arm/boot/dts/omap5.dtsi3
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi1
-rw-r--r--arch/arm/boot/dts/stih407.dtsi82
-rw-r--r--arch/arm/boot/dts/stih410.dtsi82
-rw-r--r--arch/arm/configs/omap2plus_defconfig5
-rw-r--r--arch/arm/include/asm/kvm_host.h1
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/board-generic.c7
-rw-r--r--arch/arm/mach-omap2/id.c8
-rw-r--r--arch/arm/mach-omap2/io.c1
-rw-r--r--arch/arm/mach-omap2/omap_device.c3
-rw-r--r--arch/arm/mach-omap2/pm.h3
-rw-r--r--arch/arm/mach-omap2/soc.h2
-rw-r--r--arch/arm/mach-omap2/timer.c8
-rw-r--r--arch/arm/mach-omap2/vc.c2
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/addr-map.h7
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c21
-rw-r--r--arch/arm/mm/alignment.c30
-rw-r--r--arch/arm/plat-pxa/ssp.c1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/include/asm/kvm_host.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h4
-rw-r--r--arch/arm64/kernel/efi.c3
-rw-r--r--arch/arm64/kernel/entry-ftrace.S22
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c2
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/pci/pci.c5
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m68k/configs/amiga_defconfig9
-rw-r--r--arch/m68k/configs/apollo_defconfig9
-rw-r--r--arch/m68k/configs/atari_defconfig9
-rw-r--r--arch/m68k/configs/bvme6000_defconfig9
-rw-r--r--arch/m68k/configs/hp300_defconfig9
-rw-r--r--arch/m68k/configs/mac_defconfig9
-rw-r--r--arch/m68k/configs/multi_defconfig9
-rw-r--r--arch/m68k/configs/mvme147_defconfig9
-rw-r--r--arch/m68k/configs/mvme16x_defconfig9
-rw-r--r--arch/m68k/configs/q40_defconfig9
-rw-r--r--arch/m68k/configs/sun3_defconfig9
-rw-r--r--arch/m68k/configs/sun3x_defconfig9
-rw-r--r--arch/m68k/include/asm/linkage.h30
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h19
-rw-r--r--arch/m68k/kernel/syscalltable.S20
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/pci/pci-common.c9
-rw-r--r--arch/mips/ath79/irq.c22
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/cpu-features.h3
-rw-r--r--arch/mips/include/asm/cpu.h1
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/include/asm/maar.h9
-rw-r--r--arch/mips/include/asm/mips-cm.h39
-rw-r--r--arch/mips/include/asm/mipsregs.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c1
-rw-r--r--arch/mips/jz4740/gpio.c1
-rw-r--r--arch/mips/kernel/cps-vec.S12
-rw-r--r--arch/mips/kernel/cpu-probe.c21
-rw-r--r--arch/mips/kernel/octeon_switch.S26
-rw-r--r--arch/mips/kernel/r2300_switch.S28
-rw-r--r--arch/mips/kernel/scall32-o32.S41
-rw-r--r--arch/mips/kernel/scall64-64.S40
-rw-r--r--arch/mips/kernel/scall64-n32.S21
-rw-r--r--arch/mips/kernel/scall64-o32.S21
-rw-r--r--arch/mips/kernel/setup.c10
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/loongson64/common/env.c3
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/mips/mm/init.c177
-rw-r--r--arch/mips/net/bpf_jit_asm.S63
-rw-r--r--arch/mips/pci/pci.c6
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/unit-asb2305/pci.c1
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/pci-common.c8
-rw-r--r--arch/powerpc/kvm/book3s.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/tile/gxio/mpipe.c33
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/kernel/usb.c1
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/x86/entry/entry_64.S16
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/efi.h12
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/uapi/asm/bitsperlong.h2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c17
-rw-r--r--arch/x86/kernel/cpu/perf_event_msr.c4
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/crash.c7
-rw-r--r--arch/x86/kernel/paravirt.c16
-rw-r--r--arch/x86/kernel/process.c55
-rw-r--r--arch/x86/kernel/process_32.c28
-rw-r--r--arch/x86/kernel/process_64.c24
-rw-r--r--arch/x86/kvm/mmu.c25
-rw-r--r--arch/x86/kvm/svm.c129
-rw-r--r--arch/x86/kvm/vmx.c11
-rw-r--r--arch/x86/kvm/x86.c6
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/pci/common.c1
-rw-r--r--arch/x86/platform/efi/efi.c67
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/kernel/pci.c4
-rw-r--r--block/blk-mq-cpumap.c9
-rw-r--r--block/blk-mq-sysfs.c34
-rw-r--r--block/blk-mq-tag.c27
-rw-r--r--block/blk-mq-tag.h2
-rw-r--r--block/blk-mq.c118
-rw-r--r--block/blk-mq.h3
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c4
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/pci_link.c16
-rw-r--r--drivers/atm/he.c7
-rw-r--r--drivers/atm/solos-pci.c12
-rw-r--r--drivers/base/cacheinfo.c10
-rw-r--r--drivers/base/power/opp.c17
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/block/nvme-core.c52
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c38
-rw-r--r--drivers/block/xen-blkfront.c19
-rw-r--r--drivers/char/hw_random/xgene-rng.c7
-rw-r--r--drivers/clocksource/rockchip_timer.c2
-rw-r--r--drivers/clocksource/timer-keystone.c2
-rw-r--r--drivers/crypto/marvell/cesa.h27
-rw-r--r--drivers/crypto/marvell/cipher.c7
-rw-r--r--drivers/crypto/marvell/hash.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c3
-rw-r--r--drivers/dma/at_xdmac.c15
-rw-r--r--drivers/dma/dmaengine.c10
-rw-r--r--drivers/dma/dw/core.c4
-rw-r--r--drivers/dma/idma64.c16
-rw-r--r--drivers/dma/pxa_dma.c31
-rw-r--r--drivers/dma/sun4i-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c46
-rw-r--r--drivers/dma/zx296702_dma.c2
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firmware/Kconfig8
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c88
-rw-r--r--drivers/firmware/efi/libstub/efistub.h4
-rw-r--r--drivers/firmware/qcom_scm-64.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c3
-rw-r--r--drivers/gpu/drm/amd/include/cgs_linux.h17
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h41
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c155
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h41
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c85
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c94
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c26
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c39
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c31
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c36
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c32
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c11
-rw-r--r--drivers/hv/channel_mgmt.c17
-rw-r--r--drivers/hwmon/abx500.c1
-rw-r--r--drivers/hwmon/gpio-fan.c1
-rw-r--r--drivers/hwmon/pwm-fan.c1
-rw-r--r--drivers/idle/intel_idle.c12
-rw-r--r--drivers/infiniband/hw/mlx5/main.c67
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c26
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c21
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c293
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h21
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/joystick/walkera0701.c4
-rw-r--r--drivers/input/keyboard/omap4-keypad.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c2
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c26
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c4
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c4
-rw-r--r--drivers/input/mouse/synaptics.c12
-rw-r--r--drivers/input/serio/libps2.c22
-rw-r--r--drivers/input/serio/parkbd.c1
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c34
-rw-r--r--drivers/input/touchscreen/mms114.c4
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/iova.c120
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c24
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c3
-rw-r--r--drivers/irqchip/irq-mips-gic.c12
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/dm-crypt.c17
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid0.c12
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c9
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/mei/debugfs.c3
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/host/pxamci.c66
-rw-r--r--drivers/mmc/host/sunxi-mmc.c53
-rw-r--r--drivers/mtd/ubi/io.c5
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/mtd/ubi/wl.c1
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c24
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c12
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c15
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c9
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c22
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c18
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c111
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c17
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c74
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c47
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/fjes/fjes_hw.c8
-rw-r--r--drivers/net/geneve.c32
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c1
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux.c19
-rw-r--r--drivers/net/phy/mdio_bus.c31
-rw-r--r--drivers/net/phy/phy_device.c62
-rw-r--r--drivers/net/phy/vitesse.c14
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/usb/Kconfig11
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/ch9200.c432
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/of/of_mdio.c27
-rw-r--r--drivers/of/of_pci_irq.c22
-rw-r--r--drivers/parisc/dino.c3
-rw-r--r--drivers/parisc/lba_pci.c1
-rw-r--r--drivers/pci/access.c27
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/pci/probe.c23
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/regulator/anatop-regulator.c1
-rw-r--r--drivers/regulator/core.c21
-rw-r--r--drivers/regulator/gpio-regulator.c1
-rw-r--r--drivers/regulator/pbias-regulator.c56
-rw-r--r--drivers/regulator/tps65218-regulator.c2
-rw-r--r--drivers/regulator/vexpress.c1
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-bcm2835.c6
-rw-r--r--drivers/spi/spi-meson-spifc.c1
-rw-r--r--drivers/spi/spi-mt65xx.c53
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c4
-rw-r--r--drivers/spi/spi.c3
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/staging/android/TODO20
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c2
-rw-r--r--drivers/staging/fbtft/fb_watterott.c4
-rw-r--r--drivers/staging/fbtft/fbtft-core.c10
-rw-r--r--drivers/staging/fbtft/flexfb.c11
-rw-r--r--drivers/staging/lustre/README.txt16
-rw-r--r--drivers/staging/most/Kconfig1
-rw-r--r--drivers/staging/most/hdm-dim2/Kconfig1
-rw-r--r--drivers/staging/most/hdm-usb/Kconfig2
-rw-r--r--drivers/staging/most/mostcore/Kconfig1
-rw-r--r--drivers/staging/unisys/visorbus/Makefile1
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c13
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c18
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c5
-rw-r--r--drivers/target/target_core_device.c45
-rw-r--r--drivers/target/target_core_hba.c2
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pr.c91
-rw-r--r--drivers/target/target_core_tpg.c5
-rw-r--r--drivers/thermal/Kconfig17
-rw-r--r--drivers/thermal/cpu_cooling.c52
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c1
-rw-r--r--drivers/thermal/power_allocator.c253
-rw-r--r--drivers/thermal/thermal_core.c28
-rw-r--r--drivers/thermal/ti-soc-thermal/Kconfig8
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c25
-rw-r--r--drivers/usb/chipidea/udc.c84
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c4
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/epautoconf.c1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c43
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c11
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c3
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c46
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c3
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c3
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/host/xhci-mem.c17
-rw-r--r--drivers/usb/host/xhci-pci.c90
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/musb/musb_core.c7
-rw-r--r--drivers/usb/musb/musb_cppi41.c3
-rw-r--r--drivers/usb/musb/musb_dsps.c7
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/phy/phy-generic.c3
-rw-r--r--drivers/usb/phy/phy-isp1301.c1
-rw-r--r--drivers/usb/serial/option.c24
-rw-r--r--drivers/usb/serial/whiteheat.c31
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/bcm2835_wdt.c10
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/mena21_wdt.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c1
-rw-r--r--fs/btrfs/btrfs_inode.h2
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c7
-rw-r--r--fs/btrfs/extent_io.c65
-rw-r--r--fs/btrfs/inode.c45
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c32
-rw-r--r--fs/btrfs/transaction.h5
-rw-r--r--fs/cifs/cifsencrypt.c53
-rw-r--r--fs/cifs/smb2ops.c8
-rw-r--r--fs/cifs/smb2pdu.c84
-rw-r--r--fs/dax.c13
-rw-r--r--fs/nfs/delegation.c8
-rw-r--r--fs/nfs/delegation.h2
-rw-r--r--fs/nfs/direct.c7
-rw-r--r--fs/nfs/filelayout/filelayout.c31
-rw-r--r--fs/nfs/nfs42proc.c4
-rw-r--r--fs/nfs/nfs4proc.c127
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/pnfs.c35
-rw-r--r--fs/nfs/pnfs.h7
-rw-r--r--fs/nfs/read.c3
-rw-r--r--fs/nfs/write.c3
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c9
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c8
-rw-r--r--fs/ubifs/xattr.c3
-rw-r--r--fs/userfaultfd.c8
-rw-r--r--include/asm-generic/word-at-a-time.h80
-rw-r--r--include/drm/drm_crtc_helper.h1
-rw-r--r--include/drm/drm_dp_helper.h4
-rw-r--r--include/drm/drm_dp_mst_helper.h1
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/backing-dev.h11
-rw-r--r--include/linux/blk-mq.h5
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/iova.h4
-rw-r--r--include/linux/memcontrol.h1
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mm.h21
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/phy.h6
-rw-r--r--include/linux/rcupdate.h11
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/xprtsock.h3
-rw-r--r--include/linux/thermal.h8
-rw-r--r--include/linux/wait.h5
-rw-r--r--include/net/af_unix.h6
-rw-r--r--include/net/flow.h1
-rw-r--r--include/net/inet_timewait_sock.h14
-rw-r--r--include/net/ip6_fib.h3
-rw-r--r--include/net/ip6_tunnel.h17
-rw-r--r--include/net/ip_fib.h30
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/route.h2
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/uapi/asm-generic/unistd.h8
-rw-r--r--include/uapi/linux/lwtunnel.h4
-rw-r--r--include/uapi/linux/userfaultfd.h2
-rw-r--r--ipc/msg.c14
-rw-r--r--ipc/shm.c13
-rw-r--r--ipc/util.c8
-rw-r--r--kernel/events/core.c114
-rw-r--r--kernel/irq/proc.c19
-rw-r--r--kernel/rcu/tree.c5
-rw-r--r--kernel/sched/core.c14
-rw-r--r--kernel/sched/wait.c7
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--lib/iommu-common.c6
-rw-r--r--lib/rhashtable.c5
-rw-r--r--lib/string.c88
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/memcontrol.c31
-rw-r--r--mm/migrate.c14
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/slab.c13
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/atm/clip.c3
-rw-r--r--net/bluetooth/smp.c12
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/fib_rules.c14
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/net-sysfs.c12
-rw-r--r--net/core/netpoll.c10
-rw-r--r--net/core/rtnetlink.c26
-rw-r--r--net/core/skbuff.c9
-rw-r--r--net/core/sock.c12
-rw-r--r--net/dccp/ackvec.c12
-rw-r--r--net/dccp/ccid.c3
-rw-r--r--net/dccp/minisocks.c4
-rw-r--r--net/dsa/dsa.c41
-rw-r--r--net/dsa/slave.c11
-rw-r--r--net/dsa/tag_trailer.c2
-rw-r--r--net/ipv4/arp.c39
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c8
-rw-r--r--net/ipv4/inet_timewait_sock.c16
-rw-r--r--net/ipv4/ip_tunnel_core.c54
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp_cubic.c10
-rw-r--r--net/ipv4/tcp_minisocks.c13
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/ip6_fib.c26
-rw-r--r--net/ipv6/ip6_gre.c93
-rw-r--r--net/ipv6/ip6_output.c14
-rw-r--r--net/ipv6/ip6_tunnel.c147
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/l2tp/l2tp_core.c11
-rw-r--r--net/mac80211/cfg.c13
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nft_compat.c24
-rw-r--r--net/netlink/af_netlink.c63
-rw-r--r--net/netlink/af_netlink.h10
-rw-r--r--net/openvswitch/Kconfig3
-rw-r--r--net/openvswitch/conntrack.c8
-rw-r--r--net/openvswitch/datapath.c4
-rw-r--r--net/openvswitch/flow_netlink.c82
-rw-r--r--net/openvswitch/flow_table.c23
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/packet/af_packet.c32
-rw-r--r--net/sched/cls_fw.c30
-rw-r--r--net/sctp/associola.c20
-rw-r--r--net/sctp/protocol.c64
-rw-r--r--net/sctp/sm_sideeffect.c44
-rw-r--r--net/sunrpc/sched.c14
-rw-r--r--net/sunrpc/xprt.c6
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c19
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c5
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c10
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
-rw-r--r--net/sunrpc/xprtsock.c15
-rw-r--r--net/tipc/msg.c1
-rw-r--r--net/unix/af_unix.c15
-rw-r--r--samples/kprobes/jprobe_example.c14
-rw-r--r--samples/kprobes/kprobe_example.c6
-rw-r--r--samples/kprobes/kretprobe_example.c4
-rw-r--r--scripts/extract-cert.c4
-rwxr-xr-xscripts/sign-file.c94
-rw-r--r--security/keys/gc.c8
-rw-r--r--sound/arm/Kconfig15
-rw-r--r--sound/pci/hda/hda_tegra.c30
-rw-r--r--sound/pci/hda/patch_realtek.c31
-rw-r--r--sound/soc/au1x/psc-i2s.c1
-rw-r--r--sound/soc/codecs/rt5645.c22
-rw-r--r--sound/soc/codecs/wm0010.c23
-rw-r--r--sound/soc/codecs/wm8960.c26
-rw-r--r--sound/soc/codecs/wm8962.c3
-rw-r--r--sound/soc/davinci/davinci-mcasp.c14
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c3
-rw-r--r--sound/soc/fsl/fsl_ssi.c5
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c20
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c17
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c4
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-utils.c9
-rw-r--r--sound/soc/spear/Kconfig2
-rw-r--r--sound/soc/sti/uniperif_player.c14
-rw-r--r--sound/soc/sti/uniperif_reader.c6
-rw-r--r--tools/build/Makefile.feature8
-rw-r--r--tools/build/feature/Makefile10
-rw-r--r--tools/build/feature/test-all.c10
-rw-r--r--tools/build/feature/test-get_cpuid.c7
-rw-r--r--tools/build/feature/test-numa_num_possible_cpus.c6
-rw-r--r--tools/lib/traceevent/event-parse.c23
-rw-r--r--tools/perf/Documentation/intel-pt.txt15
-rw-r--r--tools/perf/config/Makefile20
-rw-r--r--tools/perf/util/probe-event.c13
-rw-r--r--tools/perf/util/session.c5
-rw-r--r--tools/perf/util/stat.c16
-rw-r--r--tools/perf/util/symbol-elf.c37
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.c39
-rw-r--r--tools/testing/selftests/membarrier/Makefile7
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test.c5
-rw-r--r--tools/testing/selftests/vm/Makefile9
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c54
-rw-r--r--virt/kvm/kvm_main.c4
689 files changed, 7239 insertions, 3952 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d8863004858..f447f0516f07 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,7 +43,7 @@ o udev 081 # udevd --version
o grub 0.93 # grub --version || grub-install --version
o mcelog 0.6 # mcelog --version
o iptables 1.4.2 # iptables -V
-o openssl & libcrypto 1.0.1k # openssl version
+o openssl & libcrypto 1.0.0 # openssl version
Kernel compilation
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index ddfade40ac59..7803e77d85cb 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -57,6 +57,8 @@ used to route Message Signalled Interrupts (MSI) to the CPUs.
These nodes must have the following properties:
- compatible : Should at least contain "arm,gic-v3-its".
- msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+ which will generate the MSI.
- reg: Specifies the base physical address and size of the ITS
registers.
@@ -83,6 +85,7 @@ Examples:
gic-its@2c200000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x2c200000 0 0x200000>;
};
};
@@ -107,12 +110,14 @@ Examples:
gic-its@2c200000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x2c200000 0 0x200000>;
};
gic-its@2c400000 {
compatible = "arm,gic-v3-its";
msi-controller;
+ #msi-cells = <1>;
reg = <0x0 0x2c400000 0 0x200000>;
};
};
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index a8274eabae2e..b8e41c148a3c 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -497,7 +497,7 @@ cpus {
};
idle-states {
- entry-method = "arm,psci";
+ entry-method = "psci";
CPU_RETENTION_0_0: cpu-retention-0-0 {
compatible = "arm,idle-state";
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 5788d5cf1252..82d40e2505f6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -16,7 +16,9 @@ properties, each containing a 'gpio-list':
GPIO properties should be named "[<name>-]gpios", with <name> being the purpose
of this GPIO for the device. While a non-existent <name> is considered valid
for compatibility reasons (resolving to the "gpios" property), it is not allowed
-for new bindings.
+for new bindings. Also, GPIO properties named "[<name>-]gpio" are valid and old
+bindings use them, but they are only supported for compatibility reasons and
+should not be used in new bindings since this form has been deprecated.
GPIO properties can contain one or more GPIO phandles, but only in exceptional
cases should they contain more than one. If your device uses several GPIOs with
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index c5933573e0f6..4a3679d54457 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -1,10 +1,11 @@
-* Bosch BMA180 triaxial acceleration sensor
+* Bosch BMA180 / BMA250 triaxial acceleration sensor
http://omapworld.com/BMA180_111_1002839.pdf
+http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf
Required properties:
- - compatible : should be "bosch,bma180"
+ - compatible : should be "bosch,bma180" or "bosch,bma250"
- reg : the I2C address of the sensor
Optional properties:
@@ -13,6 +14,9 @@ Optional properties:
- interrupts : interrupt mapping for GPIO IRQ, it should by configured with
flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
+ For the bma250 the first interrupt listed must be the one
+ connected to the INT1 pin, the second (optional) interrupt
+ listed must be the one connected to the INT2 pin.
Example:
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
index 635a3b036630..8d91ba9ff2fd 100644
--- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -25,7 +25,7 @@ Example:
/* Cypress Gen3 touchpad */
touchpad@67 {
compatible = "cypress,cyapa";
- reg = <0x24>;
+ reg = <0x67>;
interrupt-parent = <&gpio>;
interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */
wakeup-source;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
index 391717a68f3b..ec96b1f01478 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
@@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority
interrupt.
Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc"
- as fallback
+- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or
+ "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc"
- reg: Base address and size of the controllers memory area
- interrupt-parent: phandle of the parent interrupt controller.
- interrupts: Interrupt specifier for the controllers interrupt.
@@ -13,6 +13,9 @@ Required Properties:
- #interrupt-cells : Specifies the number of cells needed to encode interrupt
source, should be 1
+Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
+use ar7240 for all other SoCs.
+
Please refer to interrupts.txt in this directory for details of the common
Interrupt Controllers bindings used by client devices.
@@ -28,3 +31,16 @@ Example:
interrupt-controller;
#interrupt-cells = <1>;
};
+
+Another example:
+
+ interrupt-controller@18060010 {
+ compatible = "qca,ar9331-misc-intc", "qca,ar7240-misc-intc";
+ reg = <0x18060010 0x4>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index d8ef5bf50f11..7fab84b33531 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -7,7 +7,8 @@ OHCI and EHCI controllers.
Required properties:
- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
- "renesas,pci-r8a7791" for the R8A7791 SoC.
+ "renesas,pci-r8a7791" for the R8A7791 SoC;
+ "renesas,pci-r8a7794" for the R8A7794 SoC.
- reg: A list of physical regions to access the device: the first is
the operational registers for the OHCI/EHCI controllers and the
second is for the bridge configuration and control registers.
diff --git a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
index 32aa26f1e434..acbcb452a69a 100644
--- a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
@@ -2,7 +2,12 @@ PBIAS internal regulator for SD card dual voltage i/o pads on OMAP SoCs.
Required properties:
- compatible:
- - "ti,pbias-omap" for OMAP2, OMAP3, OMAP4, OMAP5, DRA7.
+ - should be "ti,pbias-dra7" for DRA7
+ - should be "ti,pbias-omap2" for OMAP2
+ - should be "ti,pbias-omap3" for OMAP3
+ - should be "ti,pbias-omap4" for OMAP4
+ - should be "ti,pbias-omap5" for OMAP5
+ - "ti,pbias-omap" is deprecated
- reg: pbias register offset from syscon base and size of pbias register.
- syscon : phandle of the system control module
- regulator-name : should be
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index dcefc438272f..6160ffbcb3d3 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -15,17 +15,18 @@ Required properties:
- interrupts: Should contain spi interrupt
- clocks: phandles to input clocks.
- The first should be <&topckgen CLK_TOP_SPI_SEL>.
- The second should be one of the following.
+ The first should be one of the following. It's PLL.
- <&clk26m>: specify parent clock 26MHZ.
- <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ.
It's the default one.
- <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ.
- <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ.
- <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ.
+ The second should be <&topckgen CLK_TOP_SPI_SEL>. It's clock mux.
+ The third is <&pericfg CLK_PERI_SPI0>. It's clock gate.
-- clock-names: shall be "spi-clk" for the controller clock, and
- "parent-clk" for the parent clock.
+- clock-names: shall be "parent-clk" for the parent clock, "sel-clk" for the
+ muxes clock, and "spi-clk" for the clock gate.
Optional properties:
- mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi
@@ -44,8 +45,11 @@ spi: spi@1100a000 {
#size-cells = <0>;
reg = <0 0x1100a000 0 0x1000>;
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>;
- clock-names = "spi-clk", "parent-clk";
+ clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&pericfg CLK_PERI_SPI0>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+
mediatek,pad-select = <0>;
status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 8a49362dea6e..41b817f7b670 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,19 +55,11 @@ of heat dissipation). For example a fan's cooling states correspond to
the different fan speeds possible. Cooling states are referred to by
single unsigned integers, where larger numbers mean greater heat
dissipation. The precise set of cooling states associated with a device
-(as referred to be the cooling-min-state and cooling-max-state
+(as referred to by the cooling-min-level and cooling-max-level
properties) should be defined in a particular device's binding.
For more examples of cooling devices, refer to the example sections below.
Required properties:
-- cooling-min-state: An integer indicating the smallest
- Type: unsigned cooling state accepted. Typically 0.
- Size: one cell
-
-- cooling-max-state: An integer indicating the largest
- Type: unsigned cooling state accepted.
- Size: one cell
-
- #cooling-cells: Used to provide cooling device specific information
Type: unsigned while referring to it. Must be at least 2, in order
Size: one cell to specify minimum and maximum cooling state used
@@ -77,6 +69,15 @@ Required properties:
See Cooling device maps section below for more details
on how consumers refer to cooling devices.
+Optional properties:
+- cooling-min-level: An integer indicating the smallest
+ Type: unsigned cooling state accepted. Typically 0.
+ Size: one cell
+
+- cooling-max-level: An integer indicating the largest
+ Type: unsigned cooling state accepted.
+ Size: one cell
+
* Trip points
The trip node is a node to describe a point in the temperature domain
@@ -225,8 +226,8 @@ cpus {
396000 950000
198000 850000
>;
- cooling-min-state = <0>;
- cooling-max-state = <3>;
+ cooling-min-level = <0>;
+ cooling-max-level = <3>;
#cooling-cells = <2>; /* min followed by max */
};
...
@@ -240,8 +241,8 @@ cpus {
*/
fan0: fan@0x48 {
...
- cooling-min-state = <0>;
- cooling-max-state = <9>;
+ cooling-min-level = <0>;
+ cooling-max-level = <9>;
#cooling-cells = <2>; /* min followed by max */
};
};
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index d71ef07bca5d..a057b75ba4b5 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -6,6 +6,7 @@ Required properties:
"lsi,zevio-usb"
"qcom,ci-hdrc"
"chipidea,usb2"
+ "xlnx,zynq-usb-2.20a"
- reg: base address and length of the registers
- interrupts: interrupt for the USB controller
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ac5f0c34ae00..82d2ac97af74 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -203,6 +203,7 @@ sitronix Sitronix Technology Corporation
skyworks Skyworks Solutions, Inc.
smsc Standard Microsystems Corporation
snps Synopsys, Inc.
+socionext Socionext Inc.
solidrun SolidRun
solomon Solomon Systech Limited
sony Sony Corporation
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index b85d000faeb4..c51f1146f3bd 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
ABS_MT_POSITION_X := T_X
ABS_MT_POSITION_Y := T_Y
ABS_MT_TOOL_X := C_X
- ABS_MT_TOOL_X := C_Y
+ ABS_MT_TOOL_Y := C_Y
Unfortunately, there is not enough information to specify both the touching
ellipse and the tool ellipse, so one has to resort to approximations. One
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
new file mode 100644
index 000000000000..031ef4a63485
--- /dev/null
+++ b/Documentation/networking/vrf.txt
@@ -0,0 +1,96 @@
+Virtual Routing and Forwarding (VRF)
+====================================
+The VRF device combined with ip rules provides the ability to create virtual
+routing and forwarding domains (aka VRFs, VRF-lite to be specific) in the
+Linux network stack. One use case is the multi-tenancy problem where each
+tenant has their own unique routing tables and at the very least needs
+different default gateways.
+
+Processes can be "VRF aware" by binding a socket to the VRF device. Packets
+through the socket then use the routing table associated with the VRF
+device. An important feature of the VRF device implementation is that it
+impacts only Layer 3 and above so L2 tools (e.g., LLDP) are not affected
+(ie., they do not need to be run in each VRF). The design also allows
+the use of higher priority ip rules (Policy Based Routing, PBR) to take
+precedence over the VRF device rules directing specific traffic as desired.
+
+In addition, VRF devices allow VRFs to be nested within namespaces. For
+example network namespaces provide separation of network interfaces at L1
+(Layer 1 separation), VLANs on the interfaces within a namespace provide
+L2 separation and then VRF devices provide L3 separation.
+
+Design
+------
+A VRF device is created with an associated route table. Network interfaces
+are then enslaved to a VRF device:
+
+ +-----------------------------+
+ | vrf-blue | ===> route table 10
+ +-----------------------------+
+ | | |
+ +------+ +------+ +-------------+
+ | eth1 | | eth2 | ... | bond1 |
+ +------+ +------+ +-------------+
+ | |
+ +------+ +------+
+ | eth8 | | eth9 |
+ +------+ +------+
+
+Packets received on an enslaved device are switched to the VRF device
+using an rx_handler which gives the impression that packets flow through
+the VRF device. Similarly, on egress, routing rules are used to send packets
+to the VRF device driver before getting sent out the actual interface. This
+allows tcpdump on a VRF device to capture all packets into and out of the
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
+using the VRF device to specify rules that apply to the VRF domain as a whole.
+
+[1] Packets in the forwarded state do not flow through the device, so those
+ packets are not seen by tcpdump. Will revisit this limitation in a
+ future release.
+
+[2] Iptables on ingress is limited to NF_INET_PRE_ROUTING only with skb->dev
+ set to real ingress device and egress is limited to NF_INET_POST_ROUTING.
+ Will revisit this limitation in a future release.
+
+
+Setup
+-----
+1. VRF device is created with an association to a FIB table.
+ e.g, ip link add vrf-blue type vrf table 10
+ ip link set dev vrf-blue up
+
+2. Rules are added that send lookups to the associated FIB table when the
+ iif or oif is the VRF device. e.g.,
+ ip ru add oif vrf-blue table 10
+ ip ru add iif vrf-blue table 10
+
+ Set the default route for the table (and hence default route for the VRF).
+ e.g, ip route add table 10 prohibit default
+
+3. Enslave L3 interfaces to a VRF device.
+ e.g, ip link set dev eth1 master vrf-blue
+
+ Local and connected routes for enslaved devices are automatically moved to
+ the table associated with VRF device. Any additional routes depending on
+ the enslaved device will need to be reinserted following the enslavement.
+
+4. Additional VRF routes are added to associated table.
+ e.g., ip route add table 10 ...
+
+
+Applications
+------------
+Applications that are to work within a VRF need to bind their socket to the
+VRF device:
+
+ setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)+1);
+
+or to specify the output device using cmsg and IP_PKTINFO.
+
+
+Limitations
+-----------
+VRF device currently only works for IPv4. Support for IPv6 is under development.
+
+Index of original ingress interface is not available via cmsg. Will address
+soon.
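
To make the Applications note above concrete, the whole user-space side is a
single SO_BINDTODEVICE call on the socket; a minimal sketch, assuming the
"vrf-blue" device from the setup example and glibc on Linux (SO_BINDTODEVICE
needs CAP_NET_RAW):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>

    int main(void)
    {
        const char *dev = "vrf-blue";  /* VRF device created in the setup step */
        int sd = socket(AF_INET, SOCK_STREAM, 0);

        if (sd < 0)
            return 1;

        /* Route lookups for this socket now use the VRF's table (table 10). */
        if (setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev) + 1) < 0)
            perror("SO_BINDTODEVICE");

        /* connect()/bind()/sendto() proceed as usual from here on. */
        close(sd);
        return 0;
    }

Alternatively, the output device can be chosen per packet with cmsg and
IP_PKTINFO, as the text notes.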
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 62328d76b55b..b0e911e0e8f5 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned
(alternatively, the runtime_suspend() callback will have to check if the
device should really be suspended and return -EAGAIN if that is not the case).
-The runtime PM of PCI devices is disabled by default. It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function. However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device. Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core. PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function. In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function. If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended. The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose. Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though. Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time). [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrement
+of the device's runtime PM usage counter done at probe time. For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback. [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]
The runtime PM framework works by processing requests to suspend or resume
devices, or to check if they are idle (in which cases it is reasonable to
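
Put together, the probe/remove balancing described above amounts to one put in
probe and one matching get in remove; a minimal sketch of a hypothetical driver
(the foo_* names are illustrative, only the pm_runtime_* helpers come from the
text):

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        /* ... normal device setup ... */

        /*
         * The PCI core incremented the usage counter in local_pci_probe();
         * drop that reference so the device can be runtime-suspended once
         * user space allows it (pm_runtime_allow() via sysfs).
         */
        pm_runtime_put_noidle(&pdev->dev);
        return 0;
    }

    static void foo_remove(struct pci_dev *pdev)
    {
        /* Balance the decrement done in foo_probe(). */
        pm_runtime_get_noresume(&pdev->dev);

        /* ... normal device teardown ... */
    }

    static struct pci_driver foo_driver = {
        .name   = "foo",
        .probe  = foo_probe,
        .remove = foo_remove,
        /* .id_table and the runtime PM callbacks in .driver.pm omitted */
    };

The runtime_suspend()/runtime_resume() callbacks themselves go into the
dev_pm_ops referenced from .driver.pm, as described elsewhere in this document.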
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 2bc8abc57fa0..6c6247aaa7b9 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 6294b5186ae5..809ab6efcc74 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -54,13 +54,15 @@ default_qdisc
--------------
The default queuing discipline to use for network devices. This allows
-overriding the default queue discipline of pfifo_fast with an
-alternative. Since the default queuing discipline is created with the
-no additional parameters so is best suited to queuing disciplines that
-work well without configuration like stochastic fair queue (sfq),
-CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines
-like Hierarchical Token Bucket or Deficit Round Robin which require setting
-up classes and bandwidths.
+overriding the default of pfifo_fast with an alternative. Since the default
+queuing discipline is created without additional parameters, it is best suited
+to queuing disciplines that work well without configuration like stochastic
+fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel). Don't use
+queuing disciplines like Hierarchical Token Bucket or Deficit Round Robin
+which require setting up classes and bandwidths. Note that physical multiqueue
+interfaces still use mq as root qdisc, which in turn uses this default for its
+leaves. Virtual devices (like e.g. lo or veth) ignore this setting and instead
+default to noqueue.
Default: pfifo_fast
busy_read
diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt
index c3797b529991..a1ce2235f121 100644
--- a/Documentation/thermal/power_allocator.txt
+++ b/Documentation/thermal/power_allocator.txt
@@ -4,7 +4,7 @@ Power allocator governor tunables
Trip points
-----------
-The governor requires the following two passive trip points:
+The governor works optimally with the following two passive trip points:
1. "switch on" trip point: temperature above which the governor
control loop starts operating. This is the first passive trip
diff --git a/MAINTAINERS b/MAINTAINERS
index 274f85405584..797236befd27 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -615,9 +615,8 @@ F: Documentation/hwmon/fam15h_power
F: drivers/hwmon/fam15h_power.c
AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
-M: Thomas Dahlmann <dahlmann.thomas@arcor.de>
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
-S: Supported
+S: Orphan
F: drivers/usb/gadget/udc/amd5536udc.*
AMD GEODE PROCESSOR/CHIPSET SUPPORT
@@ -808,6 +807,13 @@ S: Maintained
F: drivers/video/fbdev/arcfb.c
F: drivers/video/fbdev/core/fb_defio.c
+ARCNET NETWORK LAYER
+M: Michael Grzeschik <m.grzeschik@pengutronix.de>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/arcnet/
+F: include/uapi/linux/if_arcnet.h
+
ARM MFM AND FLOPPY DRIVERS
M: Ian Molton <spyro@f2s.com>
S: Maintained
@@ -3394,7 +3400,6 @@ F: drivers/staging/dgnc/
DIGI EPCA PCI PRODUCTS
M: Lidza Louina <lidza.louina@gmail.com>
-M: Mark Hounschell <markh@compro.net>
M: Daeseok Youn <daeseok.youn@gmail.com>
L: driverdev-devel@linuxdriverproject.org
S: Maintained
@@ -5952,7 +5957,7 @@ F: virt/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
M: Joerg Roedel <joro@8bytes.org>
L: kvm@vger.kernel.org
-W: http://kvm.qumranet.com
+W: http://www.linux-kvm.org/
S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
@@ -5960,7 +5965,7 @@ F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
M: Alexander Graf <agraf@suse.com>
L: kvm-ppc@vger.kernel.org
-W: http://kvm.qumranet.com
+W: http://www.linux-kvm.org/
T: git git://github.com/agraf/linux-2.6.git
S: Supported
F: arch/powerpc/include/asm/kvm*
@@ -8500,7 +8505,6 @@ F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Shahed Shaikh <shahed.shaikh@qlogic.com>
M: Dept-GELinuxNICDev@qlogic.com
L: netdev@vger.kernel.org
S: Supported
@@ -9904,8 +9908,8 @@ F: drivers/staging/media/lirc/
STAGING - LUSTRE PARALLEL FILESYSTEM
M: Oleg Drokin <oleg.drokin@intel.com>
M: Andreas Dilger <andreas.dilger@intel.com>
-L: HPDD-discuss@lists.01.org (moderated for non-subscribers)
-W: http://lustre.opensfs.org/
+L: lustre-devel@lists.lustre.org (moderated for non-subscribers)
+W: http://wiki.lustre.org/
S: Maintained
F: drivers/staging/lustre
@@ -10338,6 +10342,16 @@ F: include/uapi/linux/thermal.h
F: include/linux/cpu_cooling.h
F: Documentation/devicetree/bindings/thermal/
+THERMAL/CPU_COOLING
+M: Amit Daniel Kachhap <amit.kachhap@gmail.com>
+M: Viresh Kumar <viresh.kumar@linaro.org>
+M: Javi Merino <javi.merino@arm.com>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: Documentation/thermal/cpu-cooling-api.txt
+F: drivers/thermal/cpu_cooling.c
+F: include/linux/cpu_cooling.h
+
THINGM BLINK(1) USB RGB LED DRIVER
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
S: Maintained
@@ -11187,7 +11201,7 @@ F: drivers/vlynq/vlynq.c
F: include/linux/vlynq.h
VME SUBSYSTEM
-M: Martyn Welch <martyn.welch@ge.com>
+M: Martyn Welch <martyn@welchs.me.uk>
M: Manohar Vanga <manohar.vanga@gmail.com>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: devel@driverdev.osuosl.org
@@ -11239,7 +11253,6 @@ VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
-W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
S: Supported
@@ -11253,6 +11266,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/vrf.c
F: include/net/vrf.h
+F: Documentation/networking/vrf.txt
VT1211 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
@@ -11368,17 +11382,15 @@ WM97XX TOUCHSCREEN DRIVERS
M: Mark Brown <broonie@kernel.org>
M: Liam Girdwood <lrg@slimlogic.co.uk>
L: linux-input@vger.kernel.org
-T: git git://opensource.wolfsonmicro.com/linux-2.6-touch
-W: http://opensource.wolfsonmicro.com/node/7
+W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
F: drivers/input/touchscreen/*wm97*
F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
L: patches@opensource.wolfsonmicro.com
-T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
-T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
+T: git https://github.com/CirrusLogic/linux-drivers.git
+W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
F: Documentation/hwmon/wm83??
F: arch/arm/mach-s3c64xx/mach-crag6410*
diff --git a/Makefile b/Makefile
index 84f4b31e3c6e..fd46821e428d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index cded02c890aa..5f387ee5b5c5 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -242,7 +242,12 @@ pci_restore_srm_config(void)
void pcibios_fixup_bus(struct pci_bus *bus)
{
- struct pci_dev *dev;
+ struct pci_dev *dev = bus->self;
+
+ if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ }
list_for_each_entry(dev, &bus->devices, bus_list) {
pdev_save_srm_config(dev);
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 7611b10a2d23..0b10ef2a4372 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -48,4 +48,5 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 4d28fc3aac69..5dd084f3c81c 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -252,10 +252,10 @@
};
vdd1_reg: regulator@2 {
- /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+ /* VDD_MPU voltage limits 0.95V - 1.325V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912500>;
- regulator-max-microvolt = <1312500>;
+ regulator-max-microvolt = <1378000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 3a05b94f59ed..568adf5efde0 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -98,13 +98,6 @@
pinctrl-0 = <&extcon_usb1_pins>;
};
- extcon_usb2: extcon_usb2 {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&extcon_usb2_pins>;
- };
-
hdmi0: connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -326,12 +319,6 @@
>;
};
- extcon_usb2_pins: extcon_usb2_pins {
- pinctrl-single,pins = <
- 0x3e8 (PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_ctsn.gpio7_24 */
- >;
- };
-
tpd12s015_pins: pinmux_tpd12s015_pins {
pinctrl-single,pins = <
0x3b0 (PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
@@ -432,7 +419,7 @@
};
ldo3_reg: ldo3 {
- /* VDDA_1V8_PHY */
+ /* VDDA_1V8_PHYA */
regulator-name = "ldo3";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -440,6 +427,15 @@
regulator-boot-on;
};
+ ldo4_reg: ldo4 {
+ /* VDDA_1V8_PHYB */
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
ldo9_reg: ldo9 {
/* VDD_RTC */
regulator-name = "ldo9";
@@ -495,6 +491,14 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ extcon_usb2: tps659038_usb {
+ compatible = "ti,palmas-usb-vid";
+ ti,enable-vbus-detection;
+ ti,enable-id-detection;
+ id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+ };
+
};
tmp102: tmp102@48 {
@@ -517,7 +521,8 @@
mcp_rtc: rtc@6f {
compatible = "microchip,mcp7941x";
reg = <0x6f>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */
+ interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+ <&dra7_pmx_core 0x424>;
pinctrl-names = "default";
pinctrl-0 = <&mcp79410_pins_default>;
@@ -579,7 +584,6 @@
pinctrl-0 = <&mmc1_pins_default>;
vmmc-supply = <&ldo1_reg>;
- vmmc_aux-supply = <&vdd_3v3>;
bus-width = <4>;
cd-gpios = <&gpio6 27 0>; /* gpio 219 */
};
@@ -623,6 +627,14 @@
};
&usb2 {
+ /*
+ * Stand alone usage is peripheral only.
+ * However, with some resistor modifications
+ * this port can be used via expansion connectors
+ * as "host" or "dual-role". If so, provide
+ * the necessary dr_mode override in the expansion
+ * board's DT.
+ */
dr_mode = "peripheral";
};
@@ -681,7 +693,7 @@
&hdmi {
status = "ok";
- vdda-supply = <&ldo3_reg>;
+ vdda-supply = <&ldo4_reg>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_pins>;
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index 92bacd3c8fab..109fd4711647 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -19,10 +19,10 @@
&cpsw_emac0 {
phy_id = <&davinci_mdio>, <0>;
- phy-mode = "mii";
+ phy-mode = "rgmii";
};
&cpsw_emac1 {
phy_id = <&davinci_mdio>, <1>;
- phy-mode = "mii";
+ phy-mode = "rgmii";
};
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 8c4bbc7573df..79838dd8dee7 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -8,7 +8,7 @@
#include "dm814x.dtsi"
/ {
- model = "DM8148 EVM";
+ model = "HP t410 Smart Zero Client";
compatible = "hp,t410", "ti,dm8148";
memory {
@@ -19,10 +19,10 @@
&cpsw_emac0 {
phy_id = <&davinci_mdio>, <0>;
- phy-mode = "mii";
+ phy-mode = "rgmii";
};
&cpsw_emac1 {
phy_id = <&davinci_mdio>, <1>;
- phy-mode = "mii";
+ phy-mode = "rgmii";
};
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 972c9c9e885b..7988b42e5764 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -181,9 +181,9 @@
ti,hwmods = "timer3";
};
- control: control@160000 {
+ control: control@140000 {
compatible = "ti,dm814-scm", "simple-bus";
- reg = <0x160000 0x16d000>;
+ reg = <0x140000 0x16d000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x160000 0x16d000>;
@@ -321,9 +321,9 @@
mac-address = [ 00 00 00 00 00 00 ];
};
- phy_sel: cpsw-phy-sel@0x48160650 {
+ phy_sel: cpsw-phy-sel@48140650 {
compatible = "ti,am3352-cpsw-phy-sel";
- reg= <0x48160650 0x4>;
+ reg= <0x48140650 0x4>;
reg-names = "gmii-sel";
};
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5d65db9ebc2b..e289c706d27d 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -120,9 +120,10 @@
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0x0 0x1400>;
pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
+ compatible = "ti,pbias-dra7", "ti,pbias-omap";
reg = <0xe00 0x4>;
syscon = <&scm_conf>;
pbias_mmc_reg: pbias_mmc_omap5 {
@@ -1417,7 +1418,7 @@
ti,irqs-safe-map = <0>;
};
- mac: ethernet@4a100000 {
+ mac: ethernet@48484000 {
compatible = "ti,dra7-cpsw","ti,cpsw";
ti,hwmods = "gmac";
clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 2390f387c271..798dda072b2a 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -56,6 +56,7 @@
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0x270 0x240>;
scm_clocks: clocks {
#address-cells = <1>;
@@ -63,7 +64,7 @@
};
pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
+ compatible = "ti,pbias-omap2", "ti,pbias-omap";
reg = <0x230 0x4>;
syscon = <&scm_conf>;
pbias_mmc_reg: pbias_mmc_omap2430 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a5474113cd50..67659a0ed13e 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@
tfp410_pins: pinmux_tfp410_pins {
pinctrl-single,pins = <
- 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
>;
};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index d5e5cd449b16..2230e1c03320 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -78,12 +78,6 @@
>;
};
- smsc9221_pins: pinmux_smsc9221_pins {
- pinctrl-single,pins = <
- 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
- >;
- };
-
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index e458c2185e3c..5ad688c57a00 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -156,6 +156,12 @@
OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
>;
};
+
+ smsc9221_pins: pinmux_smsc9221_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
+ >;
+ };
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 69a40cfc1f29..8a2b25332b8c 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -113,10 +113,22 @@
};
scm_conf: scm_conf@270 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x270 0x330>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0x270 0x330>;
+
+ pbias_regulator: pbias_regulator {
+ compatible = "ti,pbias-omap3", "ti,pbias-omap";
+ reg = <0x2b0 0x4>;
+ syscon = <&scm_conf>;
+ pbias_mmc_reg: pbias_mmc_omap2430 {
+ regulator-name = "pbias_mmc_omap2430";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ };
scm_clocks: clocks {
#address-cells = <1>;
@@ -202,17 +214,6 @@
dma-requests = <96>;
};
- pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
- reg = <0x2b0 0x4>;
- syscon = <&scm_conf>;
- pbias_mmc_reg: pbias_mmc_omap2430 {
- regulator-name = "pbias_mmc_omap2430";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3000000>;
- };
- };
-
gpio1: gpio@48310000 {
compatible = "ti,omap3-gpio";
reg = <0x48310000 0x200>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index abc4473e6f8a..5a206c100ce2 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -196,9 +196,10 @@
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0x5a0 0x170>;
pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
+ compatible = "ti,pbias-omap4", "ti,pbias-omap";
reg = <0x60 0x4>;
syscon = <&omap4_padconf_global>;
pbias_mmc_reg: pbias_mmc_omap4 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 3cc8f357d5b8..3cb030f9d2c4 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@
i2c5_pins: pinmux_i2c5_pins {
pinctrl-single,pins = <
- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
>;
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4205a8ac9ddb..4c04389dab32 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -185,9 +185,10 @@
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0x5a0 0xec>;
pbias_regulator: pbias_regulator {
- compatible = "ti,pbias-omap";
+ compatible = "ti,pbias-omap5", "ti,pbias-omap";
reg = <0x60 0x4>;
syscon = <&omap5_padconf_global>;
pbias_mmc_reg: pbias_mmc_omap5 {
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2fa7a0dc83f7..275c78ccc0f3 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -158,6 +158,7 @@
};
&hdmi {
+ ddc-i2c-bus = <&i2c5>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index 3efa3b2ebe90..6b914e4bb099 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -103,48 +103,46 @@
<&clk_s_d0_quadfs 0>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>;
- ranges;
-
- sti-hdmi@8d04000 {
- compatible = "st,stih407-hdmi";
- reg = <0x8d04000 0x1000>;
- reg-names = "hdmi-reg";
- interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
- interrupt-names = "irq";
- clock-names = "pix",
- "tmds",
- "phy",
- "audio",
- "main_parent",
- "aux_parent";
-
- clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
- <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
- <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
- <&clk_s_d0_flexgen CLK_PCM_0>,
- <&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 1>;
-
- hdmi,hpd-gpio = <&pio5 3>;
- reset-names = "hdmi";
- resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
- ddc = <&hdmiddc>;
-
- };
-
- sti-hda@8d02000 {
- compatible = "st,stih407-hda";
- reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
- reg-names = "hda-reg", "video-dacs-ctrl";
- clock-names = "pix",
- "hddac",
- "main_parent",
- "aux_parent";
- clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
- <&clk_s_d2_flexgen CLK_HDDAC>,
- <&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 1>;
- };
+ };
+
+ sti-hdmi@8d04000 {
+ compatible = "st,stih407-hdmi";
+ reg = <0x8d04000 0x1000>;
+ reg-names = "hdmi-reg";
+ interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
+ interrupt-names = "irq";
+ clock-names = "pix",
+ "tmds",
+ "phy",
+ "audio",
+ "main_parent",
+ "aux_parent";
+
+ clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
+ <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
+ <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
+ <&clk_s_d0_flexgen CLK_PCM_0>,
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
+
+ hdmi,hpd-gpio = <&pio5 3>;
+ reset-names = "hdmi";
+ resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+ ddc = <&hdmiddc>;
+ };
+
+ sti-hda@8d02000 {
+ compatible = "st,stih407-hda";
+ reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
+ reg-names = "hda-reg", "video-dacs-ctrl";
+ clock-names = "pix",
+ "hddac",
+ "main_parent",
+ "aux_parent";
+ clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
+ <&clk_s_d2_flexgen CLK_HDDAC>,
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
};
};
};
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 6f40bc99c22f..8c6e61a27234 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -178,48 +178,46 @@
<&clk_s_d0_quadfs 0>,
<&clk_s_d2_quadfs 0>,
<&clk_s_d2_quadfs 0>;
- ranges;
-
- sti-hdmi@8d04000 {
- compatible = "st,stih407-hdmi";
- reg = <0x8d04000 0x1000>;
- reg-names = "hdmi-reg";
- interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
- interrupt-names = "irq";
- clock-names = "pix",
- "tmds",
- "phy",
- "audio",
- "main_parent",
- "aux_parent";
-
- clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
- <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
- <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
- <&clk_s_d0_flexgen CLK_PCM_0>,
- <&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 1>;
-
- hdmi,hpd-gpio = <&pio5 3>;
- reset-names = "hdmi";
- resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
- ddc = <&hdmiddc>;
-
- };
-
- sti-hda@8d02000 {
- compatible = "st,stih407-hda";
- reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
- reg-names = "hda-reg", "video-dacs-ctrl";
- clock-names = "pix",
- "hddac",
- "main_parent",
- "aux_parent";
- clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
- <&clk_s_d2_flexgen CLK_HDDAC>,
- <&clk_s_d2_quadfs 0>,
- <&clk_s_d2_quadfs 1>;
- };
+ };
+
+ sti-hdmi@8d04000 {
+ compatible = "st,stih407-hdmi";
+ reg = <0x8d04000 0x1000>;
+ reg-names = "hdmi-reg";
+ interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
+ interrupt-names = "irq";
+ clock-names = "pix",
+ "tmds",
+ "phy",
+ "audio",
+ "main_parent",
+ "aux_parent";
+
+ clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
+ <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
+ <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
+ <&clk_s_d0_flexgen CLK_PCM_0>,
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
+
+ hdmi,hpd-gpio = <&pio5 3>;
+ reset-names = "hdmi";
+ resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+ ddc = <&hdmiddc>;
+ };
+
+ sti-hda@8d02000 {
+ compatible = "st,stih407-hda";
+ reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
+ reg-names = "hda-reg", "video-dacs-ctrl";
+ clock-names = "pix",
+ "hddac",
+ "main_parent",
+ "aux_parent";
+ clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
+ <&clk_s_d2_flexgen CLK_HDDAC>,
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
};
};
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 50c84e1876fc..3f15a5cae167 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -240,7 +240,8 @@ CONFIG_SSI_PROTOCOL=m
CONFIG_PINCTRL_SINGLE=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCF857X=y
CONFIG_GPIO_TWL4030=y
CONFIG_GPIO_PALMAS=y
CONFIG_W1=m
@@ -350,6 +351,8 @@ CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_OMAP2PLUS=m
CONFIG_USB_MUSB_AM35X=m
CONFIG_USB_MUSB_DSPS=m
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_USB_TI_CPPI41_DMA=y
CONFIG_USB_DWC3=m
CONFIG_USB_TEST=m
CONFIG_AM335X_PHY_USB=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3df1e975f72a..c4072d9f32c7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
+#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_VCPU_MAX_FEATURES 2
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 32640c431a08..7cba573c2cc9 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
* This may need to be greater than __NR_last_syscall+1 in order to
* account for the padding in the syscall table
*/
-#define __NR_syscalls (388)
+#define __NR_syscalls (392)
/*
* *NOTE*: This is a ghost syscall private to the kernel. Only the
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 0c3f5a0dafd3..7a2a32a1d5a8 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -414,6 +414,8 @@
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
#define __NR_bpf (__NR_SYSCALL_BASE+386)
#define __NR_execveat (__NR_SYSCALL_BASE+387)
+#define __NR_userfaultfd (__NR_SYSCALL_BASE+388)
+#define __NR_membarrier (__NR_SYSCALL_BASE+389)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 05745eb838c5..fde6c88d560c 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -397,6 +397,8 @@
/* 385 */ CALL(sys_memfd_create)
CALL(sys_bpf)
CALL(sys_execveat)
+ CALL(sys_userfaultfd)
+ CALL(sys_membarrier)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
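[Editor's note, not part of the patch: the two table entries above, together with the new __NR_userfaultfd/__NR_membarrier numbers in uapi/asm/unistd.h, are what make the syscalls reachable from ARM user space. A minimal user-space sketch follows, assuming a toolchain whose headers may not yet define __NR_membarrier (389 on ARM EABI, matching the uapi hunk above) or MEMBARRIER_CMD_QUERY (0); the file name and program are illustrative only.]

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_membarrier
#define __NR_membarrier 389		/* __NR_SYSCALL_BASE is 0 on EABI */
#endif
#define MEMBARRIER_CMD_QUERY 0		/* ask which membarrier commands exist */

int main(void)
{
	long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0) {
		perror("membarrier");	/* ENOSYS on kernels without the new table entry */
		return 1;
	}
	printf("supported membarrier commands: 0x%lx\n", (unsigned long)mask);
	return 0;
}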
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 07d2e100caab..b3a0dff67e3f 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -44,10 +44,11 @@ config SOC_OMAP5
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP
+ select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
+ select PM_OPP if PM
config SOC_AM33XX
bool "TI AM33XX"
@@ -70,10 +71,13 @@ config SOC_DRA7XX
select ARCH_OMAP2PLUS
select ARM_CPU_SUSPEND if PM
select ARM_GIC
+ select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
select IRQ_CROSSBAR
select ARM_ERRATA_798181 if SMP
+ select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
+ select PM_OPP if PM
config ARCH_OMAP2PLUS
bool
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 24c9afc9e8a7..6133eaac685d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,13 +20,6 @@
#include "common.h"
-#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
-#define intc_of_init NULL
-#endif
-#ifndef CONFIG_ARCH_OMAP4
-#define gic_of_init NULL
-#endif
-
static const struct of_device_id omap_dt_match_table[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "ti,omap-infra", },
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index e3f713ffb06b..54a5ba54d2ff 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -653,8 +653,12 @@ void __init dra7xxx_check_revision(void)
omap_revision = DRA752_REV_ES1_0;
break;
case 1:
- default:
omap_revision = DRA752_REV_ES1_1;
+ break;
+ case 2:
+ default:
+ omap_revision = DRA752_REV_ES2_0;
+ break;
}
break;
@@ -674,7 +678,7 @@ void __init dra7xxx_check_revision(void)
/* Unknown default to latest silicon rev as default*/
pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
__func__, idcode, hawkeye, rev);
- omap_revision = DRA752_REV_ES1_1;
+ omap_revision = DRA752_REV_ES2_0;
}
sprintf(soc_name, "DRA%03x", omap_rev() >> 16);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 980c9372e6fd..3eaeaca5da05 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -676,6 +676,7 @@ void __init am43xx_init_early(void)
void __init am43xx_init_late(void)
{
omap_common_late_init();
+ omap2_clk_enable_autoidle_all();
}
#endif
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 4cb8fd9f741f..72ebc4c16bae 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -901,7 +901,8 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
return 0;
- if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
+ if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER &&
+ od->_driver_status != BUS_NOTIFY_BIND_DRIVER) {
if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
dev_warn(dev, "%s: enabled but no driver. Idling\n",
__func__);
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 425bfcd67db6..b668719b9b25 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -103,7 +103,8 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { }
#define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0)
#define PM_OMAP4_CPU_OSWR_DISABLE (1 << 1)
-#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) ||\
+ defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX))
extern u16 pm44xx_errata;
#define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id))
#else
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index f97654d11ea5..2d1d3845253c 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -469,6 +469,8 @@ IS_OMAP_TYPE(3430, 0x3430)
#define DRA7XX_CLASS 0x07000000
#define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
#define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
+#define DRA752_REV_ES2_0 (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
+#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
void omap2xxx_check_revision(void);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e4d8701f99f9..a55655127ef2 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -297,12 +297,8 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
if (IS_ERR(src))
return PTR_ERR(src);
- r = clk_set_parent(timer->fclk, src);
- if (r < 0) {
- pr_warn("%s: %s cannot set source\n", __func__, oh->name);
- clk_put(src);
- return r;
- }
+ WARN(clk_set_parent(timer->fclk, src) < 0,
+ "Cannot set timer parent clock, no PLL clock driver?");
clk_put(src);
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index e5a35f6b83a7..d44d311704ba 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -300,7 +300,7 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET);
if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) ||
- (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) {
+ (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) {
val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL;
val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL;
pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n",
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index a3ebb517cca1..a727282bfa99 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -502,7 +502,7 @@ static void balloon3_irq_handler(struct irq_desc *desc)
balloon3_irq_enabled;
do {
struct irq_data *d = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_data_get_chip(d);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int irq;
/* clear useless edge notification */
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
index d28fe291233a..07b93fd24474 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/include/mach/addr-map.h
@@ -44,6 +44,13 @@
*/
/*
+ * DFI Bus for NAND, PXA3xx only
+ */
+#define NAND_PHYS 0x43100000
+#define NAND_VIRT IOMEM(0xf6300000)
+#define NAND_SIZE 0x00100000
+
+/*
* Internal Memory Controller (PXA27x and later)
*/
#define IMEMC_PHYS 0x58000000
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index ce0f8d6242e2..06005d3f2ba3 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -47,6 +47,13 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
#define ISRAM_START 0x5c000000
#define ISRAM_SIZE SZ_256K
+/*
+ * NAND NFC: DFI bus arbitration subset
+ */
+#define NDCR (*(volatile u32 __iomem*)(NAND_VIRT + 0))
+#define NDCR_ND_ARB_EN (1 << 12)
+#define NDCR_ND_ARB_CNTL (1 << 19)
+
static void __iomem *sram;
static unsigned long wakeup_src;
@@ -362,7 +369,12 @@ static struct map_desc pxa3xx_io_desc[] __initdata = {
.pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE),
.length = SMEMC_SIZE,
.type = MT_DEVICE
- }
+ }, {
+ .virtual = (unsigned long)NAND_VIRT,
+ .pfn = __phys_to_pfn(NAND_PHYS),
+ .length = NAND_SIZE,
+ .type = MT_DEVICE
+ },
};
void __init pxa3xx_map_io(void)
@@ -419,6 +431,13 @@ static int __init pxa3xx_init(void)
*/
ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
+ /*
+ * Disable DFI bus arbitration, to prevent a system bus lock if
+ * somebody disables the NAND clock (unused clock) while this
+ * bit remains set.
+ */
+ NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
+
if ((ret = pxa_init_dma(IRQ_DMA, 32)))
return ret;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9769f1eefe3b..00b7f7de28a1 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -365,15 +365,21 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
user:
if (LDST_L_BIT(instr)) {
unsigned long val;
+ unsigned int __ua_flags = uaccess_save_and_enable();
+
get16t_unaligned_check(val, addr);
+ uaccess_restore(__ua_flags);
/* signed half-word? */
if (instr & 0x40)
val = (signed long)((signed short) val);
regs->uregs[rd] = val;
- } else
+ } else {
+ unsigned int __ua_flags = uaccess_save_and_enable();
put16t_unaligned_check(regs->uregs[rd], addr);
+ uaccess_restore(__ua_flags);
+ }
return TYPE_LDST;
@@ -420,14 +426,21 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
user:
if (load) {
- unsigned long val;
+ unsigned long val, val2;
+ unsigned int __ua_flags = uaccess_save_and_enable();
+
get32t_unaligned_check(val, addr);
+ get32t_unaligned_check(val2, addr + 4);
+
+ uaccess_restore(__ua_flags);
+
regs->uregs[rd] = val;
- get32t_unaligned_check(val, addr + 4);
- regs->uregs[rd2] = val;
+ regs->uregs[rd2] = val2;
} else {
+ unsigned int __ua_flags = uaccess_save_and_enable();
put32t_unaligned_check(regs->uregs[rd], addr);
put32t_unaligned_check(regs->uregs[rd2], addr + 4);
+ uaccess_restore(__ua_flags);
}
return TYPE_LDST;
@@ -458,10 +471,15 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg
trans:
if (LDST_L_BIT(instr)) {
unsigned int val;
+ unsigned int __ua_flags = uaccess_save_and_enable();
get32t_unaligned_check(val, addr);
+ uaccess_restore(__ua_flags);
regs->uregs[rd] = val;
- } else
+ } else {
+ unsigned int __ua_flags = uaccess_save_and_enable();
put32t_unaligned_check(regs->uregs[rd], addr);
+ uaccess_restore(__ua_flags);
+ }
return TYPE_LDST;
fault:
@@ -531,6 +549,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
#endif
if (user_mode(regs)) {
+ unsigned int __ua_flags = uaccess_save_and_enable();
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
if (regbits & 1) {
@@ -542,6 +561,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
put32t_unaligned_check(regs->uregs[rd], eaddr);
eaddr += 4;
}
+ uaccess_restore(__ua_flags);
} else {
for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
regbits >>= 1, rd += 1)
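[Editor's note, not part of the patch: the alignment.c hunks above all apply the same idiom — any user-memory access in the fixup path is now bracketed by uaccess_save_and_enable()/uaccess_restore(), so the handler still works once CONFIG_CPU_SW_DOMAIN_PAN closes kernel access to user space by default. A minimal sketch of that shape, using this file's own get32t_unaligned_check() macro (which branches to a local "fault" label) and its TYPE_LDST/TYPE_FAULT return codes; the helper name is hypothetical.]

/* Hypothetical helper mirroring the pattern applied in the hunks above. */
static int read_unaligned_user_word(unsigned long addr, unsigned long *out)
{
	unsigned long val;
	unsigned int __ua_flags = uaccess_save_and_enable();

	get32t_unaligned_check(val, addr);	/* jumps to "fault" on a fault */
	uaccess_restore(__ua_flags);

	*out = val;
	return TYPE_LDST;

 fault:
	uaccess_restore(__ua_flags);
	return TYPE_FAULT;
}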
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ad9529cc4203..daa1a65f2eb7 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -107,7 +107,6 @@ static const struct of_device_id pxa_ssp_of_ids[] = {
{ .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
{ .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
{ .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
- { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
{ },
};
MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index d18ee4259ee5..06a15644be38 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -81,7 +81,7 @@
};
idle-states {
- entry-method = "arm,psci";
+ entry-method = "psci";
CPU_SLEEP_0: cpu-sleep-0 {
compatible = "arm,idle-state";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index a712bea3bf2c..cc093a482aa4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -106,7 +106,7 @@
};
idle-states {
- entry-method = "arm,psci";
+ entry-method = "psci";
cpu_sleep: cpu-sleep-0 {
compatible = "arm,idle-state";
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4562459456a6..ed039688c221 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HALT_POLL_NS_DEFAULT 500000
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b0329be95cb1..26b066690593 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
pte = pte_mkdirty(pte);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6eaedd02..13671a9cf016 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
*/
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ !PAGE_ALIGNED(md->phys_addr))
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518b9a..0f03a8fe2314 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* save return value regs*/
+ .macro save_return_regs
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+ .endm
+
+ /* restore return value regs*/
+ .macro restore_return_regs
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+ .endm
+
/*
* void ftrace_graph_caller(void)
*
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
- str x0, [sp, #-16]!
+ save_return_regs
mov x0, x29 // parent's fp
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
mov x30, x0 // restore the original return address
- ldr x0, [sp], #16
+ restore_return_regs
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index f61f2dd67464..241b9b9729d8 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -20,4 +20,5 @@ generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 61cd1e786a14..91d49c0a3118 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -46,4 +46,5 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index f17c4dc6050c..945544ec603e 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -59,4 +59,5 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b7f68192d15b..1778805f6380 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -43,4 +43,5 @@ generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 8e47b832cc76..1fa084cf1a43 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index f9c86c475bbd..f211839e2cae 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -294,6 +294,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
#endif
+ pci_read_bridge_bases(bus);
+
if (bus->number == 0) {
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index daee37bd0999..db8ddabc6bd2 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -58,4 +58,5 @@ generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 9de3ba12f6b9..502a91d8dbbd 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index d89b6013c941..7cc3be9fa7c6 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -533,9 +533,10 @@ void pcibios_fixup_bus(struct pci_bus *b)
{
struct pci_dev *dev;
- if (b->self)
+ if (b->self) {
+ pci_read_bridge_bases(b);
pcibios_fixup_bridge_resources(b->self);
-
+ }
list_for_each_entry(dev, &b->devices, bus_list)
pcibios_fixup_device_resources(dev);
platform_pci_fixup_bus(b);
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index e0eb704ca1fa..fd104bd221ce 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += module.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 0b6b40d37b95..5b4ec541ba7c 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index eeb3a8991fc4..6e5198e2c124 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 3a7006654ce9..f75600b0ca23 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SEEQ is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 0586b323a673..a42d91c389a6 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ad1dbce07aa4..77f4a11083e9 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b44acacaecf4..5a329f77329b 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8afca3753db1..83c80d2030ec 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SEEQ is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index ef00875994d9..6cb42c3bf5a2 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 387c2bd90ff1..c7508c30330c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 35355c1bc714..64b71664a303 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -354,6 +359,7 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 8442d267b877..9a4cab78a2ea 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 0e1b542e1555..1a2eaac13dbd 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_LOG_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb790f7..066e74f666ae 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
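
These macros matter on ABIs such as m68k that pass system-call arguments on the caller's stack. A minimal sketch of how a stack-argument syscall wrapper would use them; sys_example and do_example are hypothetical names, not part of this patch:

asmlinkage long sys_example(int fd, unsigned long len)
{
	long ret = do_example(fd, len);		/* hypothetical worker */

	/*
	 * Tie the caller-owned stack slots for fd and len to ret so gcc
	 * neither reuses them as scratch space nor turns this into a
	 * tail call.
	 */
	asmlinkage_protect(2, ret, fd, len);
	return ret;
}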
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 244e0dbe45db..0793a7f17417 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 356
+#define NR_syscalls 375
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 61fb6cb9d2ae..5e6fae6c275f 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -361,5 +361,24 @@
#define __NR_memfd_create 353
#define __NR_bpf 354
#define __NR_execveat 355
+#define __NR_socket 356
+#define __NR_socketpair 357
+#define __NR_bind 358
+#define __NR_connect 359
+#define __NR_listen 360
+#define __NR_accept4 361
+#define __NR_getsockopt 362
+#define __NR_setsockopt 363
+#define __NR_getsockname 364
+#define __NR_getpeername 365
+#define __NR_sendto 366
+#define __NR_sendmsg 367
+#define __NR_recvfrom 368
+#define __NR_recvmsg 369
+#define __NR_shutdown 370
+#define __NR_recvmmsg 371
+#define __NR_sendmmsg 372
+#define __NR_userfaultfd 373
+#define __NR_membarrier 374
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index a0ec4303f2c8..5dd0e80042f5 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -376,4 +376,22 @@ ENTRY(sys_call_table)
.long sys_memfd_create
.long sys_bpf
.long sys_execveat /* 355 */
-
+ .long sys_socket
+ .long sys_socketpair
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen /* 360 */
+ .long sys_accept4
+ .long sys_getsockopt
+ .long sys_setsockopt
+ .long sys_getsockname
+ .long sys_getpeername /* 365 */
+ .long sys_sendto
+ .long sys_sendmsg
+ .long sys_recvfrom
+ .long sys_recvmsg
+ .long sys_shutdown /* 370 */
+ .long sys_recvmmsg
+ .long sys_sendmmsg
+ .long sys_userfaultfd
+ .long sys_membarrier
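
With these table slots wired up, m68k userspace can reach the direct socket calls without going through sys_socketcall. A small sketch, assuming the __NR_socket value (356) added in the uapi header above:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* AF_INET = 2, SOCK_DGRAM = 2, protocol 0 */
	long fd = syscall(356, 2, 2, 0);	/* __NR_socket on m68k */

	printf("direct socket() syscall returned %ld\n", fd);
	return fd < 0;
}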
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index df31353fd200..29acb89daaaa 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -54,4 +54,5 @@ generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2f222f355c4b..b0ae88c9fed9 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += syscalls.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 6b8b75266801..ae838ed5fcf2 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -863,7 +863,14 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
void pcibios_fixup_bus(struct pci_bus *bus)
{
- /* Fixup the bus */
+ /* When called from the generic PCI probe, read PCI<->PCI bridge
+ * bases. This is -not- called when generating the PCI tree from
+ * the OF device-tree.
+ */
+ if (bus->self != NULL)
+ pci_read_bridge_bases(bus);
+
+ /* Now fixup the bus */
pcibios_setup_bus_self(bus);
/* Now fixup devices on that bus */
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 15ecb4831e12..eeb3953ed8ac 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init(
return 0;
}
-IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc",
- ath79_misc_intc_of_init);
+
+static int __init ar7100_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+ ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+ ar7240_misc_intc_of_init);
static int __init ar79_cpu_intc_of_init(
struct device_node *node, struct device_node *parent)
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 89a628455bc2..bd634259eab9 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void)
while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
&& (total < MAX_MEMORY)) {
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
- __pa_symbol(&__init_end), -1,
+ __pa_symbol(&_end), -1,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (memory >= 0) {
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 40ec4ca3f946..c7fe4d01e79c 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -17,4 +17,5 @@ generic-y += segment.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += user.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 9801ac982655..fe67f12ac239 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -20,6 +20,9 @@
#ifndef cpu_has_tlb
#define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB)
#endif
+#ifndef cpu_has_ftlb
+#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB)
+#endif
#ifndef cpu_has_tlbinv
#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
#endif
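
The new cpu_has_ftlb test gives other code a single switch for FTLB-specific handling instead of re-decoding Config.MT each time. A minimal consumer sketch (the message text is illustrative only):

static void report_tlb_sketch(void)
{
	if (!cpu_has_tlb)
		return;

	if (cpu_has_ftlb)
		pr_info("dual VTLB + FTLB present\n");
	else
		pr_info("standard VTLB only\n");
}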
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index cd89e9855775..82ad15f11049 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -385,6 +385,7 @@ enum cpu_type_enum {
#define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */
#define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */
#define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */
+#define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 3a54dbca9f7e..5a1a882e0a75 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -61,6 +61,7 @@
#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index b02891f9caaf..21d9607c80d7 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -66,6 +66,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
}
/**
+ * maar_init() - initialise MAARs
+ *
+ * Performs initialisation of MAARs for the current CPU, making use of the
+ * platform's implementation of platform_maar_init where necessary and
+ * duplicating the setup it provides on secondary CPUs.
+ */
+extern void maar_init(void);
+
+/**
* struct maar_config - MAAR configuration data
* @lower: The lowest address that the MAAR pair will affect. Must be
* aligned to a 2^16 byte boundary.
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index d75b75e78ebb..1f1927ab4269 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8)
BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0)
BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0)
BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130)
+BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150)
/* Core Local & Core Other register accessor functions */
BUILD_CM_Cx_RW(reset_release, 0x00)
@@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
#define CM_GCR_L2_CONFIG_ASSOC_SHF 0
#define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0)
+/* GCR_SYS_CONFIG2 register fields */
+#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0
+#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0)
+
/* GCR_Cx_COHERENCE register fields */
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0)
@@ -405,4 +410,38 @@ static inline int mips_cm_revision(void)
return read_gcr_rev();
}
+/**
+ * mips_cm_max_vp_width() - return the width in bits of VP indices
+ *
+ * Return: the width, in bits, of VP indices in fields that combine core & VP
+ * indices.
+ */
+static inline unsigned int mips_cm_max_vp_width(void)
+{
+ extern int smp_num_siblings;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
+
+ return smp_num_siblings;
+}
+
+/**
+ * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
+ * @cpu: the CPU whose VP ID to calculate
+ *
+ * Hardware such as the GIC uses identifiers for VPs which may not match the
+ * CPU numbers used by Linux. This function calculates the hardware VP
+ * identifier corresponding to a given CPU.
+ *
+ * Return: the VP ID for the CPU.
+ */
+static inline unsigned int mips_cm_vp_id(unsigned int cpu)
+{
+ unsigned int core = cpu_data[cpu].core;
+ unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
+
+ return (core * mips_cm_max_vp_width()) + vp;
+}
+
#endif /* __MIPS_ASM_MIPS_CM_H__ */
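
A worked example of the arithmetic in mips_cm_vp_id(), using an assumed topology rather than figures from real hardware: on a CM3 system whose GCR_SYS_CONFIG2.MAXVPW field reads 2, the CPU sitting in core 2 as VP 1 gets hardware VP ID 2 * 2 + 1 = 5.

static unsigned int example_vp_id(unsigned int core, unsigned int vp,
				  unsigned int max_vp_width)
{
	/* Same formula as mips_cm_vp_id(): cores are strided by max_vp_width */
	return core * max_vp_width + vp;	/* 2 * 2 + 1 == 5 */
}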
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index d3cd8eac81e3..c64781cf649f 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -487,6 +487,8 @@
/* Bits specific to the MIPS32/64 PRA. */
#define MIPS_CONF_MT (_ULCAST_(7) << 7)
+#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
+#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
#define MIPS_CONF_AR (_ULCAST_(7) << 10)
#define MIPS_CONF_AT (_ULCAST_(3) << 13)
#define MIPS_CONF_M (_ULCAST_(1) << 31)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index c03088f9f514..cfabadb135d9 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -377,16 +377,18 @@
#define __NR_memfd_create (__NR_Linux + 354)
#define __NR_bpf (__NR_Linux + 355)
#define __NR_execveat (__NR_Linux + 356)
+#define __NR_userfaultfd (__NR_Linux + 357)
+#define __NR_membarrier (__NR_Linux + 358)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 356
+#define __NR_Linux_syscalls 358
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 356
+#define __NR_O32_Linux_syscalls 358
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -711,16 +713,18 @@
#define __NR_memfd_create (__NR_Linux + 314)
#define __NR_bpf (__NR_Linux + 315)
#define __NR_execveat (__NR_Linux + 316)
+#define __NR_userfaultfd (__NR_Linux + 317)
+#define __NR_membarrier (__NR_Linux + 318)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 316
+#define __NR_Linux_syscalls 318
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 316
+#define __NR_64_Linux_syscalls 318
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1049,15 +1053,17 @@
#define __NR_memfd_create (__NR_Linux + 318)
#define __NR_bpf (__NR_Linux + 319)
#define __NR_execveat (__NR_Linux + 320)
+#define __NR_userfaultfd (__NR_Linux + 321)
+#define __NR_membarrier (__NR_Linux + 322)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 320
+#define __NR_Linux_syscalls 322
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 320
+#define __NR_N32_Linux_syscalls 322
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 4e62bf85d0b0..459cb017306c 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -26,6 +26,7 @@
#include <linux/power/jz4740-battery.h>
#include <linux/power/gpio-charger.h>
+#include <asm/mach-jz4740/gpio.h>
#include <asm/mach-jz4740/jz4740_fb.h>
#include <asm/mach-jz4740/jz4740_mmc.h>
#include <asm/mach-jz4740/jz4740_nand.h>
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index a74e181058b0..8c6d76c9b2d6 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/gpio.h>
#define JZ4740_GPIO_BASE_A (32*0)
#define JZ4740_GPIO_BASE_B (32*1)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 9f71c06aebf6..209ded16806b 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -39,6 +39,7 @@
mfc0 \dest, CP0_CONFIG, 3
andi \dest, \dest, MIPS_CONF3_MT
beqz \dest, \nomt
+ nop
.endm
.section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
END(excep_ejtag)
LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* Check that the core implements the MT ASE */
has_mt t0, 3f
- nop
.set push
.set mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
PTR_ADDU t0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+ li t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
has_mt ta2, 1f
- li t9, 0
/* Find the number of VPEs present in the core */
mfc0 t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
/* Retrieve the VPE ID from EBase.CPUNum */
mfc0 t9, $15, 1
and t9, t9, t1
+#endif
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
PTR_ADDU v0, v0, ta3
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* If the core doesn't support MT then return */
bnez ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
2: .set pop
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
/* Return */
jr ra
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 571a8e6ea5bd..09a51d091941 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
{
unsigned int config0;
- int isa;
+ int isa, mt;
config0 = read_c0_config();
/*
* Look for Standard TLB or Dual VTLB and FTLB
*/
- if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
- (((config0 & MIPS_CONF_MT) >> 7) == 4))
+ mt = config0 & MIPS_CONF_MT;
+ if (mt == MIPS_CONF_MT_TLB)
c->options |= MIPS_CPU_TLB;
+ else if (mt == MIPS_CONF_MT_FTLB)
+ c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
isa = (config0 & MIPS_CONF_AT) >> 13;
switch (isa) {
@@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
if (cpu_has_tlb) {
if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
c->options |= MIPS_CPU_TLBINV;
+
/*
- * This is a bit ugly. R6 has dropped that field from
- * config4 and the only valid configuration is VTLB+FTLB so
- * set a good value for mmuextdef for that case.
+ * R6 has dropped the MMUExtDef field from config4.
+ * On R6 these fields always describe the FTLB, and do so only
+ * if one is present according to Config.MT.
*/
- if (cpu_has_mips_r6)
+ if (!cpu_has_mips_r6)
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ else if (cpu_has_ftlb)
mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
else
- mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ mmuextdef = 0;
switch (mmuextdef) {
case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 423ae83af1fb..3375745b9198 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -18,7 +18,7 @@
.set pop
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti)
*/
.align 7
LEAF(resume)
@@ -28,30 +28,6 @@
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
- /*
- * check if we need to save FPU registers
- */
- .set push
- .set noreorder
- beqz a3, 1f
- PTR_L t3, TASK_THREAD_INFO(a0)
- .set pop
-
- /*
- * clear saved user stack CU1 bit
- */
- LONG_L t0, ST_OFF(t3)
- li t1, ~ST0_CU1
- and t0, t0, t1
- LONG_S t0, ST_OFF(t3)
-
- .set push
- .set arch=mips64r2
- fpu_save_double a0 t0 t1 # c0_status passed in t0
- # clobbers t1
- .set pop
-1:
-
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
dmfc0 t0, $11,7 /* CvmMemCtl */
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 5087a4b72e6b..ac27ef7d4d0e 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -31,18 +31,8 @@
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
-/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
@@ -50,22 +40,6 @@ LEAF(resume)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
- beqz a3, 1f
-
- PTR_L t3, TASK_THREAD_INFO(a0)
-
- /*
- * clear saved user stack CU1 bit
- */
- lw t0, ST_OFF(t3)
- li t1, ~ST0_CU1
- and t0, t0, t1
- sw t0, ST_OFF(t3)
-
- fpu_save_single a0, t0 # clobbers t0
-
-1:
-
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 4cc13508d967..65a74e4f0f45 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
lw t1, PT_EPC(sp) # skip syscall on return
subu v0, v0, __NR_O32_Linux # check syscall number
- sltiu t0, v0, __NR_O32_Linux_syscalls + 1
addiu t1, 4 # skip to next instruction
sw t1, PT_EPC(sp)
- beqz t0, illegal_syscall
-
- sll t0, v0, 2
- la t1, sys_call_table
- addu t1, t0
- lw t2, (t1) # syscall routine
- beqz t2, illegal_syscall
sw a3, PT_R26(sp) # save a3 for syscall restarting
@@ -96,6 +88,16 @@ loads_done:
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
bnez t0, syscall_trace_entry # -> yes
+syscall_common:
+ sltiu t0, v0, __NR_O32_Linux_syscalls + 1
+ beqz t0, illegal_syscall
+
+ sll t0, v0, 2
+ la t1, sys_call_table
+ addu t1, t0
+ lw t2, (t1) # syscall routine
+
+ beqz t2, illegal_syscall
jalr t2 # Do The Real Thing (TM)
@@ -116,7 +118,7 @@ o32_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
- move s0, t2
+ move s0, v0
move a0, sp
/*
@@ -129,27 +131,18 @@ syscall_trace_entry:
1: jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ move v0, s0 # restore syscall
- move t0, s0
RESTORE_STATIC
lw a0, PT_R4(sp) # Restore argument registers
lw a1, PT_R5(sp)
lw a2, PT_R6(sp)
lw a3, PT_R7(sp)
- jalr t0
-
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sw t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- lw t1, PT_R2(sp) # syscall number
- negu v0 # error
- sw t1, PT_R0(sp) # save it for syscall restarting
-1: sw v0, PT_R2(sp) # result
+ j syscall_common
-2: j syscall_exit
+1: j syscall_exit
/* ------------------------------------------------------------------------ */
@@ -599,3 +592,5 @@ EXPORT(sys_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
PTR sys_execveat
+ PTR sys_userfaultfd
+ PTR sys_membarrier
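
A C-level sketch of the control flow this rework gives the o32 entry path: the trace path no longer carries its own copy of the dispatch, it restores the syscall number and argument registers and falls into syscall_common. The function below is illustrative, not the actual asm; its parameters stand in for the register state.

typedef long (*syscall_fn_t)(long, long, long, long);

static long o32_dispatch_sketch(unsigned long nr, long a0, long a1, long a2,
				long a3, struct pt_regs *regs,
				bool syscall_work, syscall_fn_t *table)
{
	if (syscall_work) {
		if (syscall_trace_enter(regs, nr) < 0)
			return regs->regs[2];	/* seccomp skipped the call */
		/* "move v0, s0": number and argument registers restored
		 * before falling through to the common path */
	}

	/* syscall_common: one shared bounds check, table lookup and call */
	if (nr > __NR_O32_Linux_syscalls || !table[nr])
		return -ENOSYS;

	return table[nr](a0, a1, a2, a3);	/* Do The Real Thing (TM) */
}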
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a6f6b762c47a..e732981cf99f 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
.set at
#endif
- dsubu t0, v0, __NR_64_Linux # check syscall number
- sltiu t0, t0, __NR_64_Linux_syscalls + 1
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
sd t1, PT_EPC(sp)
#endif
- beqz t0, illegal_syscall
-
- dsll t0, v0, 3 # offset into table
- ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
- # syscall routine
sd a3, PT_R26(sp) # save a3 for syscall restarting
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
and t0, t1, t0
bnez t0, syscall_trace_entry
+syscall_common:
+ dsubu t2, v0, __NR_64_Linux
+ sltiu t0, t2, __NR_64_Linux_syscalls + 1
+ beqz t0, illegal_syscall
+
+ dsll t0, t2, 3 # offset into table
+ dla t2, sys_call_table
+ daddu t0, t2, t0
+ ld t2, (t0) # syscall routine
+ beqz t2, illegal_syscall
+
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -78,14 +82,14 @@ n64_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
- move s0, t2
+ move s0, v0
move a0, sp
move a1, v0
jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
- move t0, s0
+ move v0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
@@ -93,19 +97,9 @@ syscall_trace_entry:
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
- jalr t0
-
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
+ j syscall_common
-2: j syscall_exit
+1: j syscall_exit
illegal_syscall:
/* This also isn't a 64-bit syscall, throw an error. */
@@ -436,4 +430,6 @@ EXPORT(sys_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 5315 */
PTR sys_execveat
+ PTR sys_userfaultfd
+ PTR sys_membarrier
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 4b2010654c46..c79484397584 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
+syscall_common:
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -75,9 +76,9 @@ n32_syscall_trace_entry:
move a1, v0
jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
- move t0, s0
+ move t2, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
@@ -85,19 +86,9 @@ n32_syscall_trace_entry:
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
- jalr t0
+ j syscall_common
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
-
-2: j syscall_exit
+1: j syscall_exit
not_n32_scall:
/* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table)
PTR sys_memfd_create
PTR sys_bpf
PTR compat_sys_execveat /* 6320 */
+ PTR sys_userfaultfd
+ PTR sys_membarrier
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f543ff4feef9..6369cfd390c6 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -87,6 +87,7 @@ loads_done:
and t0, t1, t0
bnez t0, trace_a_syscall
+syscall_common:
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -130,9 +131,9 @@ trace_a_syscall:
1: jal syscall_trace_enter
- bltz v0, 2f # seccomp failed? Skip syscall
+ bltz v0, 1f # seccomp failed? Skip syscall
- move t0, s0
+ move t2, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
@@ -142,19 +143,9 @@ trace_a_syscall:
ld a5, PT_R9(sp)
ld a6, PT_R10(sp)
ld a7, PT_R11(sp) # For indirect syscalls
- jalr t0
+ j syscall_common
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
-
-2: j syscall_exit
+1: j syscall_exit
/* ------------------------------------------------------------------------ */
@@ -584,4 +575,6 @@ EXPORT(sys32_call_table)
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
PTR compat_sys_execveat
+ PTR sys_userfaultfd
+ PTR sys_membarrier
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 35b8316002f8..479515109e5b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -338,7 +338,7 @@ static void __init bootmem_init(void)
if (end <= reserved_end)
continue;
#ifdef CONFIG_BLK_DEV_INITRD
- /* mapstart should be after initrd_end */
+ /* Skip zones before initrd and initrd itself */
if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
continue;
#endif
@@ -371,6 +371,14 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * mapstart should be after initrd_end
+ */
+ if (initrd_end)
+ mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
/*
* Initialize the boot-time allocator with low memory only.
*/
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index a31896c33716..bd4385a8e6e8 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -42,6 +42,7 @@
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
+#include <asm/maar.h>
cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
@@ -157,6 +158,7 @@ asmlinkage void start_secondary(void)
mips_clockevent_init();
mp_ops->init_secondary();
cpu_report();
+ maar_init();
/*
* XXX parity protection should be folded in here when it's converted
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index f6c44dd332e2..d6d07ad56180 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
}
if (memsize == 0)
memsize = 256;
+
+ loongson_sysconf.nr_uarts = 1;
+
pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
#else
struct boot_params *boot_p;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index a914dc1cb6d1..d8117be729a2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 66d0f49c5bec..8770e619185e 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -44,6 +44,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/maar.h>
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end,
#endif
}
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+ struct maar_config cfg[BOOT_MEM_MAP_MAX];
+ unsigned i, num_configured, num_cfg = 0;
+ phys_addr_t skip;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ switch (boot_mem_map.map[i].type) {
+ case BOOT_MEM_RAM:
+ case BOOT_MEM_INIT_RAM:
+ break;
+ default:
+ continue;
+ }
+
+ skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+ cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+ cfg[num_cfg].lower += skip;
+
+ cfg[num_cfg].upper = cfg[num_cfg].lower;
+ cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+ cfg[num_cfg].upper -= skip;
+
+ cfg[num_cfg].attrs = MIPS_MAAR_S;
+ num_cfg++;
+ }
+
+ num_configured = maar_config(cfg, num_cfg, num_pairs);
+ if (num_configured < num_cfg)
+ pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+ num_pairs, num_cfg);
+
+ return num_configured;
+}
+
+void maar_init(void)
+{
+ unsigned num_maars, used, i;
+ phys_addr_t lower, upper, attr;
+ static struct {
+ struct maar_config cfgs[3];
+ unsigned used;
+ } recorded = { { { 0 } }, 0 };
+
+ if (!cpu_has_maar)
+ return;
+
+ /* Detect the number of MAARs */
+ write_c0_maari(~0);
+ back_to_back_c0_hazard();
+ num_maars = read_c0_maari() + 1;
+
+ /* MAARs should be in pairs */
+ WARN_ON(num_maars % 2);
+
+ /* Set MAARs using values we recorded already */
+ if (recorded.used) {
+ used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+ BUG_ON(used != recorded.used);
+ } else {
+ /* Configure the required MAARs */
+ used = platform_maar_init(num_maars / 2);
+ }
+
+ /* Disable any further MAARs */
+ for (i = (used * 2); i < num_maars; i++) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ write_c0_maar(0);
+ back_to_back_c0_hazard();
+ }
+
+ if (recorded.used)
+ return;
+
+ pr_info("MAAR configuration:\n");
+ for (i = 0; i < num_maars; i += 2) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ upper = read_c0_maar();
+
+ write_c0_maari(i + 1);
+ back_to_back_c0_hazard();
+ lower = read_c0_maar();
+
+ attr = lower & upper;
+ lower = (lower & MIPS_MAAR_ADDR) << 4;
+ upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+ pr_info(" [%d]: ", i / 2);
+ if (!(attr & MIPS_MAAR_V)) {
+ pr_cont("disabled\n");
+ continue;
+ }
+
+ pr_cont("%pa-%pa", &lower, &upper);
+
+ if (attr & MIPS_MAAR_S)
+ pr_cont(" speculate");
+
+ pr_cont("\n");
+
+ /* Record the setup for use on secondary CPUs */
+ if (used <= ARRAY_SIZE(recorded.cfgs)) {
+ recorded.cfgs[recorded.used].lower = lower;
+ recorded.cfgs[recorded.used].upper = upper;
+ recorded.cfgs[recorded.used].attrs = attr;
+ recorded.used++;
+ }
+ }
+}
+
#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
@@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void)
#endif
}
-unsigned __weak platform_maar_init(unsigned num_pairs)
-{
- struct maar_config cfg[BOOT_MEM_MAP_MAX];
- unsigned i, num_configured, num_cfg = 0;
- phys_addr_t skip;
-
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- switch (boot_mem_map.map[i].type) {
- case BOOT_MEM_RAM:
- case BOOT_MEM_INIT_RAM:
- break;
- default:
- continue;
- }
-
- skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
- cfg[num_cfg].lower = boot_mem_map.map[i].addr;
- cfg[num_cfg].lower += skip;
-
- cfg[num_cfg].upper = cfg[num_cfg].lower;
- cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
- cfg[num_cfg].upper -= skip;
-
- cfg[num_cfg].attrs = MIPS_MAAR_S;
- num_cfg++;
- }
-
- num_configured = maar_config(cfg, num_cfg, num_pairs);
- if (num_configured < num_cfg)
- pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
- num_pairs, num_cfg);
-
- return num_configured;
-}
-
-static void maar_init(void)
-{
- unsigned num_maars, used, i;
-
- if (!cpu_has_maar)
- return;
-
- /* Detect the number of MAARs */
- write_c0_maari(~0);
- back_to_back_c0_hazard();
- num_maars = read_c0_maari() + 1;
-
- /* MAARs should be in pairs */
- WARN_ON(num_maars % 2);
-
- /* Configure the required MAARs */
- used = platform_maar_init(num_maars / 2);
-
- /* Disable any further MAARs */
- for (i = (used * 2); i < num_maars; i++) {
- write_c0_maari(i);
- back_to_back_c0_hazard();
- write_c0_maar(0);
- back_to_back_c0_hazard();
- }
-}
-
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
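
The 64 KB trimming that platform_maar_init() applies to each bootmem region is easier to follow with concrete numbers; the region below is assumed, not taken from a real board.

/*
 * Assumed region: addr = 0x80001234, size = 0x10000000
 *
 *   skip  = 0x10000 - (addr & 0xffff) = 0xedcc
 *   lower = addr + skip               = 0x80010000  (rounded up to 64 KB)
 *   upper = lower + size - 1 - skip   = 0x90001233  (the region's last byte)
 *
 * The MAAR pair then marks 0x80010000..0x90001233 as speculation-safe
 * (MIPS_MAAR_S). On the boot CPU, maar_init() records up to three such
 * pairs so that secondary CPUs can replay an identical configuration.
 */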
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index e92726099be0..5d2e0c8d29c0 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -57,15 +57,28 @@
LEAF(sk_load_word)
is_offset_negative(word)
- .globl sk_load_word_positive
-sk_load_word_positive:
+FEXPORT(sk_load_word_positive)
is_offset_in_header(4, word)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
+ .set reorder
lw $r_A, 0(t1)
+ .set noreorder
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
rotr $r_A, t0, 16
+# else
+ sll t0, $r_A, 24
+ srl t1, $r_A, 24
+ srl t2, $r_A, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_A, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ or $r_A, t0, t1
+# endif
#endif
jr $r_ra
move $r_ret, zero
@@ -73,15 +86,24 @@ sk_load_word_positive:
LEAF(sk_load_half)
is_offset_negative(half)
- .globl sk_load_half_positive
-sk_load_half_positive:
+FEXPORT(sk_load_half_positive)
is_offset_in_header(2, half)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
+ .set reorder
lh $r_A, 0(t1)
+ .set noreorder
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
seh $r_A, t0
+# else
+ sll t0, $r_A, 24
+ andi t1, $r_A, 0xff00
+ sra t0, t0, 16
+ srl t1, t1, 8
+ or $r_A, t0, t1
+# endif
#endif
jr $r_ra
move $r_ret, zero
@@ -89,8 +111,7 @@ sk_load_half_positive:
LEAF(sk_load_byte)
is_offset_negative(byte)
- .globl sk_load_byte_positive
-sk_load_byte_positive:
+FEXPORT(sk_load_byte_positive)
is_offset_in_header(1, byte)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
@@ -148,23 +169,47 @@ sk_load_byte_positive:
NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_s0
jr $r_ra
rotr $r_A, t0, 16
-#endif
+# else
+ sll t0, $r_s0, 24
+ srl t1, $r_s0, 24
+ srl t2, $r_s0, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_s0, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
jr $r_ra
- move $r_A, $r_s0
+ move $r_A, $r_s0
+#endif
END(bpf_slow_path_word)
NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
jr $r_ra
wsbh $r_A, $r_s0
-#endif
+# else
+ sll t0, $r_s0, 8
+ andi t1, $r_s0, 0xff00
+ andi t0, t0, 0xff00
+ srl t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
jr $r_ra
move $r_A, $r_s0
+#endif
END(bpf_slow_path_half)
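
The pre-R2 fallback sequences added here are open-coded byte swaps for CPUs that lack wsbh/rotr/seh. Their C equivalents, as a sketch:

/* 32-bit swap matching the sll/srl/andi/or sequence in sk_load_word and
 * bpf_slow_path_word */
static inline unsigned int swab32_sketch(unsigned int x)
{
	return (x << 24) |			/* byte 0 -> byte 3 */
	       ((x & 0x0000ff00) << 8) |	/* byte 1 -> byte 2 */
	       ((x >> 8) & 0x0000ff00) |	/* byte 2 -> byte 1 */
	       (x >> 24);			/* byte 3 -> byte 0 */
}

/* 16-bit swap with sign extension, matching sk_load_half: the original low
 * byte ends up in bits 15:8 and is sign-extended via the sll/sra pair */
static inline int swab16_sext_sketch(unsigned int x)
{
	return ((int)(x << 24) >> 16) | ((x & 0xff00) >> 8);
}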
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index c6996cf67a5c..b8a0bf5766f2 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -311,6 +311,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
void pcibios_fixup_bus(struct pci_bus *bus)
{
+ struct pci_dev *dev = bus->self;
+
+ if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ }
}
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 6edb9ee6128e..1c8dd0f5cd5d 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index deaa893efba5..3dfe2d31c67b 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -324,6 +324,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev;
if (bus->self) {
+ pci_read_bridge_bases(bus);
pcibios_fixup_bridge_resources(bus->self);
}
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 914864eb5a25..d63330e88379 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -61,4 +61,5 @@ generic-y += types.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index ab9f4e0ed4cf..ac1662956e0c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 195886a583ba..827a38d7a9db 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
+#define KVM_HALT_POLL_NS_DEFAULT 500000
/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4d65499ee1c1..126d0c4f9b7d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -369,3 +369,4 @@ SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)
PPC64ONLY(switch_endian)
SYSCALL_SPU(userfaultfd)
+SYSCALL_SPU(membarrier)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4a055b6c2a64..13411be86041 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 365
+#define __NR_syscalls 366
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 6ad58d4c879b..6337738018aa 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -387,5 +387,6 @@
#define __NR_execveat 362
#define __NR_switch_endian 363
#define __NR_userfaultfd 364
+#define __NR_membarrier 365
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a1d0632d97c6..7587b2ae5f77 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1032,7 +1032,13 @@ void pcibios_set_master(struct pci_dev *dev)
void pcibios_fixup_bus(struct pci_bus *bus)
{
- /* Fixup the bus */
+ /* When called from the generic PCI probe, read PCI<->PCI bridge
+ * bases. This is -not- called when generating the PCI tree from
+ * the OF device-tree.
+ */
+ pci_read_bridge_bases(bus);
+
+ /* Now fixup the bus */
pcibios_setup_bus_self(bus);
/* Now fixup devices on that bus */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index cf009167d208..099c79d8c160 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -829,12 +829,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
unsigned long size = kvmppc_get_gpr(vcpu, 4);
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
u64 buf;
+ int srcu_idx;
int ret;
if (!is_power_of_2(size) || (size > sizeof(buf)))
return H_TOO_HARD;
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;
@@ -869,6 +872,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
unsigned long val = kvmppc_get_gpr(vcpu, 6);
u64 buf;
+ int srcu_idx;
int ret;
switch (size) {
@@ -892,7 +896,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
return H_TOO_HARD;
}
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;
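
The srcu_read_lock()/srcu_read_unlock() pair being added follows the standard SRCU read-side pattern: kvm_io_bus_read() and kvm_io_bus_write() walk a bus structure published under kvm->srcu, so the walk must sit inside a read-side critical section. The generic shape, as a sketch:

	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* ... dereference anything published under kvm->srcu,
	 *     e.g. the KVM_MMIO_BUS device list ... */
	srcu_read_unlock(&kvm->srcu, srcu_idx);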
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9754e6815e52..228049786888 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2692,9 +2692,13 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
(vc->vcore_state == VCORE_RUNNING ||
- vc->vcore_state == VCORE_EXITING))
+ vc->vcore_state == VCORE_EXITING ||
+ vc->vcore_state == VCORE_PIGGYBACK))
kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
+ if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+ kvmppc_vcore_end_preempt(vc);
+
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
kvmppc_remove_runnable(vc, vcpu);
vcpu->stat.signal_exits++;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2273dcacef39..b98889e9851d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1257,6 +1257,7 @@ mc_cont:
bl kvmhv_accumulate_time
#endif
+ mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 5ad26dd94d77..9043d2e1e2ae 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6ce4a0b7e8da..8ced426091e1 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
*/
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_HALT_POLL_NS_DEFAULT 0
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 92ffe397b893..a05218ff3fe4 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -13,3 +13,4 @@ generic-y += sections.h
generic-y += trace_clock.h
generic-y += xor.h
generic-y += serial.h
+generic-y += word-at-a-time.h
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index ee186e13dfe6..f102048d9c0e 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_mpipe.h>
@@ -29,32 +30,6 @@
/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough. To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
- size_t len = strnlen(src, size) + 1;
- if (len > size) {
- if (size)
- dest[0] = '\0';
- return 0;
- }
- memcpy(dest, src, len);
- return len;
-}
-
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
{
char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
if (!context)
return GXIO_ERR_NO_DEVICE;
- if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+ if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
return GXIO_ERR_NO_DEVICE;
return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
if (rv >= 0) {
- if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+ if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
return GXIO_ERR_INVAL_MEMORY_SIZE;
memcpy(link_mac, mac.mac, sizeof(mac.mac));
}
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
_gxio_mpipe_link_name_t name;
int rv;
- if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+ if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
return GXIO_ERR_NO_DEVICE;
rv = gxio_mpipe_link_open_aux(context, name, flags);
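
The checks in this file change from "== 0" to "< 0" because the removed local helper signalled truncation by returning 0, while the generic strscpy() from <linux/string.h> (pulled in by the new include) returns the copied length on success and -E2BIG on truncation. A small illustration:

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "short", sizeof(buf));		/* n == 5 */
	n = strscpy(buf, "far too long", sizeof(buf));	/* n == -E2BIG, buf truncated */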
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index ba35c41c71ff..0b6cacaad933 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -40,4 +40,5 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
index f0da5a237e94..9f1e05e12255 100644
--- a/arch/tile/kernel/usb.c
+++ b/arch/tile/kernel/usb.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/usb/tilegx.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
static u64 ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 149ec55f9c46..904f3ebf4220 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -25,4 +25,5 @@ generic-y += preempt.h
generic-y += switch_to.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1fc7a286dc6f..256c45b3ae34 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -62,4 +62,5 @@ generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d3033183ed70..055a01de7c8d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1128,7 +1128,18 @@ END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1179,9 +1190,12 @@ ENTRY(nmi)
* we don't want to enable interrupts, because then we'll end
* up in an awkward situation in which IRQs are on but NMIs
* are off.
+ *
+ * We also must not push anything to the stack before switching
+ * stacks lest we corrupt the "NMI executing" variable.
*/
- SWAPGS
+ SWAPGS_UNSAFE_STACK
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e6cf2ad350d1..9727b3b48bd1 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -193,7 +193,7 @@
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 155162ea0e00..ae68be92f755 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,18 @@ extern u64 asmlinkage efi_call(void *fp, ...);
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
u32 type, u64 attribute);
+#ifdef CONFIG_KASAN
+/*
+ * CONFIG_KASAN may redefine memset to __memset. The __memset function exists
+ * only in the kernel binary; since the EFI stub is linked into a separate
+ * binary, it doesn't have __memset() and must use the standard memset from
+ * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove.
+ */
+#undef memcpy
+#undef memset
+#undef memmove
+#endif
+
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 349f80a82b82..2beee0382088 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -40,6 +40,7 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c1c0a1c14344..b8c14bb7fc8f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -141,6 +141,8 @@
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define MSR_PEBS_FRONTEND 0x000003f7
+
#define MSR_IA32_POWER_CTL 0x000001fc
#define MSR_IA32_MC0_CTL 0x00000400
@@ -331,6 +333,7 @@
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
+#define MSR_K8_TSEG_MASK 0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 655e07a48f6c..67f08230103a 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -41,6 +41,7 @@ struct pvclock_wall_clock {
#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
#define PVCLOCK_GUEST_STOPPED (1 << 1)
+/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h
index b0ae1c4dc791..217909b4d6f5 100644
--- a/arch/x86/include/uapi/asm/bitsperlong.h
+++ b/arch/x86/include/uapi/asm/bitsperlong.h
@@ -1,7 +1,7 @@
#ifndef __ASM_X86_BITSPERLONG_H
#define __ASM_X86_BITSPERLONG_H
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__ILP32__)
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
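
The added __ILP32__ test covers the x32 ABI, which defines __x86_64__ but uses 32-bit longs and pointers, so __BITS_PER_LONG must be 32 there. A compile-time check of that assumption, as a sketch:

#if defined(__x86_64__) && defined(__ILP32__)
_Static_assert(sizeof(long) == 4, "x32 uses 32-bit long");
#elif defined(__x86_64__)
_Static_assert(sizeof(long) == 8, "LP64 uses 64-bit long");
#endif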
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 381c8b9b3a33..20e242ea1bc4 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -34,11 +34,10 @@
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
-static void (*hv_kexec_handler)(void);
-static void (*hv_crash_handler)(struct pt_regs *regs);
-
#if IS_ENABLED(CONFIG_HYPERV)
static void (*vmbus_handler)(void);
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
void hyperv_vector_handler(struct pt_regs *regs)
{
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
hv_crash_handler = NULL;
}
EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
-#endif
+#ifdef CONFIG_KEXEC_CORE
static void hv_machine_shutdown(void)
{
if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
hv_crash_handler(regs);
native_machine_crash_shutdown(regs);
}
-
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_HYPERV */
static uint32_t __init ms_hyperv_platform(void)
{
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
no_timer_check = 1;
#endif
+#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
machine_ops.shutdown = hv_machine_shutdown;
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+#endif
mark_tsc_unstable("running on Hyper-V");
}
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 5edf6d868fc1..165be83a7fa4 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -47,6 +47,7 @@ enum extra_reg_type {
EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
EXTRA_REG_LBR = 2, /* lbr_select */
EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */
+ EXTRA_REG_FE = 4, /* fe_* */
EXTRA_REG_MAX /* number of entries needed */
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3fefebfbdf4b..f63360be2238 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ /*
+ * Note: the low 8 bits of the eventsel code are not a contiguous field;
+ * some of the bits trigger #GP and are masked out.
+ */
+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
EVENT_EXTRA_END
};
@@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
- INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
+ INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
EVENT_CONSTRAINT_END
};
@@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
+PMU_FORMAT_ATTR(frontend, "config1:0-23");
+
static struct attribute *intel_arch3_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = {
NULL,
};
+static struct attribute *skl_format_attr[] = {
+ &format_attr_frontend.attr,
+ NULL,
+};
+
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
@@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+ skl_format_attr);
WARN_ON(!x86_pmu.format_attrs);
x86_pmu.cpu_events = hsw_events_attrs;
pr_cont("Skylake events, ");
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index 086b12eae794..f32ac13934f2 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -10,12 +10,12 @@ enum perf_msr_id {
PERF_MSR_EVENT_MAX,
};
-bool test_aperfmperf(int idx)
+static bool test_aperfmperf(int idx)
{
return boot_cpu_has(X86_FEATURE_APERFMPERF);
}
-bool test_intel(int idx)
+static bool test_intel(int idx)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
boot_cpu_data.x86 != 6)
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 3d423a101fae..608fb26c7254 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
{ X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
{ X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
- { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 },
+ { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
{ X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
{ X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
{ X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e068d6683dba..74ca2fe7a0b3 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
}
#ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
- unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
- int *nr_ranges = arg;
+ unsigned int *nr_ranges = arg;
(*nr_ranges)++;
return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->image = image;
- walk_system_ram_range(0, -1, &nr_ranges,
+ walk_system_ram_res(0, -1, &nr_ranges,
get_nr_ram_ranges_callback);
ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f68e48f5f6c2..c2130aef3f9d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
#include <asm/timer.h>
#include <asm/special_insns.h>
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");
/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
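The nop stub is rewritten in assembly because an empty C function may still be given a prologue/epilogue (e.g. a frame-pointer push) depending on compiler flags, and the patched call sites assume the stub clobbers nothing. A hedged sketch of the same idea with a hypothetical symbol name, assuming x86 and GNU toolchain syntax:

/* Illustrative: a bare "ret"-only stub, so the compiler cannot add
 * prologue/epilogue code (push %rbp; mov %rsp,%rbp; ... pop %rbp) that
 * would clobber registers or the stack. */
extern void my_nop_stub(void);

asm(".pushsection .text\n"
    ".global my_nop_stub\n"
    "my_nop_stub:\n\t"
    "ret\n\t"
    ".type my_nop_stub, @function\n\t"
    ".size my_nop_stub, . - my_nop_stub\n\t"
    ".popsection");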
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d0e62ae8516..39e585a554b7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long start, bottom, top, sp, fp, ip;
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ start = (unsigned long)task_stack_page(p);
+ if (!start)
+ return 0;
+
+ /*
+ * Layout of the stack page:
+ *
+ * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+ * ----------- bottom = start + sizeof(thread_info)
+ * thread_info
+ * ----------- start
+ *
+ * The task's stack pointer points at the location where the
+ * frame pointer is stored. The data on the stack is:
+ * ... IP FP ... IP FP
+ *
+ * We need to read FP and IP, so we need to adjust the upper
+ * bound by another unsigned long.
+ */
+ top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+ top -= 2 * sizeof(unsigned long);
+ bottom = start + sizeof(struct thread_info);
+
+ sp = READ_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
+ return 0;
+
+ fp = READ_ONCE(*(unsigned long *)sp);
+ do {
+ if (fp < bottom || fp > top)
+ return 0;
+ ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+ if (!in_sched_functions(ip))
+ return ip;
+ fp = READ_ONCE(*(unsigned long *)fp);
+ } while (count++ < 16 && p->state != TASK_RUNNING);
+ return 0;
+}
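get_wchan() is what backs /proc/<pid>/wchan; the walker above follows saved (IP, FP) pairs within the task's stack page until it finds a return address outside the scheduler. A small userspace sketch that consumes the result via procfs (standard paths, nothing from this patch assumed):

#include <stdio.h>

/* Print the wait channel of a given pid, as resolved by get_wchan(). */
int main(int argc, char **argv)
{
	char path[64], wchan[128];
	FILE *f;

	if (argc != 2)
		return 1;
	snprintf(path, sizeof(path), "/proc/%s/wchan", argv[1]);
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(wchan, sizeof(wchan), f))
		printf("pid %s is sleeping in %s\n", argv[1], wchan);
	fclose(f);
	return 0;
}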
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c13df2c735f8..737527b40e5b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
return prev_p;
}
-
-#define top_esp (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long bp, sp, ip;
- unsigned long stack_page;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- stack_page = (unsigned long)task_stack_page(p);
- sp = p->thread.sp;
- if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
- return 0;
- /* include/asm-i386/system.h:switch_to() pushes bp last. */
- bp = *(unsigned long *) sp;
- do {
- if (bp < stack_page || bp > top_ebp+stack_page)
- return 0;
- ip = *(unsigned long *) (bp+4);
- if (!in_sched_functions(ip))
- return ip;
- bp = *(unsigned long *) bp;
- } while (count++ < 16);
- return 0;
-}
-
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3c1bbcf12924..b35921a670b2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32)
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long stack;
- u64 fp, ip;
- int count = 0;
-
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- stack = (unsigned long)task_stack_page(p);
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
- return 0;
- fp = *(u64 *)(p->thread.sp);
- do {
- if (fp < (unsigned long)stack ||
- fp >= (unsigned long)stack+THREAD_SIZE)
- return 0;
- ip = *(u64 *)(fp+8);
- if (!in_sched_functions(ip))
- return ip;
- fp = *(u64 *)fp;
- } while (count++ < 16);
- return 0;
-}
-
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
int ret = 0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 69088a1ba509..ff606f507913 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3322,7 +3322,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
break;
reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
- leaf);
+ iterator.level);
}
walk_shadow_page_lockless_end(vcpu);
@@ -3614,7 +3614,7 @@ static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct rsvd_bits_validate *rsvd_check,
int maxphyaddr, int level, bool nx, bool gbpages,
- bool pse)
+ bool pse, bool amd)
{
u64 exb_bit_rsvd = 0;
u64 gbpages_bit_rsvd = 0;
@@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
* Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
* leaf entries) on AMD CPUs only.
*/
- if (guest_cpuid_is_amd(vcpu))
+ if (amd)
nonleaf_bit8_rsvd = rsvd_bits(8, 8);
switch (level) {
@@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
cpuid_maxphyaddr(vcpu), context->root_level,
context->nx, guest_cpuid_has_gbpages(vcpu),
- is_pse(vcpu));
+ is_pse(vcpu), guest_cpuid_is_amd(vcpu));
}
static void
@@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
+ /*
+ * Passing "true" to the last argument is okay; it adds a check
+ * on bit 8 of the SPTEs which KVM doesn't use anyway.
+ */
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
boot_cpu_data.x86_phys_bits,
context->shadow_root_level, context->nx,
- guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+ guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+ true);
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
+static inline bool boot_cpu_is_amd(void)
+{
+ WARN_ON_ONCE(!tdp_enabled);
+ return shadow_x_mask == 0;
+}
+
/*
* the direct page table on host, use as much mmu features as
* possible, however, kvm currently does not do execution-protection.
@@ -3764,11 +3775,11 @@ static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
- if (guest_cpuid_is_amd(vcpu))
+ if (boot_cpu_is_amd())
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
boot_cpu_data.x86_phys_bits,
context->shadow_root_level, false,
- cpu_has_gbpages, true);
+ cpu_has_gbpages, true, true);
else
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
boot_cpu_data.x86_phys_bits,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fdb8cb63a6c0..2f9ed1ff0632 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -202,6 +202,7 @@ module_param(npt, int, S_IRUGO);
static int nested = true;
module_param(nested, int, S_IRUGO);
+static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -513,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->vmcb->control.next_rip != 0) {
- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}
@@ -865,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
-#define MTRR_TYPE_UC_MINUS 7
-#define MTRR2PROTVAL_INVALID 0xff
-
-static u8 mtrr2protval[8];
-
-static u8 fallback_mtrr_type(int mtrr)
-{
- /*
- * WT and WP aren't always available in the host PAT. Treat
- * them as UC and UC- respectively. Everything else should be
- * there.
- */
- switch (mtrr)
- {
- case MTRR_TYPE_WRTHROUGH:
- return MTRR_TYPE_UNCACHABLE;
- case MTRR_TYPE_WRPROT:
- return MTRR_TYPE_UC_MINUS;
- default:
- BUG();
- }
-}
-
-static void build_mtrr2protval(void)
-{
- int i;
- u64 pat;
-
- for (i = 0; i < 8; i++)
- mtrr2protval[i] = MTRR2PROTVAL_INVALID;
-
- /* Ignore the invalid MTRR types. */
- mtrr2protval[2] = 0;
- mtrr2protval[3] = 0;
-
- /*
- * Use host PAT value to figure out the mapping from guest MTRR
- * values to nested page table PAT/PCD/PWT values. We do not
- * want to change the host PAT value every time we enter the
- * guest.
- */
- rdmsrl(MSR_IA32_CR_PAT, pat);
- for (i = 0; i < 8; i++) {
- u8 mtrr = pat >> (8 * i);
-
- if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
- mtrr2protval[mtrr] = __cm_idx2pte(i);
- }
-
- for (i = 0; i < 8; i++) {
- if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
- u8 fallback = fallback_mtrr_type(i);
- mtrr2protval[i] = mtrr2protval[fallback];
- BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
- }
- }
-}
-
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -989,7 +932,6 @@ static __init int svm_hardware_setup(void)
} else
kvm_disable_tdp();
- build_mtrr2protval();
return 0;
err:
@@ -1144,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - tsc;
}
-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
-{
- struct kvm_vcpu *vcpu = &svm->vcpu;
-
- /* Unlike Intel, AMD takes the guest's CR0.CD into account.
- *
- * AMD does not have IPAT. To emulate it for the case of guests
- * with no assigned devices, just set everything to WB. If guests
- * have assigned devices, however, we cannot force WB for RAM
- * pages only, so use the guest PAT directly.
- */
- if (!kvm_arch_has_assigned_device(vcpu->kvm))
- *g_pat = 0x0606060606060606;
- else
- *g_pat = vcpu->arch.pat;
-}
-
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
- u8 mtrr;
-
- /*
- * 1. MMIO: trust guest MTRR, so same as item 3.
- * 2. No passthrough: always map as WB, and force guest PAT to WB as well
- * 3. Passthrough: can't guarantee the result, try to trust guest.
- */
- if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
- return 0;
-
- if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
- kvm_read_cr0(vcpu) & X86_CR0_CD)
- return _PAGE_NOCACHE;
-
- mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
- return mtrr2protval[mtrr];
-}
-
static void init_vmcb(struct vcpu_svm *svm, bool init_event)
{
struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1263,7 +1168,8 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
* It also updates the guest-visible cr0 value.
*/
- (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+ svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+ kvm_mmu_reset_context(&svm->vcpu);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
@@ -1276,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
- svm_set_guest_pat(svm, &save->g_pat);
save->cr3 = 0;
save->cr4 = 0;
}
@@ -1671,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (!vcpu->fpu_active)
cr0 |= X86_CR0_TS;
-
- /* These are emulated via page tables. */
- cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-
+ /*
+ * Re-enable caching here because the QEMU BIOS
+ * does not do it - otherwise reboot is noticeably
+ * delayed.
+ */
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
update_cr0_intercept(svm);
@@ -3349,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
- case MSR_IA32_CR_PAT:
- if (npt_enabled) {
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
- return 1;
- vcpu->arch.pat = data;
- svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
- mark_dirty(svm->vmcb, VMCB_NPT);
- break;
- }
- /* fall through */
default:
return kvm_set_msr_common(vcpu, msr);
}
@@ -4193,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void)
return true;
}
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+ return 0;
+}
+
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64076740251e..06ef4908ba61 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
u64 ipat = 0;
/* For VT-d and EPT combination
- * 1. MMIO: guest may want to apply WC, trust it.
+ * 1. MMIO: always map as UC
* 2. EPT with VT-d:
* a. VT-d without snooping control feature: can't guarantee the
- * result, try to trust guest. So the same as item 1.
+ * result, try to trust guest.
* b. VT-d with snooping control feature: snooping control feature of
* VT-d engine can guarantee the cache correctness. Just set it
* to WB to keep consistent with host. So the same as item 3.
* 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
* consistent with host MTRR
*/
- if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+ if (is_mmio) {
+ cache = MTRR_TYPE_UNCACHABLE;
+ goto exit;
+ }
+
+ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_WRBACK;
goto exit;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6bbb0dfb99d0..92511d4b7236 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->pvclock_set_guest_stopped_request = false;
}
- pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
-
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
&vcpu->requests);
ka->boot_vcpu_runs_old_kvmclock = tmp;
-
- ka->kvmclock_offset = -get_kernel_ns();
}
vcpu->arch.time = data;
@@ -2190,6 +2186,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_K8_SYSCFG:
+ case MSR_K8_TSEG_ADDR:
+ case MSR_K8_TSEG_MASK:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K8_INT_PENDING_MSG:
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 30564e2752d3..df48430c279b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem().
*/
all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
rodata_test();
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 09d3afc0a181..dc78a4a9a466 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -166,6 +166,7 @@ void pcibios_fixup_bus(struct pci_bus *b)
{
struct pci_dev *dev;
+ pci_read_bridge_bases(b);
list_for_each_entry(dev, &b->devices, bus_list)
pcibios_fixup_device_resources(dev);
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1db84c0758b7..6a28ded74211 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -705,6 +705,70 @@ out:
}
/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+ /* Initial call */
+ if (!entry)
+ return memmap.map_end - memmap.desc_size;
+
+ entry -= memmap.desc_size;
+ if (entry < memmap.map)
+ return NULL;
+
+ return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map, @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+ /*
+ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+ * config table feature requires us to map all entries
+ * in the same order as they appear in the EFI memory
+ * map. That is to say, entry N must have a lower
+ * virtual address than entry N+1. This is because the
+ * firmware toolchain leaves relative references in
+ * the code/data sections, which are split and become
+ * separate EFI memory regions. Mapping things
+ * out-of-order leads to the firmware accessing
+ * unmapped addresses.
+ *
+ * Since we need to map things this way whether or not
+ * the kernel actually makes use of
+ * EFI_PROPERTIES_TABLE, let's just switch to this
+ * scheme by default for 64-bit.
+ */
+ return efi_map_next_entry_reverse(entry);
+ }
+
+ /* Initial call */
+ if (!entry)
+ return memmap.map;
+
+ entry += memmap.desc_size;
+ if (entry >= memmap.map_end)
+ return NULL;
+
+ return entry;
+}
+
+/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
*/
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
unsigned long left = 0;
efi_memory_desc_t *md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
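The two helpers give efi_map_regions() a single iterator that walks the memory map either forward or backward in desc_size strides, instead of open-coded pointer arithmetic. A minimal sketch of the reverse-stride walk over an opaque descriptor table; map, map_end and desc_size stand in for the memmap fields and the function name is hypothetical:

#include <stddef.h>

/* Illustrative: iterate a table of fixed-size records from the last
 * entry down to the first, mirroring efi_map_next_entry_reverse(). */
static void *next_entry_reverse(void *entry, void *map, void *map_end,
				size_t desc_size)
{
	char *p = entry;

	if (!p)					/* initial call */
		return (char *)map_end - desc_size;

	p -= desc_size;
	if (p < (char *)map)			/* walked past the start */
		return NULL;

	return p;
}

/* Typical caller: void *p = NULL; while ((p = next_entry_reverse(p, map, end, sz))) { ... } */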
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 63c223dff5f1..b56855a1382a 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -28,4 +28,5 @@ generic-y += statfs.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index d27b4dcf221f..b848cc3dc913 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -210,6 +210,10 @@ subsys_initcall(pcibios_init);
void pcibios_fixup_bus(struct pci_bus *bus)
{
+ if (bus->parent) {
+ /* This is a subordinate bridge */
+ pci_read_bridge_bases(bus);
+ }
}
void pcibios_set_master(struct pci_dev *dev)
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 1e28ddb656b8..8764c241e5bb 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+ const struct cpumask *online_mask)
{
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
cpumask_clear(cpus);
nr_cpus = nr_uniq_cpus = 0;
- for_each_online_cpu(i) {
+ for_each_cpu(i, online_mask) {
nr_cpus++;
first_sibling = get_first_sibling(i);
if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
queue = 0;
for_each_possible_cpu(i) {
- if (!cpu_online(i)) {
+ if (!cpumask_test_cpu(i, online_mask)) {
map[i] = 0;
continue;
}
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
if (!map)
return NULL;
- if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+ if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
return map;
kfree(map);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 279c5d674edf..788fffd9b409 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
unsigned int i, first = 1;
ssize_t ret = 0;
- blk_mq_disable_hotplug();
-
for_each_cpu(i, hctx->cpumask) {
if (first)
ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
first = 0;
}
- blk_mq_enable_hotplug();
-
ret += sprintf(ret + page, "\n");
return ret;
}
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
struct blk_mq_ctx *ctx;
int i;
- if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+ if (!hctx->nr_ctx)
return;
hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
struct blk_mq_ctx *ctx;
int i, ret;
- if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+ if (!hctx->nr_ctx)
return 0;
ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
struct blk_mq_ctx *ctx;
int i, j;
+ blk_mq_disable_hotplug();
+
queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_unregister_hctx(hctx);
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
kobject_put(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
+
+ q->mq_sysfs_init_done = false;
+ blk_mq_enable_hotplug();
}
static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
struct blk_mq_hw_ctx *hctx;
int ret, i;
+ blk_mq_disable_hotplug();
+
blk_mq_sysfs_init(q);
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
- return ret;
+ goto out;
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
queue_for_each_hw_ctx(q, hctx, i) {
- hctx->flags |= BLK_MQ_F_SYSFS_UP;
ret = blk_mq_register_hctx(hctx);
if (ret)
break;
}
- if (ret) {
+ if (ret)
blk_mq_unregister_disk(disk);
- return ret;
- }
+ else
+ q->mq_sysfs_init_done = true;
+out:
+ blk_mq_enable_hotplug();
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
+ if (!q->mq_sysfs_init_done)
+ return;
+
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
}
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i, ret = 0;
+ if (!q->mq_sysfs_init_done)
+ return ret;
+
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9115c6d59948..ed96474d75cb 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
- struct blk_mq_tags *tags = hctx->tags;
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ struct blk_mq_tags *tags = hctx->tags;
+
+ /*
+ * If no software queues are currently mapped to this
+ * hardware queue, there's nothing to check
+ */
+ if (!blk_mq_hw_queue_mapped(hctx))
+ continue;
+
+ if (tags->nr_reserved_tags)
+ bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+ bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+ false);
+ }
- if (tags->nr_reserved_tags)
- bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
- bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
- false);
}
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 9eb2cf4f01cb..d468a79f2c4a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+ void *priv);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2d67b4047a0..7785ae96267a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq)
* Ends all I/O on a request. It does not handle partial completions.
* The actual completion happens out-of-order, through a IPI handler.
**/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
{
struct request_queue *q = rq->q;
if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq))
+ if (!blk_mark_rq_complete(rq)) {
+ rq->errors = error;
__blk_mq_complete_request(rq);
+ }
}
EXPORT_SYMBOL(blk_mq_complete_request);
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* If a request wasn't started before the queue was
* marked dying, kill it here or it'll go unnoticed.
*/
- if (unlikely(blk_queue_dying(rq->q))) {
- rq->errors = -EIO;
- blk_mq_complete_request(rq);
- }
+ if (unlikely(blk_queue_dying(rq->q)))
+ blk_mq_complete_request(rq, -EIO);
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
.next = 0,
.next_set = 0,
};
- struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- /*
- * If not software queues are currently mapped to this
- * hardware queue, there's nothing to check
- */
- if (!blk_mq_hw_queue_mapped(hctx))
- continue;
-
- blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
- }
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
if (data.next_set) {
data.next = blk_rq_timeout(round_jiffies_up(data.next));
mod_timer(&q->timeout, data.next);
} else {
+ struct blk_mq_hw_ctx *hctx;
+
queue_for_each_hw_ctx(q, hctx, i) {
/* the hctx may be unmapped, so check it here */
if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
}
}
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+ const struct cpumask *online_mask)
{
unsigned int i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
+ /*
+ * Avoid others reading an incomplete hctx->cpumask through sysfs
+ */
+ mutex_lock(&q->sysfs_lock);
+
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
*/
queue_for_each_ctx(q, ctx, i) {
/* If the cpu isn't online, the cpu is mapped to first hctx */
- if (!cpu_online(i))
+ if (!cpumask_test_cpu(i, online_mask))
continue;
hctx = q->mq_ops->map_queue(q, i);
cpumask_set_cpu(i, hctx->cpumask);
- cpumask_set_cpu(i, hctx->tags->cpumask);
ctx->index_hw = hctx->nr_ctx;
hctx->ctxs[hctx->nr_ctx++] = ctx;
}
+ mutex_unlock(&q->sysfs_lock);
+
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_ctxmap *map = &hctx->ctx_map;
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx->next_cpu = cpumask_first(hctx->cpumask);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
+
+ queue_for_each_ctx(q, ctx, i) {
+ if (!cpumask_test_cpu(i, online_mask))
+ continue;
+
+ hctx = q->mq_ops->map_queue(q, i);
+ cpumask_set_cpu(i, hctx->tags->cpumask);
+ }
}
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
kfree(hctx);
}
+ kfree(q->mq_map);
+ q->mq_map = NULL;
+
kfree(q->queue_hw_ctx);
/* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (blk_mq_init_hw_queues(q, set))
goto err_hctxs;
+ get_online_cpus();
mutex_lock(&all_q_mutex);
- list_add_tail(&q->all_q_node, &all_q_list);
- mutex_unlock(&all_q_mutex);
+ list_add_tail(&q->all_q_node, &all_q_list);
blk_mq_add_queue_tag_set(set, q);
+ blk_mq_map_swqueue(q, cpu_online_mask);
- blk_mq_map_swqueue(q);
+ mutex_unlock(&all_q_mutex);
+ put_online_cpus();
return q;
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
+ mutex_lock(&all_q_mutex);
+ list_del_init(&q->all_q_node);
+ mutex_unlock(&all_q_mutex);
+
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
blk_mq_free_hw_queues(q, set);
percpu_ref_exit(&q->mq_usage_counter);
-
- kfree(q->mq_map);
-
- q->mq_map = NULL;
-
- mutex_lock(&all_q_mutex);
- list_del_init(&q->all_q_node);
- mutex_unlock(&all_q_mutex);
}
/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+ const struct cpumask *online_mask)
{
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
blk_mq_sysfs_unregister(q);
- blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+ blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
/*
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
* involves free and re-allocate memory, worthy doing?)
*/
- blk_mq_map_swqueue(q);
+ blk_mq_map_swqueue(q, online_mask);
blk_mq_sysfs_register(q);
}
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
unsigned long action, void *hcpu)
{
struct request_queue *q;
+ int cpu = (unsigned long)hcpu;
+ /*
+ * New online cpumask which is going to be set in this hotplug event.
+ * Declare this cpumask as static, since cpu-hotplug operations are
+ * invoked one by one and dynamically allocating it here could fail.
+ */
+ static struct cpumask online_new;
/*
- * Before new mappings are established, hotadded cpu might already
- * start handling requests. This doesn't break anything as we map
- * offline CPUs to first hardware queue. We will re-init the queue
- * below to get optimal settings.
+ * Before a hot-added CPU starts handling requests, new mappings must
+ * be established. Otherwise, requests in the hw queue might
+ * never be dispatched.
+ *
+ * For example, there is a single hw queue (hctx) and two CPU queues
+ * (ctx0 for CPU0, and ctx1 for CPU1).
+ *
+ * Now CPU1 is just onlined and a request is inserted into
+ * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
+ * still zero.
+ *
+ * And then while running hw queue, flush_busy_ctxs() finds bit0 is
+ * set in pending bitmap and tries to retrieve requests in
+ * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0,
+ * so the request in ctx1->rq_list is ignored.
*/
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
- action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ cpumask_copy(&online_new, cpu_online_mask);
+ break;
+ case CPU_UP_PREPARE:
+ cpumask_copy(&online_new, cpu_online_mask);
+ cpumask_set_cpu(cpu, &online_new);
+ break;
+ default:
return NOTIFY_OK;
+ }
mutex_lock(&all_q_mutex);
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
}
list_for_each_entry(q, &all_q_list, all_q_node)
- blk_mq_queue_reinit(q);
+ blk_mq_queue_reinit(q, &online_new);
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_unfreeze_queue(q);
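The notifier now computes the cpumask the remap should target before the CPU actually starts running requests, so a freshly onlined CPU is never left pointing at stale ctx->index_hw state. A hedged, kernel-context sketch of just that mask computation (assumes <linux/cpu.h> and <linux/cpumask.h>; cpu_online_mask and the CPU_* action codes are the kernel's, the function name is hypothetical):

/* Illustrative: build the "online after this event" mask that the
 * queue remap should be based on, per hotplug action. */
static bool compute_online_new(unsigned long action, int cpu,
			       struct cpumask *online_new)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		/* CPU is already gone (or never arrived): current mask is right. */
		cpumask_copy(online_new, cpu_online_mask);
		return true;
	case CPU_UP_PREPARE:
		/* CPU is not online yet, but soon will be: add it by hand. */
		cpumask_copy(online_new, cpu_online_mask);
		cpumask_set_cpu(cpu, online_new);
		return true;
	default:
		return false;	/* nothing for blk-mq to do */
	}
}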
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6a48c4c0d8a2..f4fea7964910 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void);
* CPU -> queue mappings
*/
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+ const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
/*
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6d88dd15c98d..197096632412 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
srlen = cert->raw_serial_size;
q = cert->raw_serial;
}
- if (srlen > 1 && *q == 0) {
- srlen--;
- q++;
- }
ret = -ENOMEM;
desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a839c60d..42c66b64c12c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
goto err_exit;
mutex_lock(&ec->mutex);
+ result = -ENODATA;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
+ result = 0;
q->handler = acpi_ec_get_query_handler(handler);
ec_dbg_evt("Query(0x%02x) scheduled",
q->handler->query_bit);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9beab19..c9336751e5e3 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ acpi_isa_irq_available(dev->irq) &&
(acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98e3ea0..7c8408b946ca 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
PIRQ_PENALTY_PCI_POSSIBLE;
}
}
- /* Add a penalty for the SCI */
- acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
return 0;
}
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
irq = link->irq.possible[i];
}
}
+ if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+ printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+ "Try pci=noacpi or acpi=off\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device));
+ return -ENODEV;
+ }
/* Attempt to enable the link device at this IRQ. */
if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
}
}
+bool acpi_isa_irq_available(int irq)
+{
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+ acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
* PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index a8da3a50e374..0f5cb37636bc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev)
kfree(he_dev->rbpl_virt);
kfree(he_dev->rbpl_table);
-
- if (he_dev->rbpl_pool)
- dma_pool_destroy(he_dev->rbpl_pool);
+ dma_pool_destroy(he_dev->rbpl_pool);
if (he_dev->rbrq_base)
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
@@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev)
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tpdrq_base, he_dev->tpdrq_phys);
- if (he_dev->tpd_pool)
- dma_pool_destroy(he_dev->tpd_pool);
+ dma_pool_destroy(he_dev->tpd_pool);
if (he_dev->pci_dev) {
pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 74e18b0a6d89..3d7fb6516f74 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg)
continue;
}
- skb = alloc_skb(size + 1, GFP_ATOMIC);
+ /* Use netdev_alloc_skb() because it adds NET_SKB_PAD of
+ * headroom, and ensures we can route packets back out an
+ * Ethernet interface (for example) without having to
+ * reallocate. Adding NET_IP_ALIGN also ensures that both
+ * PPPoATM and PPPoEoBR2684 packets end up aligned. */
+ skb = netdev_alloc_skb_ip_align(NULL, size + 1);
if (!skb) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
@@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg)
/* Allocate RX skbs for any ports which need them */
if (card->using_dma && card->atmdev[port] &&
!card->rx_skb[port]) {
- struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
+ /* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN
+ * here; the FPGA can only DMA to addresses which are
+ * aligned to 4 bytes. */
+ struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
if (skb) {
SKB_CB(skb)->dma_addr =
dma_map_single(&card->dev->dev, skb->data,
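The switch from alloc_skb() to netdev_alloc_skb_ip_align() is about reserving headroom up front: NET_SKB_PAD leaves room for lower-layer headers if the packet is later forwarded, and NET_IP_ALIGN keeps the IP header aligned. A simplified, kernel-context sketch of roughly what the helper buys (assumes <linux/skbuff.h>; not the actual net/core implementation):

/* Illustrative: roughly what netdev_alloc_skb_ip_align(NULL, len) does
 * compared with a bare alloc_skb(len, GFP_ATOMIC). */
static struct sk_buff *rx_alloc_skb(unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Reserve the padding so skb->data starts past it: headers pushed
	 * later (e.g. when routing back out an Ethernet device) fit without
	 * a reallocation, and the IP header lands on an aligned boundary. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	return skb;
}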
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 764280a91776..e9fd32e91668 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu) /* skip itself */
continue;
+
sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ if (!sib_cpu_ci->info_list)
+ continue;
+
sib_leaf = sib_cpu_ci->info_list + index;
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
static void free_cache_attributes(unsigned int cpu)
{
+ if (!per_cpu_cacheinfo(cpu))
+ return;
+
cache_shared_cpu_map_remove(cpu);
kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DEAD:
cache_remove_dev(cpu);
- if (per_cpu_cacheinfo(cpu))
- free_cache_attributes(cpu);
+ free_cache_attributes(cpu);
break;
}
return notifier_from_errno(rc);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 28cd75c535b0..7ae7cd990fbf 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
u32 microvolt[3] = {0};
int count, ret;
- count = of_property_count_u32_elems(opp->np, "opp-microvolt");
- if (!count)
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!of_find_property(opp->np, "opp-microvolt", NULL))
return 0;
+ count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ if (count < 0) {
+ dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+ __func__, count);
+ return count;
+ }
+
/* There can be one or three elements here */
if (count != 1 && count != 3) {
dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* share a common logic which is isolated here.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*
* Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
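The reworked opp-microvolt check separates "property absent" (fine, return 0) from "property present but malformed" (error), because of_property_count_u32_elems() returns a negative errno for a bad entry rather than 0. A compact, kernel-context sketch of the same pattern for any optional u32-array DT property (assumes <linux/of.h> and <linux/device.h>; the property name is only an example):

/* Illustrative: parse an optional DT property that, when present,
 * must contain a readable array of u32 cells. */
static int parse_optional_u32_array(struct device *dev,
				    struct device_node *np)
{
	int count;

	/* Absent property: not an error for an optional binding. */
	if (!of_find_property(np, "example-prop", NULL))
		return 0;

	/* Present but unreadable/malformed: propagate the errno. */
	count = of_property_count_u32_elems(np, "example-prop");
	if (count < 0) {
		dev_err(dev, "invalid example-prop (%d)\n", count);
		return count;
	}

	/* ... validate count and read the values ... */
	return 0;
}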
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f9889b6bc02c..674f800a3b57 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
{
const bool write = cmd->rq->cmd_flags & REQ_WRITE;
struct loop_device *lo = cmd->rq->q->queuedata;
- int ret = -EIO;
+ int ret = 0;
- if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+ if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ ret = -EIO;
goto failed;
+ }
ret = do_req_filebacked(lo, cmd->rq);
-
failed:
- if (ret)
- cmd->rq->errors = -EIO;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
}
static void loop_queue_write_work(struct work_struct *work)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a295b98c6bae..1c9e4fe5aa44 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq, cmd->rq->errors);
break;
case NULL_Q_RQ:
blk_complete_request(cmd->rq);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b97fc3fe0916..6f04771f1019 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
if (cmd_rq->ctx == CMD_CTX_CANCELLED)
- req->errors = -EINTR;
- else
- req->errors = status;
+ status = -EINTR;
} else {
- req->errors = nvme_error_status(status);
+ status = nvme_error_status(status);
}
- } else
- req->errors = 0;
+ }
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
u32 result = le32_to_cpup(&cqe->result);
req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
}
nvme_free_iod(nvmeq->dev, iod);
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, status);
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
req->cmd_type != REQ_TYPE_DRV_PRIV) {
- req->errors = -EFAULT;
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, -EFAULT);
return BLK_MQ_RQ_QUEUE_OK;
}
}
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
list_sort(NULL, &dev->namespaces, ns_cmp);
}
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq;
+ int i;
+
+ for (i = 0; i < dev->online_queues; i++) {
+ nvmeq = dev->queues[i];
+
+ if (!nvmeq->tags || !(*nvmeq->tags))
+ continue;
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ blk_mq_tags_cpumask(*nvmeq->tags));
+ }
+}
+
static void nvme_dev_scan(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
return;
nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
kfree(ctrl);
+ nvme_set_irq_hints(dev);
}
/*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
- struct nvme_queue *nvmeq;
- int i;
-
- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
-
- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
-}
-
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result)
goto free_tags;
- nvme_set_irq_hints(dev);
-
dev->event_limit = 1;
return result;
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
} else {
nvme_unfreeze_queues(dev);
nvme_dev_add(dev);
- nvme_set_irq_hints(dev);
}
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e93899cc6f60..6ca35495a5be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- blk_mq_complete_request(vbr->req);
+ blk_mq_complete_request(vbr->req, vbr->req->errors);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index deb3f001791f..767657565de6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
+ struct pending_req *req, *n;
+ int i = 0, j;
+
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
/* Remove all persistent grants and the cache of ballooned pages. */
xen_blkbk_free_caches(blkif);
+ /* Check that there is no request in use */
+ list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+ list_del(&req->free_list);
+
+ for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+ kfree(req->segments[j]);
+
+ for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+ kfree(req->indirect_pages[j]);
+
+ kfree(req);
+ i++;
+ }
+
+ WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+ blkif->nr_ring_pages = 0;
+
return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
{
- struct pending_req *req, *n;
- int i = 0, j;
xen_blkif_disconnect(blkif);
xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
BUG_ON(!list_empty(&blkif->free_pages));
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
- /* Check that there is no request in use */
- list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
- list_del(&req->free_list);
-
- for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
- kfree(req->segments[j]);
-
- for (j = 0; j < MAX_INDIRECT_PAGES; j++)
- kfree(req->indirect_pages[j]);
-
- kfree(req);
- i++;
- }
-
- WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
kmem_cache_free(xen_blkif_cachep, blkif);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0823a96902f8..611170896b8c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int error;
spin_lock_irqsave(&info->io_lock, flags);
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
continue;
}
- req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, error);
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
- if (unlikely(req->errors)) {
- if (req->errors == -EOPNOTSUPP)
- req->errors = 0;
+ if (unlikely(error)) {
+ if (error == -EOPNOTSUPP)
+ error = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, error);
break;
default:
BUG();
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index c37cf754a985..3c77645405e5 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev)
if (IS_ERR(ctx->csr_base))
return PTR_ERR(ctx->csr_base);
- ctx->irq = platform_get_irq(pdev, 0);
- if (ctx->irq < 0) {
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
- return ctx->irq;
+ return rc;
}
+ ctx->irq = rc;
dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
ctx->csr_base, ctx->irq);
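platform_get_irq() returns a negative errno on failure, so storing the result straight into a context field and then testing that field only works if the field is a signed type; capturing the return value in a local int first, as the fix above does, avoids the problem. A generic sketch of the pattern (assumes <linux/platform_device.h>; struct example_ctx and the function name are hypothetical):

struct example_ctx {
	int irq;
};

/* Illustrative: probe-time IRQ lookup that checks the raw return value
 * before committing it to the device context. */
static int example_probe_irq(struct platform_device *pdev,
			     struct example_ctx *ctx)
{
	int rc;

	rc = platform_get_irq(pdev, 0);
	if (rc < 0) {
		dev_err(&pdev->dev, "no IRQ resource\n");
		return rc;
	}
	ctx->irq = rc;

	return 0;
}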
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index bb2c2b050964..d3c1742ded1a 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
bc_timer.freq = clk_get_rate(timer_clk);
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
return;
}
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index edacf3902e10..1cea08cf603e 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
int irq, error;
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
return;
}
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b60698b30d30..bc2a55bc35e4 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
int mv_cesa_queue_req(struct crypto_async_request *req);
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+ int ret)
+{
+ /*
+ * The queue still had some space, the request was queued
+ * normally, so there's no need to clean it up.
+ */
+ if (ret == -EINPROGRESS)
+ return false;
+
+ /*
+ * The queue had no space left, but since the request is
+ * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+ * the backlog and will be processed later. There's no need to
+ * clean it up.
+ */
+ if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return false;
+
+ /* Request wasn't queued, we need to clean it up */
+ return true;
+}
+
/* TDMA functions */
static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
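The helper centralizes the "did the request actually leave our hands?" decision: -EINPROGRESS means it was queued, -EBUSY together with CRYPTO_TFM_REQ_MAY_BACKLOG means it was backlogged and will be completed later, and anything else means the caller still owns the request and must release its resources. A sketch of the call-site shape that the cipher and hash paths below adopt (function name hypothetical, everything else taken from this patch):

/* Illustrative call site: enqueue, then clean up only when the request
 * was neither queued nor backlogged. */
static int example_submit(struct ablkcipher_request *req)
{
	int ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}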
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0745cf3b9c0e..3df2f4e7adb2 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-
creq->req.base.engine = engine;
if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
return ret;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
@@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
return ret;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
@@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
return ret;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index ae9272eb9c1a..e8d0d7128137 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
return 0;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS) {
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
- return ret;
- }
return ret;
}
@@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req)
return 0;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
@@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
return 0;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index a57b4194de28..0a5ca0ba5d64 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,6 +88,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
+ if (accel_dev->is_vf)
+ return;
+
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4bfd330..dd24375b76dd 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
return desc;
}
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+ memset(&desc->lld, 0, sizeof(desc->lld));
+ INIT_LIST_HEAD(&desc->descs_list);
+ desc->direction = DMA_TRANS_NONE;
+ desc->xfer_size = 0;
+ desc->active_xfer = false;
+}
+
/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->free_descs_list,
struct at_xdmac_desc, desc_node);
list_del(&desc->desc_node);
- desc->active_xfer = false;
+ at_xdmac_init_used_desc(desc);
}
return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
if (xt->src_inc) {
if (xt->src_sgl)
- chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+ chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
else
chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
}
if (xt->dst_inc) {
if (xt->dst_sgl)
- chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+ chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
else
chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c8e3d5..09479d4be4db 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
mutex_lock(&dma_list_mutex);
if (chan->client_count == 0) {
+ struct dma_device *device = chan->device;
+
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
err = dma_chan_get(chan);
- if (err)
+ if (err) {
pr_debug("%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
+ chan = NULL;
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+ }
} else
chan = NULL;
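
The dma_get_slave_channel() change takes the DMA_PRIVATE capability and bumps privatecnt before attempting dma_chan_get(), then undoes both on failure so the counters stay balanced. A small standalone sketch of that acquire-with-rollback pattern; the names are illustrative, not the dmaengine API:

#include <stdbool.h>
#include <stdio.h>

static int privatecnt;
static bool private_cap;

static int try_get_channel(bool get_fails)
{
	private_cap = true;		/* take the capability up front */
	privatecnt++;
	if (get_fails) {		/* the underlying get failed */
		if (--privatecnt == 0)
			private_cap = false;	/* roll the capability back */
		return -1;
	}
	return 0;
}

int main(void)
{
	try_get_channel(true);
	printf("privatecnt=%d cap=%d\n", privatecnt, private_cap);
	return 0;
}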
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87fa1edd..bedce038c6e2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
- int r = nr_channels - i - 1;
dwc->chan.device = &dw->dma;
dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = r;
+ dwc->priority = nr_channels - i - 1;
else
dwc->priority = i;
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = chip->regs + r * sizeof(u32);
dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1f1414..48d6d9e94f67 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
struct idma64_desc *desc = idma64c->desc;
struct idma64_hw_desc *hw;
size_t bytes = desc->length;
- u64 llp;
- u32 ctlhi;
+ u64 llp = channel_readq(idma64c, LLP);
+ u32 ctlhi = channel_readl(idma64c, CTL_HI);
unsigned int i = 0;
- llp = channel_readq(idma64c, LLP);
do {
hw = &desc->hw[i];
- } while ((hw->llp != llp) && (++i < desc->ndesc));
+ if (hw->llp == llp)
+ break;
+ bytes -= hw->len;
+ } while (++i < desc->ndesc);
if (!i)
return bytes;
- do {
- bytes -= desc->hw[--i].len;
- } while (i);
+ /* The current chunk is not fully transferred yet */
+ bytes += desc->hw[--i].len;
- ctlhi = channel_readl(idma64c, CTL_HI);
return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce01036..fc4156afa070 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- reg = pxad_drcmr(chan->drcmr);
- writel_relaxed(0, chan->phy->base + reg);
+ if (chan->drcmr <= DRCMR_CHLNUM) {
+ reg = pxad_drcmr(chan->drcmr);
+ writel_relaxed(0, chan->phy->base + reg);
+ }
spin_lock_irqsave(&pdev->phy_lock, flags);
for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- reg = pxad_drcmr(phy->vchan->drcmr);
- writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ reg = pxad_drcmr(phy->vchan->drcmr);
+ writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ }
dalgn = phy_readl_relaxed(phy, DALGN);
if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
struct dma_async_tx_descriptor *tx;
struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
+ INIT_LIST_HEAD(&vd->node);
tx = vchan_tx_prep(vc, vd, tx_flags);
tx->tx_submit = pxad_tx_submit;
dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
width = chan->cfg.src_addr_width;
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
- *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+ *dcmd |= PXA_DCMD_INCTRGADDR;
+ if (chan->drcmr <= DRCMR_CHLNUM)
+ *dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
maxburst = chan->cfg.dst_maxburst;
width = chan->cfg.dst_addr_width;
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
- *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+ *dcmd |= PXA_DCMD_INCSRCADDR;
+ if (chan->drcmr <= DRCMR_CHLNUM)
+ *dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
else
curr = phy_readl_relaxed(chan->phy, DTADR);
+ /*
+ * curr has to be actually read before checking descriptor
+ * completion, so that a curr inside a status updater
+ * descriptor implies the following test returns true,
+ * preventing reordering of the curr load and the test.
+ */
+ rmb();
+ if (is_desc_completed(vd))
+ goto out;
+
for (i = 0; i < sw_desc->nb_desc - 1; i++) {
hw_desc = sw_desc->hw_desc[i];
if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d96ff2..1661d518224a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
- struct sun4i_dma_promise *promise;
+ struct sun4i_dma_promise *promise, *tmp;
/* Free all the demands and completed demands */
- list_for_each_entry(promise, &contract->demands, list)
+ list_for_each_entry_safe(promise, tmp, &contract->demands, list)
kfree(promise);
- list_for_each_entry(promise, &contract->completed_demands, list)
+ list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
kfree(promise);
kfree(contract);
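
Switching to list_for_each_entry_safe() matters because the loop body frees the node it is visiting; the safe variant caches the next pointer before the body runs. A minimal userspace sketch of the same idea with a hand-rolled list:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

static void free_all(struct node *head)
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* cache the successor before freeing */
		free(cur);		/* cur is now invalid; next is still good */
		cur = next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->v = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	printf("list freed without touching freed memory\n");
	return 0;
}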
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET 0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
return flyby_type[src_cnt];
}
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state = ioread32(&cmd_base[1]);
-
- return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
dma_addr_t *paddr)
{
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
- struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw)
{
+ struct xgene_dma_ring *ring = &chan->tx_ring;
struct xgene_dma_desc_hw *desc_hw;
- /* Check if can push more descriptor to hw for execution */
- if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
- return -EBUSY;
-
/* Get hw descriptor from DMA tx ring */
desc_hw = &ring->desc_hw[ring->head];
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
}
+ /* Increment the pending transaction count */
+ chan->pending += ((desc_sw->flags &
+ XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
/* Notify the hw that we have descriptor ready for execution */
iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
2 : 1, ring->cmd);
-
- return 0;
}
/**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
- int ret;
/*
* If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
if (chan->pending >= chan->max_outstanding)
return;
- ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
- if (ret)
- return;
+ xgene_chan_xfer_request(chan, desc_sw);
/*
* Delete this element from ld pending queue and append it to
* ld running queue
*/
list_move_tail(&desc_sw->node, &chan->ld_running);
-
- /* Increment the pending transaction count */
- chan->pending++;
}
}
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
* Decrement the pending transaction count
* as we have processed one
*/
- chan->pending--;
+ chan->pending -= ((desc_sw->flags &
+ XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
/*
* Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
struct xgene_dma_ring *ring,
enum xgene_dma_ring_cfgsize cfgsize)
{
+ int ret;
+
/* Setup DMA ring descriptor variables */
ring->pdma = chan->pdma;
ring->cfgsize = cfgsize;
ring->num = chan->pdma->ring_num++;
ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
- ring->size = xgene_dma_get_ring_size(chan, cfgsize);
- if (ring->size <= 0)
- return ring->size;
+ ret = xgene_dma_get_ring_size(chan, cfgsize);
+ if (ret <= 0)
+ return ret;
+ ring->size = ret;
/* Allocate memory for DMA ring descriptor */
ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
/* Set the max outstanding request possible to this channel */
- chan->max_outstanding = rx_ring->slots;
+ chan->max_outstanding = tx_ring->slots;
return ret;
}
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6b7986..c017fcd8e07c 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
struct zx_dma_chan *c;
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
chan = dma_get_any_slave_channel(&d->slave);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a07addde297b..8dd0af1d50bc 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
{
if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
- *attached = new ? true : false;
+ *attached = ((new >> idx) & 0x1) ? true : false;
return true;
}
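
The extcon fix derives the attached state from the bit at idx rather than from whether the whole state word is non-zero. A small standalone sketch of the difference, with simplified names:

#include <stdbool.h>
#include <stdio.h>

static bool cable_changed(unsigned int prev, unsigned int next, int idx, bool *attached)
{
	if (((prev >> idx) & 0x1) != ((next >> idx) & 0x1)) {
		*attached = (next >> idx) & 0x1;	/* state of this cable only */
		return true;
	}
	return false;
}

int main(void)
{
	bool attached;

	/* Cable 0 detaches while cable 1 stays attached: the word is non-zero
	 * (0x2), but bit 0 says "detached"; the old test would report true. */
	if (cable_changed(0x3, 0x2, 0, &attached))
		printf("cable 0 attached=%d\n", attached);
	return 0;
}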
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d8de6a8dd4de..665efca59487 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -139,6 +139,14 @@ config QCOM_SCM
bool
depends on ARM || ARM64
+config QCOM_SCM_32
+ def_bool y
+ depends on QCOM_SCM && ARM
+
+config QCOM_SCM_64
+ def_bool y
+ depends on QCOM_SCM && ARM64
+
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 000830fc6707..2ee83474a3c1 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
-obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o
+obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
+obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-y += broadcom/
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e6b40b..950c87f5d279 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
*/
#include <linux/efi.h>
+#include <linux/sort.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -305,6 +306,44 @@ fail:
*/
#define EFI_RT_VIRTUAL_BASE 0x40000000
+static int cmp_mem_desc(const void *l, const void *r)
+{
+ const efi_memory_desc_t *left = l, *right = r;
+
+ return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ u64 left_end;
+
+ if (left == NULL || right == NULL)
+ return false;
+
+ left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+ return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+ EFI_MEMORY_WC | EFI_MEMORY_UC |
+ EFI_MEMORY_RUNTIME;
+
+ return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
int *count)
{
u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
- efi_memory_desc_t *out = runtime_map;
+ efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
int l;
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *in = (void *)memory_map + l;
+ /*
+ * To work around potential issues with the Properties Table feature
+ * introduced in UEFI 2.5, which may split PE/COFF executable images
+ * in memory into several RuntimeServicesCode and RuntimeServicesData
+ * regions, we need to preserve the relative offsets between adjacent
+ * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+ * The easiest way to find adjacent regions is to sort the memory map
+ * before traversing it.
+ */
+ sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+ for (l = 0; l < map_size; l += desc_size, prev = in) {
u64 paddr, size;
+ in = (void *)memory_map + l;
if (!(in->attribute & EFI_MEMORY_RUNTIME))
continue;
+ paddr = in->phys_addr;
+ size = in->num_pages * EFI_PAGE_SIZE;
+
/*
* Make the mapping compatible with 64k pages: this allows
* a 4k page size kernel to kexec a 64k page size kernel and
* vice versa.
*/
- paddr = round_down(in->phys_addr, SZ_64K);
- size = round_up(in->num_pages * EFI_PAGE_SIZE +
- in->phys_addr - paddr, SZ_64K);
-
- /*
- * Avoid wasting memory on PTEs by choosing a virtual base that
- * is compatible with section mappings if this region has the
- * appropriate size and physical alignment. (Sections are 2 MB
- * on 4k granule kernels)
- */
- if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
- efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ if (!regions_are_adjacent(prev, in) ||
+ !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size += in->phys_addr - paddr;
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual
+ * base that is compatible with section mappings if this
+ * region has the appropriate size and physical
+ * alignment. (Sections are 2 MB on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ else
+ efi_virt_base = round_up(efi_virt_base, SZ_64K);
+ }
in->virt_addr = efi_virt_base + in->phys_addr - paddr;
efi_virt_base += size;
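
The two predicates added above decide whether a region continues at its natural offset from the previous one or gets a freshly rounded virtual base. A minimal userspace sketch of the adjacency and attribute-mask checks; the page size and attribute bits are stand-ins, not the real EFI definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		0x1000ULL	/* stand-in for EFI_PAGE_SIZE */
#define ATTR_RUNTIME	(1ULL << 0)	/* illustrative attribute bits */
#define ATTR_WB		(1ULL << 1)

struct region { uint64_t phys, pages, attr; };

static bool adjacent(const struct region *l, const struct region *r)
{
	return l->phys + l->pages * PAGE_SZ == r->phys;
}

static bool compatible(const struct region *l, const struct region *r)
{
	uint64_t mask = ATTR_RUNTIME | ATTR_WB;	/* the kernel masks more type bits */

	return ((l->attr ^ r->attr) & mask) == 0;
}

int main(void)
{
	struct region code = { 0x40000000ULL, 16, ATTR_RUNTIME | ATTR_WB };
	struct region data = { 0x40010000ULL, 4, ATTR_RUNTIME | ATTR_WB };

	/* Adjacent and same attributes: keep the relative offset instead of
	 * re-rounding the virtual base for the second region. */
	printf("adjacent=%d compatible=%d\n", adjacent(&code, &data),
	       compatible(&code, &data));
	return 0;
}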
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index e334a01cf92f..6b6548fda089 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,10 +5,6 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
-#undef memcpy
-#undef memset
-#undef memmove
-
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 000000000000..bb6555f6d63b
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/qcom_scm.h>
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending interrupt,
+ * control returns from this function; otherwise, the cpu jumps to the
+ * warm boot entry point set for this cpu upon reset.
+ */
+void __qcom_scm_cpu_power_down(u32 flags)
+{
+}
+
+int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ return -ENOTSUPP;
+}
+
+int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+ return -ENOTSUPP;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..6647fb26ef25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
+extern int amdgpu_enable_semaphores;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler *scheduler;
+ struct amd_gpu_scheduler sched;
spinlock_t fence_lock;
struct mutex *ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_inst_irq;
/* gfx status */
uint32_t gfx_current_status;
- /* sync signal for const engine */
- unsigned ce_sync_offs;
/* ce ram size*/
unsigned ce_ram_size;
};
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
uint32_t num_ibs;
struct mutex job_lock;
struct amdgpu_user_fence uf;
- int (*free_job)(struct amdgpu_job *sched_job);
+ int (*free_job)(struct amdgpu_job *job);
};
+#define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
return -ENOMEM;
r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
if (r) {
dev_err(rdev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
+ NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
+ NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
if (ret)
return ret;
ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
- NULL, &placement, &obj);
+ NULL, &placement, NULL,
+ &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
- cgs_handle_t *handle)
-{
- CGS_FUNC_ADEV;
- int r;
- uint32_t dma_handle;
- struct drm_gem_object *obj;
- struct amdgpu_bo *bo;
- struct drm_device *dev = adev->ddev;
- struct drm_file *file_priv = NULL, *priv;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(priv, &dev->filelist, lhead) {
- rcu_read_lock();
- if (priv->pid == get_pid(task_pid(current)))
- file_priv = priv;
- rcu_read_unlock();
- if (file_priv)
- break;
- }
- mutex_unlock(&dev->struct_mutex);
- r = dev->driver->prime_fd_to_handle(dev,
- file_priv, dmabuf_fd,
- &dma_handle);
- spin_lock(&file_priv->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&file_priv->object_idr, dma_handle);
- if (obj == NULL) {
- spin_unlock(&file_priv->table_lock);
- return -EINVAL;
- }
- spin_unlock(&file_priv->table_lock);
- bo = gem_to_amdgpu_bo(obj);
- *handle = (cgs_handle_t)bo;
- return 0;
-}
-
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
- amdgpu_cgs_import_gpu_mem,
amdgpu_cgs_add_irq_source,
amdgpu_cgs_irq_get,
amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..cb3c274edb0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
- uint64_t *chunk_array = NULL;
+ uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned size, i;
- int r = 0;
+ unsigned size;
+ int i;
+ int ret;
- if (!cs->in.num_chunks)
- goto out;
+ if (cs->in.num_chunks == 0)
+ return 0;
+
+ chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ if (!chunk_array)
+ return -ENOMEM;
p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
if (!p->ctx) {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_chunk;
}
+
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
/* get chunks */
INIT_LIST_HEAD(&p->validated);
- chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
- if (chunk_array == NULL) {
- r = -ENOMEM;
- goto out;
- }
-
chunk_array_user = (uint64_t __user *)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto put_bo_list;
}
p->nchunks = cs->in.num_chunks;
p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
- if (p->chunks == NULL) {
- r = -ENOMEM;
- goto out;
+ if (!p->chunks) {
+ ret = -ENOMEM;
+ goto put_bo_list;
}
for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +200,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
chunk_ptr = (void __user *)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ i--;
+ goto free_partial_kdata;
}
p->chunks[i].chunk_id = user_chunk.chunk_id;
p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +213,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
- r = -ENOMEM;
- goto out;
+ ret = -ENOMEM;
+ i--;
+ goto free_partial_kdata;
}
size *= sizeof(uint32_t);
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto free_partial_kdata;
}
switch (p->chunks[i].chunk_id) {
@@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
gobj = drm_gem_object_lookup(p->adev->ddev,
p->filp, handle);
if (gobj == NULL) {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
p->uf.offset = fence_data->offset;
} else {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
break;
@@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
default:
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
}
p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!p->ibs)
- r = -ENOMEM;
+ if (!p->ibs) {
+ ret = -ENOMEM;
+ goto free_all_kdata;
+ }
-out:
kfree(chunk_array);
- return r;
+ return 0;
+
+free_all_kdata:
+ i = p->nchunks - 1;
+free_partial_kdata:
+ for (; i >= 0; i--)
+ drm_free_large(p->chunks[i].kdata);
+ kfree(p->chunks);
+put_bo_list:
+ if (p->bo_list)
+ amdgpu_bo_list_put(p->bo_list);
+ amdgpu_ctx_put(p->ctx);
+free_chunk:
+ kfree(chunk_array);
+
+ return ret;
}
/* Returns how many bytes TTM can move per IB.
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
return max(bytes_moved_threshold, 1024*1024ull);
}
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct list_head *validated)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_device *adev = p->adev;
struct amdgpu_bo_list_entry *lobj;
- struct list_head duplicates;
struct amdgpu_bo *bo;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
- }
-
- list_for_each_entry(lobj, &p->validated, tv.head) {
+ list_for_each_entry(lobj, validated, tv.head) {
bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
@@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
return r;
}
}
@@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_cs_buckets buckets;
+ struct list_head duplicates;
bool need_mmap_lock = false;
int i, r;
@@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
- r = amdgpu_cs_list_validate(p);
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+ if (unlikely(r != 0))
+ goto error_reserve;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+ if (r)
+ goto error_validate;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+
+error_validate:
+ if (r)
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+error_reserve:
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
@@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
int i;
- if (sched_job->ibs)
- for (i = 0; i < sched_job->num_ibs; i++)
- amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
- kfree(sched_job->ibs);
- if (sched_job->uf.bo)
- drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+ if (job->ibs)
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
+ if (job->uf.bo)
+ drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
return 0;
}
@@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(parser, r, false);
+ kfree(parser);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
@@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
- job->base.sched = ring->scheduler;
+ job->base.sched = &ring->sched;
job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
job->adev = parser->adev;
job->ibs = parser->ibs;
@@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
job->free_job = amdgpu_cs_free_job;
mutex_lock(&job->job_lock);
- r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ r = amd_sched_entity_push_job(&job->base);
if (r) {
mutex_unlock(&job->job_lock);
amdgpu_cs_free_job(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
for (i = 0; i < adev->num_rings; i++) {
struct amd_sched_rq *rq;
if (kernel)
- rq = &adev->rings[i]->scheduler->kernel_rq;
+ rq = &adev->rings[i]->sched.kernel_rq;
else
- rq = &adev->rings[i]->scheduler->sched_rq;
- r = amd_sched_entity_init(adev->rings[i]->scheduler,
+ rq = &adev->rings[i]->sched.sched_rq;
+ r = amd_sched_entity_init(&adev->rings[i]->sched,
&ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
if (i < adev->num_rings) {
for (j = 0; j < i; j++)
- amd_sched_entity_fini(adev->rings[j]->scheduler,
+ amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx);
return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
if (amdgpu_enable_scheduler) {
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(adev->rings[i]->scheduler,
+ amd_sched_entity_fini(&adev->rings[i]->sched,
&ctx->rings[i].entity);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->vram_scratch.robj);
+ NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
}
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
if (adev->wb.wb_obj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ &adev->wb.wb_obj);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
/* turn off display hw */
+ drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
+ drm_modeset_unlock_all(dev);
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
+ drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
+ drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..adb48353f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_enable_scheduler = 0;
int amdgpu_sched_jobs = 16;
int amdgpu_sched_hw_submission = 2;
+int amdgpu_enable_semaphores = 1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
+
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
- int i;
+ int i, r;
ring->fence_drv.cpu_addr = NULL;
ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
amdgpu_fence_check_lockup);
ring->fence_drv.ring = ring;
+ init_waitqueue_head(&ring->fence_drv.fence_queue);
+
if (amdgpu_enable_scheduler) {
- ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
- ring->idx,
- amdgpu_sched_hw_submission,
- (void *)ring->adev);
- if (!ring->scheduler)
- DRM_ERROR("Failed to create scheduler on ring %d.\n",
- ring->idx);
+ r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ amdgpu_sched_hw_submission, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
+ }
}
+
+ return 0;
}
/**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
wake_up_all(&ring->fence_drv.fence_queue);
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- if (ring->scheduler)
- amd_sched_destroy(ring->scheduler);
+ amd_sched_fini(&ring->sched);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gart.robj);
+ NULL, NULL, &adev->gart.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
}
}
retry:
- r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
+ r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
+ flags, NULL, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
&args->data.data_size_bytes,
&args->data.flags);
} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
+ if (args->data.data_size_bytes > sizeof(args->data.data)) {
+ r = -EINVAL;
+ goto unreserve;
+ }
r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
if (!r)
r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
args->data.flags);
}
+unreserve:
amdgpu_bo_unreserve(robj);
out:
drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
struct ww_acquire_ctx ticket;
- struct list_head list;
+ struct list_head list, duplicates;
unsigned domain;
int r;
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
tv.bo = &bo_va->bo->tbo;
tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ /* Provide duplicates to avoid -EALREADY */
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_free;
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int r;
args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
- args->size = args->pitch * args->height;
+ args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
r = amdgpu_gem_object_create(adev, args->size, 0,
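
The (u64) cast in the dumb-buffer size calculation matters because pitch and height are 32-bit values, so their product can wrap before it is stored in the 64-bit size. A standalone sketch of the wraparound it prevents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 32768, height = 200000;

	uint64_t wrapped = pitch * height;		/* multiplied in 32 bits, wraps */
	uint64_t correct = (uint64_t)pitch * height;	/* widened before multiplying */

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}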
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, &adev->irq.ih.ring_obj);
+ NULL, NULL, &adev->irq.ih.ring_obj);
if (r) {
DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
*/
int amdgpu_irq_postinstall(struct drm_device *dev)
{
- dev->max_vblank_count = 0x001fffff;
+ dev->max_vblank_count = 0x00ffffff;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..8c735f544b66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
- unsigned n, alloc_size = info->read_mmr_reg.count * 4;
+ unsigned n, alloc_size;
uint32_t *regs;
unsigned se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
- regs = kmalloc(alloc_size, GFP_KERNEL);
+ regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
+ alloc_size = info->read_mmr_reg.count * sizeof(*regs);
for (i = 0; i < info->read_mmr_reg.count; i++)
if (amdgpu_asic_read_register(adev, se_num, sh_num,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
+ struct reservation_object *resv,
struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
/* Kernel allocation are uninterruptible */
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
- acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+ acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0)) {
return r;
}
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
- struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ struct amdgpu_bo **bo_ptr)
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
placements, domain, flags);
- return amdgpu_bo_create_restricted(adev, size, byte_align,
- kernel, domain, flags,
- sg,
- &placement,
- bo_ptr);
+ return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+ domain, flags, sg, &placement,
+ resv, bo_ptr);
}
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (metadata == NULL)
return -EINVAL;
- buffer = kzalloc(metadata_size, GFP_KERNEL);
+ buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
if (buffer == NULL)
return -ENOMEM;
- memcpy(buffer, metadata, metadata_size);
-
kfree(bo->metadata);
bo->metadata_flags = flags;
bo->metadata = buffer;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
+ struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
+ struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
+ struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
int ret;
+ ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- amdgpu_fence_driver_init_ring(ring);
+ r = amdgpu_fence_driver_init_ring(ring);
+ if (r)
+ return r;
}
- init_waitqueue_head(&ring->fence_drv.fence_queue);
-
r = amdgpu_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
if (ring->ring_obj == NULL) {
r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, &ring->ring_obj);
+ NULL, NULL, &ring->ring_obj);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
- r = amdgpu_bo_create(adev, size, align, true,
- domain, 0, NULL, &sa_manager->bo);
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ 0, NULL, NULL, &sa_manager->bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
struct amd_sched_fence *s_fence;
s_fence = to_amd_sched_fence(f);
- if (s_fence)
- return s_fence->scheduler->ring_id;
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+
+ ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+ return ring->idx;
+ }
+
a_fence = to_amdgpu_fence(f);
if (a_fence)
return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
}
#if defined(CONFIG_DEBUG_FS)
+
+static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
+{
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
+
+ if (a_fence)
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ a_fence->seq, a_fence->ring->idx);
+
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+
+ ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+ seq_printf(m, " protected by 0x%016x on ring %d",
+ s_fence->base.seqno, ring->idx);
+ }
+}
+
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
- if (i->fence) {
- struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
- struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
- if (a_fence)
- seq_printf(m, " protected by 0x%016llx on ring %d",
- a_fence->seq, a_fence->ring->idx);
- if (s_fence)
- seq_printf(m, " protected by 0x%016x on ring %d",
- s_fence->base.seqno,
- s_fence->scheduler->ring_id);
-
- }
+ if (i->fence)
+ amdgpu_sa_bo_dump_fence(i->fence, m);
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971e..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
#include <drm/drmP.h>
#include "amdgpu.h"
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
- struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
- return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ return amdgpu_sync_get_fence(&job->ibs->sync);
}
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
{
- struct amdgpu_job *sched_job;
- struct amdgpu_fence *fence;
+ struct amdgpu_fence *fence = NULL;
+ struct amdgpu_job *job;
int r;
- if (!job) {
+ if (!sched_job) {
DRM_ERROR("job is null\n");
return NULL;
}
- sched_job = (struct amdgpu_job *)job;
- mutex_lock(&sched_job->job_lock);
- r = amdgpu_ib_schedule(sched_job->adev,
- sched_job->num_ibs,
- sched_job->ibs,
- sched_job->base.owner);
- if (r)
+ job = to_amdgpu_job(sched_job);
+ mutex_lock(&job->job_lock);
+ r = amdgpu_ib_schedule(job->adev,
+ job->num_ibs,
+ job->ibs,
+ job->base.owner);
+ if (r) {
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err;
- fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
-
- if (sched_job->free_job)
- sched_job->free_job(sched_job);
+ }
- mutex_unlock(&sched_job->job_lock);
- return &fence->base;
+ fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
err:
- DRM_ERROR("Run job error\n");
- mutex_unlock(&sched_job->job_lock);
- job->sched->ops->process_job(job);
- return NULL;
-}
+ if (job->free_job)
+ job->free_job(job);
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
- struct amdgpu_job *sched_job;
-
- if (!job) {
- DRM_ERROR("job is null\n");
- return;
- }
- sched_job = (struct amdgpu_job *)job;
- /* after processing job, free memory */
- fence_put(&sched_job->base.s_fence->base);
- kfree(sched_job);
+ mutex_unlock(&job->job_lock);
+ fence_put(&job->base.s_fence->base);
+ kfree(job);
+ return fence ? &fence->base : NULL;
}
struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_sched_dependency,
.run_job = amdgpu_sched_run_job,
- .process_job = amdgpu_sched_process_job
};
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
- job->base.sched = ring->scheduler;
+ job->base.sched = &ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
job->adev = adev;
job->ibs = ibs;
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
mutex_init(&job->job_lock);
job->free_job = free_job;
mutex_lock(&job->job_lock);
- r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ r = amd_sched_entity_push_job(&job->base);
if (r) {
mutex_unlock(&job->job_lock);
kfree(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaff7183..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
if (a_fence)
return a_fence->ring->adev == adev;
- if (s_fence)
- return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+
+ ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+ return ring->adev == adev;
+ }
+
return false;
}
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
fence_put(e->fence);
kfree(e);
}
+
+ if (amdgpu_enable_semaphores)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_fence *fence = sync->sync_to[i];
+ if (!fence)
+ continue;
+
+ r = fence_wait(&fence->base, false);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
return -EINVAL;
}
- if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
+ if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
+ (count >= AMDGPU_NUM_SYNCS)) {
/* not enough room, wait manually */
r = fence_wait(&fence->base, false);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a43be8a..4865615e9c06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, &vram_obj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM, 0,
+ NULL, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
struct fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5cde413..364cbe975332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->stollen_vga_memory);
+ NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e66797ae6..5cc95f1a7dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b06e3b..d0312364d950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->uvd.vcpu_bo);
+ NULL, NULL, &adev->uvd.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
return -EINVAL;
}
- if (msg_type == 1) {
+ switch (msg_type) {
+ case 0:
+ /* it's a create msg */
+ amdgpu_bo_kunmap(bo);
+
+ /* try to alloc a new handle */
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
+ return -EINVAL;
+ }
+
+ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+ adev->uvd.filp[i] = ctx->parser->filp;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+
+ case 1:
/* it's a decode msg, calc buffer sizes */
r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
amdgpu_bo_kunmap(bo);
if (r)
return r;
- } else if (msg_type == 2) {
+ /* validate the handle */
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ if (adev->uvd.filp[i] != ctx->parser->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+ return -ENOENT;
+
+ case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
- } else {
- /* it's a create msg */
- amdgpu_bo_kunmap(bo);
-
- if (msg_type != 0) {
- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
- return -EINVAL;
- }
-
- /* it's a create msg, no special handling needed */
- }
-
- /* create or decode, validate the handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- if (atomic_read(&adev->uvd.handles[i]) == handle)
- return 0;
- }
- /* handle not found try to alloc a new one */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
- adev->uvd.filp[i] = ctx->parser->filp;
- return 0;
- }
+ default:
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
}
-
- DRM_ERROR("No more free UVD handles!\n");
+ BUG();
return -EINVAL;
}
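/*
 * Illustrative sketch (not part of the patch): the create path above claims a
 * handle slot lock-free with atomic_cmpxchg().  A slot is free while it holds
 * zero, and the cmpxchg succeeds for exactly one caller, which sees the old
 * value 0.  EXAMPLE_MAX_HANDLES and the handles[] array are stand-ins for the
 * real adev->uvd fields.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_HANDLES 10

static atomic_t handles[EXAMPLE_MAX_HANDLES];

static int example_claim_handle(u32 handle)
{
	int i;

	for (i = 0; i < EXAMPLE_MAX_HANDLES; ++i) {
		/* old value 0 means the slot was free and is now ours */
		if (atomic_cmpxchg(&handles[i], 0, handle) == 0)
			return i;
	}

	return -ENOSPC;	/* every slot is already taken */
}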
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_free_job(
- struct amdgpu_job *sched_job)
+ struct amdgpu_job *job)
{
- amdgpu_ib_free(sched_job->adev, sched_job->ibs);
- kfree(sched_job->ibs);
+ amdgpu_ib_free(job->adev, job->ibs);
+ kfree(job->ibs);
return 0;
}
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &bo);
+ NULL, NULL, &bo);
if (r)
return r;
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &bo);
+ NULL, NULL, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c42aa8..74f2038ac747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->vce.vcpu_bo);
+ NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
static int amdgpu_vce_free_job(
- struct amdgpu_job *sched_job)
+ struct amdgpu_job *job)
{
- amdgpu_ib_free(sched_job->adev, sched_job->ibs);
- kfree(sched_job->ibs);
+ amdgpu_ib_free(job->adev, job->ibs);
+ kfree(job->ibs);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cdc370a..1e14531353e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}
}
-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
{
int i;
- for (i = 0; i < sched_job->num_ibs; i++)
- amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
- kfree(sched_job->ibs);
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
return 0;
}
@@ -686,31 +686,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_fence_pts - fence page tables after an update
- *
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @fence: fence to use
- *
- * Fence the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
- uint64_t start, uint64_t end,
- struct fence *fence)
-{
- unsigned i;
-
- start >>= amdgpu_vm_block_size;
- end >>= amdgpu_vm_block_size;
-
- for (i = start; i <= end; ++i)
- amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
-}
-
-/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
@@ -813,8 +788,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, f);
+ amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
fence_put(*fence);
*fence = fence_get(f);
@@ -855,7 +829,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
int r;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_TT)
addr += adev->vm_manager.vram_base_offset;
} else {
@@ -1089,6 +1063,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+ struct reservation_object *resv = vm->page_directory->tbo.resv;
struct amdgpu_bo *pt;
if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1072,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* drop mutex to allocate and clear page table */
mutex_unlock(&vm->mutex);
+ ww_mutex_lock(&resv->lock, NULL);
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
- NULL, &pt);
+ NULL, resv, &pt);
+ ww_mutex_unlock(&resv->lock);
if (r)
goto error_free;
@@ -1303,7 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
- NULL, &vm->page_directory);
+ NULL, NULL, &vm->page_directory);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc40c9cd..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7d6c26..e33180d3314a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
* 3. map kernel virtual address
*/
ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
+ true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ toc_buf);
if (ret) {
dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
+ true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ smu_buf);
if (ret) {
dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea65857..bda1249eb871 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, toc_buf);
+ NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, smu_buf);
+ NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5cf65ca..e992bf2ff66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
if (r) {
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&ring->mqd_obj);
if (r) {
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
- /* instruct DE to set a magic number */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(5)));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
-
- /* let CE wait till condition satisfied */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
- WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3) | /* == */
- WAIT_REG_MEM_ENGINE(2))); /* ce */
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, 4); /* poll interval */
-
- /* instruct CE to reset wb of ce_sync to zero */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
- WRITE_DATA_DST_SEL(5) |
- WR_CONFIRM));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 0);
-}
-
/*
* vm
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ if (usepfp) {
+ /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0x0);
/* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
- gfx_v7_0_ce_sync_me(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
}
}
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gfx.rlc.save_restore_obj);
+ NULL, NULL,
+ &adev->gfx.rlc.save_restore_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gfx.rlc.clear_state_obj);
+ NULL, NULL,
+ &adev->gfx.rlc.clear_state_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gfx.rlc.cp_table_obj);
+ NULL, NULL,
+ &adev->gfx.rlc.cp_table_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
- r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
- if (r) {
- DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
- return r;
- }
-
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GDS, 0,
- NULL, &adev->gds.gds_gfx_bo);
+ NULL, NULL, &adev->gds.gds_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GWS, 0,
- NULL, &adev->gds.gws_gfx_bo);
+ NULL, NULL, &adev->gds.gws_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_OA, 0,
- NULL, &adev->gds.oa_gfx_bo);
+ NULL, NULL, &adev->gds.oa_gfx_bo);
if (r)
return r;
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f07439a512..cb4f68f53f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
if (r) {
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
- r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
- if (r) {
- DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
- return r;
- }
-
/* set up the gfx ring */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
/* reserve GDS, GWS and OA resource for gfx */
r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0,
+ AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
NULL, &adev->gds.gds_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0,
+ AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
NULL, &adev->gds.gws_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0,
+ AMDGPU_GEM_DOMAIN_OA, 0, NULL,
NULL, &adev->gds.oa_gfx_bo);
if (r)
return r;
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
gfx_v8_0_mec_fini(adev);
return 0;
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
sizeof(struct vi_mqd),
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- &ring->mqd_obj);
+ NULL, &ring->mqd_obj);
if (r) {
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
return r;
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
+
}
/**
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
return true;
}
-static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring)
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct amdgpu_device *adev = ring->adev;
- u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
- /* instruct DE to set a magic number */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(5)));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint64_t addr = ring->fence_drv.gpu_addr;
- /* let CE wait till condition satisfied */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
- WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3) | /* == */
- WAIT_REG_MEM_ENGINE(2))); /* ce */
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3))); /* equal */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
- /* instruct CE to reset wb of ce_sync to zero */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
- WRITE_DATA_DST_SEL(5) |
- WR_CONFIRM));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 0);
-}
-
-static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
-{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ if (usepfp) {
+ /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
- WRITE_DATA_DST_SEL(0)));
+ WRITE_DATA_DST_SEL(0)) |
+ WR_CONFIRM);
if (vm_id < 8) {
amdgpu_ring_write(ring,
(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
-
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
- gfx_v8_0_ce_sync_me(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528ab8704..fab5471d25d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742620d0..7bc9e9fcf3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa942ade..966d4b2ed9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, toc_buf);
+ NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac941a610..5421309c1862 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, toc_buf);
+ NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, smu_buf);
+ NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da694f0..ed50dd725788 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = uvd_v4_2_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c318af..9ad8b9906c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = uvd_v5_0_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553fce531..7e9934fa4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Skip this for APU for now */
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_uvd_suspend(adev);
+ if (r)
+ return r;
+ }
r = uvd_v6_0_hw_fini(adev);
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
return r;
}
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
+ /* Skip this for APU for now */
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+ }
r = uvd_v6_0_hw_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75ad1b..b55ceb14fdcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1400,7 +1400,8 @@ static int vi_common_early_init(void *handle)
case CHIP_CARRIZO:
adev->has_uvd = true;
adev->cg_flags = 0;
- adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
+ /* Disable UVD pg */
+ adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x1;
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 488642f08267..3b47ae313e36 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -27,19 +27,6 @@
#include "cgs_common.h"
/**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device: opaque device handle
- * @dmabuf_fd: DMABuf file descriptor
- * @handle: memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
- cgs_handle_t *handle);
-
-/**
* cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
* @private_data: private data provided to cgs_add_irq_source
* @src_id: interrupt source ID
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
struct cgs_os_ops {
- cgs_import_gpu_mem_t import_gpu_mem;
-
/* IRQ handling */
cgs_add_irq_source_t add_irq_source;
cgs_irq_get_t irq_get;
cgs_irq_put_t irq_put;
};
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
- CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
private_data)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000000..144f50acc971
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_sched
+#define TRACE_INCLUDE_FILE gpu_sched_trace
+
+TRACE_EVENT(amd_sched_job,
+ TP_PROTO(struct amd_sched_job *sched_job),
+ TP_ARGS(sched_job),
+ TP_STRUCT__entry(
+ __field(struct amd_sched_entity *, entity)
+ __field(const char *, name)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = sched_job->s_entity;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = kfifo_len(
+ &sched_job->s_entity->job_queue) / sizeof(sched_job);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->name, __entry->job_count,
+ __entry->hw_job_count)
+);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
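/*
 * Illustrative usage sketch (not part of the patch): exactly one compilation
 * unit defines CREATE_TRACE_POINTS before including this header so that the
 * tracepoint bodies are emitted once (gpu_scheduler.c does this below); every
 * other user simply includes the header and calls the generated hook.
 */
#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static void example_emit(struct amd_sched_job *sched_job)
{
	/* compiles to a no-op unless the tracepoint is enabled at runtime */
	trace_amd_sched_job(sched_job);
}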
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9259f1b6664c..3697eeeecf82 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,9 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
+#define CREATE_TRACE_POINTS
+#include "gpu_sched_trace.h"
+
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
@@ -65,29 +68,29 @@ static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
struct amd_sched_entity *entity;
- struct amd_sched_job *job;
+ struct amd_sched_job *sched_job;
spin_lock(&rq->lock);
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- job = amd_sched_entity_pop_job(entity);
- if (job) {
+ sched_job = amd_sched_entity_pop_job(entity);
+ if (sched_job) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
- return job;
+ return sched_job;
}
}
}
list_for_each_entry(entity, &rq->entities, list) {
- job = amd_sched_entity_pop_job(entity);
- if (job) {
+ sched_job = amd_sched_entity_pop_job(entity);
+ if (sched_job) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
- return job;
+ return sched_job;
}
if (entity == rq->current_entity)
@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_rq *rq,
uint32_t jobs)
{
+ int r;
+
if (!(sched && entity && rq))
return -EINVAL;
memset(entity, 0, sizeof(struct amd_sched_entity));
- entity->belongto_rq = rq;
- entity->scheduler = sched;
- entity->fence_context = fence_context_alloc(1);
- if(kfifo_alloc(&entity->job_queue,
- jobs * sizeof(void *),
- GFP_KERNEL))
- return -EINVAL;
+ INIT_LIST_HEAD(&entity->list);
+ entity->rq = rq;
+ entity->sched = sched;
spin_lock_init(&entity->queue_lock);
+ r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+ if (r)
+ return r;
+
atomic_set(&entity->fence_seq, 0);
+ entity->fence_context = fence_context_alloc(1);
/* Add the entity to the run queue */
amd_sched_rq_add_entity(rq, entity);
+
return 0;
}
@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
- return entity->scheduler == sched &&
- entity->belongto_rq != NULL;
+ return entity->sched == sched &&
+ entity->rq != NULL;
}
/**
@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
- struct amd_sched_rq *rq = entity->belongto_rq;
+ struct amd_sched_rq *rq = entity->rq;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
fence_put(f);
- amd_sched_wakeup(entity->scheduler);
+ amd_sched_wakeup(entity->sched);
}
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->scheduler;
- struct amd_sched_job *job;
+ struct amd_gpu_scheduler *sched = entity->sched;
+ struct amd_sched_job *sched_job;
if (ACCESS_ONCE(entity->dependency))
return NULL;
- if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+ if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
return NULL;
- while ((entity->dependency = sched->ops->dependency(job))) {
+ while ((entity->dependency = sched->ops->dependency(sched_job))) {
if (fence_add_callback(entity->dependency, &entity->cb,
amd_sched_entity_wakeup))
@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
return NULL;
}
- return job;
+ return sched_job;
}
/**
* Helper to submit a job to the job queue
*
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
*
* Returns true if we could submit the job.
*/
-static bool amd_sched_entity_in(struct amd_sched_job *job)
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
- struct amd_sched_entity *entity = job->s_entity;
+ struct amd_sched_entity *entity = sched_job->s_entity;
bool added, first = false;
spin_lock(&entity->queue_lock);
- added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+ added = kfifo_in(&entity->job_queue, &sched_job,
+ sizeof(sched_job)) == sizeof(sched_job);
- if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+ if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
first = true;
spin_unlock(&entity->queue_lock);
/* first job wakes up scheduler */
if (first)
- amd_sched_wakeup(job->sched);
+ amd_sched_wakeup(sched_job->sched);
return added;
}
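/*
 * Illustrative sketch (not part of the patch): the entity's job_queue is a
 * kfifo of pointers, so every push or pop moves sizeof(void *) bytes and a
 * fifo length of exactly one pointer identifies the first queued job, which
 * is the one that has to wake the scheduler.  example_queue_job() is a
 * hypothetical helper mirroring amd_sched_entity_in() above.
 */
#include <linux/errno.h>
#include <linux/kfifo.h>
#include "gpu_scheduler.h"

static int example_queue_job(struct kfifo *fifo, struct amd_sched_job *sched_job)
{
	if (kfifo_in(fifo, &sched_job, sizeof(sched_job)) != sizeof(sched_job))
		return -ENOSPC;	/* queue full, nothing was copied */

	/* 1: this was the first job, the caller should wake the scheduler */
	return kfifo_len(fifo) == sizeof(sched_job) ? 1 : 0;
}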
@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
/**
* Submit a job to the job queue
*
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
*
* Returns 0 for success, negative error code otherwise.
*/
@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
fence_get(&fence->base);
sched_job->s_fence = fence;
- wait_event(entity->scheduler->job_scheduled,
+ wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
-
+ trace_amd_sched_job(sched_job);
return 0;
}
@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
- struct amd_sched_job *job;
+ struct amd_sched_job *sched_job;
if (!amd_sched_ready(sched))
return NULL;
/* Kernel run queue has higher priority than normal run queue*/
- job = amd_sched_rq_select_job(&sched->kernel_rq);
- if (job == NULL)
- job = amd_sched_rq_select_job(&sched->sched_rq);
+ sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+ if (sched_job == NULL)
+ sched_job = amd_sched_rq_select_job(&sched->sched_rq);
- return job;
+ return sched_job;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
- struct amd_sched_job *sched_job =
- container_of(cb, struct amd_sched_job, cb);
- struct amd_gpu_scheduler *sched;
+ struct amd_sched_fence *s_fence =
+ container_of(cb, struct amd_sched_fence, cb);
+ struct amd_gpu_scheduler *sched = s_fence->sched;
- sched = sched_job->sched;
- amd_sched_fence_signal(sched_job->s_fence);
atomic_dec(&sched->hw_rq_count);
- fence_put(&sched_job->s_fence->base);
- sched->ops->process_job(sched_job);
+ amd_sched_fence_signal(s_fence);
+ fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -338,87 +344,82 @@ static int amd_sched_main(void *param)
while (!kthread_should_stop()) {
struct amd_sched_entity *entity;
- struct amd_sched_job *job;
+ struct amd_sched_fence *s_fence;
+ struct amd_sched_job *sched_job;
struct fence *fence;
wait_event_interruptible(sched->wake_up_worker,
kthread_should_stop() ||
- (job = amd_sched_select_job(sched)));
+ (sched_job = amd_sched_select_job(sched)));
- if (!job)
+ if (!sched_job)
continue;
- entity = job->s_entity;
+ entity = sched_job->s_entity;
+ s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- fence = sched->ops->run_job(job);
+ fence = sched->ops->run_job(sched_job);
if (fence) {
- r = fence_add_callback(fence, &job->cb,
+ r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &job->cb);
+ amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n", r);
fence_put(fence);
+ } else {
+ DRM_ERROR("Failed to run job!\n");
+ amd_sched_process_job(NULL, &s_fence->cb);
}
- count = kfifo_out(&entity->job_queue, &job, sizeof(job));
- WARN_ON(count != sizeof(job));
+ count = kfifo_out(&entity->job_queue, &sched_job,
+ sizeof(sched_job));
+ WARN_ON(count != sizeof(sched_job));
wake_up(&sched->job_scheduled);
}
return 0;
}
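/*
 * Illustrative sketch (not part of the patch): fence_add_callback() returns
 * -ENOENT when the fence has already signalled, so the main loop above runs
 * the completion handler directly in that case instead of treating it as a
 * failure.  example_add_done_cb() and the done callback are hypothetical.
 */
#include <linux/fence.h>
#include <drm/drmP.h>

static void example_add_done_cb(struct fence *fence, struct fence_cb *cb,
				fence_func_t done)
{
	int r = fence_add_callback(fence, cb, done);

	if (r == -ENOENT)	/* already signalled: invoke the handler now */
		done(fence, cb);
	else if (r)
		DRM_ERROR("fence add callback failed (%d)\n", r);
}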
/**
- * Create a gpu scheduler
+ * Init a gpu scheduler instance
*
+ * @sched The pointer to the scheduler
* @ops The backend operations for this scheduler.
- * @ring The the ring id for the scheduler.
* @hw_submissions Number of hw submissions to do.
+ * @name Name used for debugging
*
- * Return the pointer to scheduler for success, otherwise return NULL
+ * Return 0 on success, otherwise error code.
*/
-struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
- unsigned ring, unsigned hw_submission,
- void *priv)
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_backend_ops *ops,
+ unsigned hw_submission, const char *name)
{
- struct amd_gpu_scheduler *sched;
-
- sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
- if (!sched)
- return NULL;
-
sched->ops = ops;
- sched->ring_id = ring;
sched->hw_submission_limit = hw_submission;
- sched->priv = priv;
- snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+ sched->name = name;
amd_sched_rq_init(&sched->sched_rq);
amd_sched_rq_init(&sched->kernel_rq);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
atomic_set(&sched->hw_rq_count, 0);
+
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
- DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
- kfree(sched);
- return NULL;
+ DRM_ERROR("Failed to create scheduler for %s.\n", name);
+ return PTR_ERR(sched->thread);
}
- return sched;
+ return 0;
}
/**
* Destroy a gpu scheduler
*
* @sched The pointer to the scheduler
- *
- * return 0 if succeed. -1 if failed.
*/
-int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
kthread_stop(sched->thread);
- kfree(sched);
- return 0;
}
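/*
 * Illustrative sketch (not part of the patch): the scheduler is no longer
 * kzalloc'ed by amd_sched_create() but embedded in the ring and initialised
 * in place, which is why the amdgpu side now passes &ring->sched around.
 * example_hw_ring is a simplified stand-in and 16 is an arbitrary submission
 * depth chosen only for the example.
 */
#include "gpu_scheduler.h"

struct example_hw_ring {
	struct amd_gpu_scheduler sched;	/* embedded instance */
	/* ... hardware ring state ... */
};

static int example_ring_init(struct example_hw_ring *ring,
			     struct amd_sched_backend_ops *ops)
{
	return amd_sched_init(&ring->sched, ops, 16, "example-ring");
}

static void example_ring_fini(struct example_hw_ring *ring)
{
	amd_sched_fini(&ring->sched);	/* stops the scheduler kthread */
}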
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2af0e4d4d817..80b64dc22214 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@ struct amd_sched_rq;
*/
struct amd_sched_entity {
struct list_head list;
- struct amd_sched_rq *belongto_rq;
- atomic_t fence_seq;
- /* the job_queue maintains the jobs submitted by clients */
- struct kfifo job_queue;
+ struct amd_sched_rq *rq;
+ struct amd_gpu_scheduler *sched;
+
spinlock_t queue_lock;
- struct amd_gpu_scheduler *scheduler;
+ struct kfifo job_queue;
+
+ atomic_t fence_seq;
uint64_t fence_context;
+
struct fence *dependency;
struct fence_cb cb;
};
@@ -62,13 +64,13 @@ struct amd_sched_rq {
struct amd_sched_fence {
struct fence base;
- struct amd_gpu_scheduler *scheduler;
+ struct fence_cb cb;
+ struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
};
struct amd_sched_job {
- struct fence_cb cb;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *job);
- struct fence *(*run_job)(struct amd_sched_job *job);
- void (*process_job)(struct amd_sched_job *job);
+ struct fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct fence *(*run_job)(struct amd_sched_job *sched_job);
};
/**
* One scheduler is implemented for each hardware ring
*/
struct amd_gpu_scheduler {
- struct task_struct *thread;
+ struct amd_sched_backend_ops *ops;
+ uint32_t hw_submission_limit;
+ const char *name;
struct amd_sched_rq sched_rq;
struct amd_sched_rq kernel_rq;
- atomic_t hw_rq_count;
- struct amd_sched_backend_ops *ops;
- uint32_t ring_id;
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
- uint32_t hw_submission_limit;
- char name[20];
- void *priv;
+ atomic_t hw_rq_count;
+ struct task_struct *thread;
};
-struct amd_gpu_scheduler *
-amd_sched_create(struct amd_sched_backend_ops *ops,
- uint32_t ring, uint32_t hw_submission, void *priv);
-int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_backend_ops *ops,
+ uint32_t hw_submission, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c37920e11..d802638094f4 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
if (fence == NULL)
return NULL;
fence->owner = owner;
- fence->scheduler = s_entity->scheduler;
+ fence->sched = s_entity->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- return (const char *)fence->scheduler->name;
+ return (const char *)fence->sched->name;
}
static bool amd_sched_fence_enable_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e23df5fd3836..bf27a07dbce3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
- cancel_work_sync(&mstb->mgr->work);
-
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
if (!port->input) {
port->vcpi.num_slots = 0;
kfree(port->cached_edid);
- /* we can't destroy the connector here, as
- we might be holding the mode_config.mutex
- from an EDID retrieval */
+ /*
+ * The only time we don't have a connector
+ * on an output port is if the connector init
+ * fails.
+ */
if (port->connector) {
+ /* we can't destroy the connector here, as
+ * we might be holding the mode_config.mutex
+ * from an EDID retrieval */
+
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
return;
}
+ /* no need to clean up the vcpi
+ * as without a connector we never set one up */
drm_dp_port_teardown_pdt(port, port->pdt);
-
- if (!port->input && port->vcpi.vcpi > 0)
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kfree(port);
-
- (*mgr->cbs->hotplug)(mgr);
}
static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
}
}
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
- struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+ int pnum,
char *proppath,
size_t proppath_size)
{
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
- snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ snprintf(temp, sizeof(temp), "-%d", pnum);
strlcat(proppath, temp, proppath_size);
}
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_port_teardown_pdt(port, old_pdt);
ret = drm_dp_port_setup_pdt(port);
- if (ret == true) {
+ if (ret == true)
drm_dp_send_link_address(mstb->mgr, port->mstb);
- port->mstb->link_address_sent = true;
- }
}
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
- if (port->port_num >= 8) {
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ if (!port->connector) {
+ /* remove it from the port list */
+ mutex_lock(&mstb->mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mstb->mgr->lock);
+ /* drop port list reference */
+ drm_dp_put_port(port);
+ goto out;
+ }
+ if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ drm_mode_connector_set_tile_property(port->connector);
}
+ (*mstb->mgr->cbs->register_connector)(port->connector);
}
+out:
/* put reference to this port */
drm_dp_put_port(port);
}
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent) {
+ if (!mstb->link_address_sent)
drm_dp_send_link_address(mgr, mstb);
- mstb->link_address_sent = true;
- }
+
list_for_each_entry(port, &mstb->ports, next) {
if (port->input)
continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
mutex_unlock(&mgr->qlock);
}
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
int len;
struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return -ENOMEM;
+ return;
txmsg->dst = mstb;
len = build_link_address(txmsg);
+ mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
}
(*mgr->cbs->hotplug)(mgr);
}
- } else
+ } else {
+ mstb->link_address_sent = false;
DRM_DEBUG_KMS("link address failed %d\n", ret);
+ }
kfree(txmsg);
- return 0;
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->work);
+ flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
- else
+ else {
edid = drm_get_edid(connector, &port->aux.ddc);
-
- drm_mode_connector_set_tile_property(connector);
+ drm_mode_connector_set_tile_property(connector);
+ }
drm_dp_put_port(port);
return edid;
}
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
struct drm_dp_mst_port *port;
-
+ bool send_hotplug = false;
/*
* Not a regular list traverse as we have to drop the destroy
* connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
kfree(port);
+ send_hotplug = true;
}
+ if (send_hotplug)
+ (*mgr->cbs->hotplug)(mgr);
}
/**
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
+ flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 418d299f3b12..ca08c472311b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
struct drm_crtc *crtc = mode_set->crtc;
int ret;
- if (crtc->funcs->cursor_set) {
+ if (crtc->funcs->cursor_set2) {
+ ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ if (ret)
+ error = true;
+ } else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
error = true;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9a860ca1e9d7..d93e7378c077 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d734780b31c0..a18164f2f6d2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable(), but it must be
+ * called from a context where the mode_config mutex is
+ * already held.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
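/*
 * Illustrative usage sketch (not part of the patch): pick the variant based
 * on whether mode_config.mutex is already held in the calling context.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

static void example_reenable_polling(struct drm_device *dev, bool mutex_held)
{
	if (mutex_held)
		drm_kms_helper_poll_enable_locked(dev);	/* e.g. from a probe/detect path */
	else
		drm_kms_helper_poll_enable(dev);	/* takes the mutex itself */
}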
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
- __drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
- __drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index cbdb78ef3bac..e6cbaca821a4 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,7 +37,6 @@
* DECON stands for Display and Enhancement controller.
*/
-#define DECON_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (adjusted_mode->vrefresh == 0)
- adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
- return true;
-}
-
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
- .mode_fixup = decon_mode_fixup,
.commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index d66ade0efac8..124fb9a56f02 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_disable(&dp->encoder);
- return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_enable(&dp->encoder);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
.of_match_table = exynos_dp_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index c68a6a2a9b57..7f55ba6771c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
{
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
int exynos_drm_device_subdrv_probe(struct drm_device *dev)
{
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
int exynos_drm_device_subdrv_remove(struct drm_device *dev)
{
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
{
@@ -111,7 +107,6 @@ err:
}
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
{
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
subdrv->close(dev, subdrv->dev, file);
}
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0872aa2f450f..ed28823d3b35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
exynos_crtc->ops->disable(exynos_crtc);
}
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->mode_fixup)
- return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
- adjusted_mode);
-
- return true;
-}
-
static void
exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.enable = exynos_drm_crtc_enable,
.disable = exynos_drm_crtc_disable,
- .mode_fixup = exynos_drm_crtc_mode_fixup,
.mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 831d2e4cacf9..ae9e6b2d3758 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
return 0;
}
+#endif
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
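
Note: the exynos changes in this series repeatedly move suspend/resume helpers under #ifdef CONFIG_PM(_SLEEP) so they do not trigger unused-function warnings when power management is compiled out. A minimal sketch of the usual guard layout (names are placeholders, not taken from the driver):

    #ifdef CONFIG_PM_SLEEP
    static int my_drv_suspend(struct device *dev)
    {
            /* quiesce the hardware */
            return 0;
    }

    static int my_drv_resume(struct device *dev)
    {
            /* re-program the hardware */
            return 0;
    }
    #endif

    static const struct dev_pm_ops my_drv_pm_ops = {
            /* expands to nothing when CONFIG_PM_SLEEP is disabled */
            SET_SYSTEM_SLEEP_PM_OPS(my_drv_suspend, my_drv_resume)
    };
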
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b7ba21dfb696..6c717ba672db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
*
* @enable: enable the device
* @disable: disable the device
- * @mode_fixup: fix mode data before applying it
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
void (*enable)(struct exynos_drm_crtc *crtc);
void (*disable)(struct exynos_drm_crtc *crtc);
- bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 2a652359af64..dd3a5e6d58c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
.set_addr = fimc_dst_set_addr,
};
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
- DRM_DEBUG_KMS("enable[%d]\n", enable);
-
- if (enable) {
- clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
- clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
- ctx->suspended = false;
- } else {
- clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
- clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
- ctx->suspended = true;
- }
-
- return 0;
-}
-
static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ if (enable) {
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = false;
+ } else {
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int fimc_suspend(struct device *dev)
{
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 750a9e6b9e8d..3d1aba67758b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -41,7 +41,6 @@
* CPU Interface.
*/
-#define FIMD_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (adjusted_mode->vrefresh == 0)
- adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
- return true;
-}
-
static void fimd_commit(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
return;
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
- writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+ writel(val, ctx->regs + DP_MIE_CLKCON);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable = fimd_enable,
.disable = fimd_disable,
- .mode_fixup = fimd_mode_fixup,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3734c34aed16..c17efdb238a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
g2d_put_cmdlist(g2d, node);
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
out:
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f12fbc36b120..407afedb6003 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
nr_pages = obj->size >> PAGE_SHIFT;
if (!is_drm_iommu_supported(dev)) {
- dma_addr_t start_addr;
- unsigned int i = 0;
-
obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
if (!obj->pages) {
DRM_ERROR("failed to allocate pages.\n");
return -ENOMEM;
}
+ }
- obj->cookie = dma_alloc_attrs(dev->dev,
- obj->size,
- &obj->dma_addr, GFP_KERNEL,
- &obj->dma_attrs);
- if (!obj->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
+ obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+ GFP_KERNEL, &obj->dma_attrs);
+ if (!obj->cookie) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ if (obj->pages)
drm_free_large(obj->pages);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
+
+ if (obj->pages) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
start_addr = obj->dma_addr;
while (i < nr_pages) {
- obj->pages[i] = phys_to_page(start_addr);
+ obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+ start_addr));
start_addr += PAGE_SIZE;
i++;
}
} else {
- obj->pages = dma_alloc_attrs(dev->dev, obj->size,
- &obj->dma_addr, GFP_KERNEL,
- &obj->dma_attrs);
- if (!obj->pages) {
- DRM_ERROR("failed to allocate buffer.\n");
- return -ENOMEM;
- }
+ obj->pages = obj->cookie;
}
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)obj->dma_addr, obj->size);
- if (!is_drm_iommu_supported(dev)) {
- dma_free_attrs(dev->dev, obj->size, obj->cookie,
- (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- drm_free_large(obj->pages);
- } else
- dma_free_attrs(dev->dev, obj->size, obj->pages,
- (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+ dma_free_attrs(dev->dev, obj->size, obj->cookie,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- obj->dma_addr = (dma_addr_t)NULL;
+ if (!is_drm_iommu_supported(dev))
+ drm_free_large(obj->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
* once dmabuf's refcount becomes 0.
*/
if (obj->import_attach)
- goto out;
-
- exynos_drm_free_buf(exynos_gem_obj);
-
-out:
- drm_gem_free_mmap_offset(obj);
+ drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+ else
+ exynos_drm_free_buf(exynos_gem_obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
- exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
return exynos_gem_obj->size;
}
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret < 0) {
+ drm_gem_object_release(obj);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+ }
+
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
}
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
-{ struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ unsigned int flags;
int ret;
/*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- if (is_drm_iommu_supported(dev)) {
- exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
- args->size);
- } else {
- exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
- args->size);
- }
+ if (is_drm_iommu_supported(dev))
+ flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+ else
+ flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
+ exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
if (IS_ERR(exynos_gem_obj)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
-out:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
err_close_vm:
drm_gem_vm_close(vma);
- drm_gem_free_mmap_offset(obj);
return ret;
}
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
goto err_free_large;
+ exynos_gem_obj->sgt = sgt;
+
if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index cd62f8410d1e..b62d1007c0e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -39,6 +39,7 @@
* - this address could be physical address without IOMMU and
* device address with IOMMU.
* @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
dma_addr_t dma_addr;
struct dma_attrs dma_attrs;
struct page **pages;
+ struct sg_table *sgt;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
- unsigned long size);
-
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 425e70625388..2f5c118f4c8e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 82be6b86a168..d1e300dcd544 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
- unsigned int index, value, ret;
+ unsigned int value;
+ int index, ret;
index = fsl_dcu_drm_plane_index(plane);
if (index < 0)
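
Note: the fsl-dcu fix matters because fsl_dcu_drm_plane_index() can return a negative error code; with index declared unsigned, the `index < 0` check could never fire. A tiny illustration of the trap (sketch only, hypothetical values):

    unsigned int bad_index = -EINVAL;   /* implicitly wraps to a huge value */
    if (bad_index < 0)                  /* always false: unsigned is never < 0 */
            pr_err("never printed\n");

    int good_index = -EINVAL;
    if (good_index < 0)                 /* behaves as intended */
            pr_err("error path taken\n");
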
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5a244ab9395b..39d73dbc1c47 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/*
+ * On HSW, the DSL reg (0x70000) appears to return 0 if we
+ * read it just before the start of vblank. So try it again
+ * so we don't accidentally end up spanning a vblank frame
+ * increment, causing the pipe_update_end() code to squawk at us.
+ *
+ * The nature of this problem means we can't simply check the ISR
+ * bit and return the vblank start value; nor can we use the scanline
+ * debug register in the transcoder as it appears to have the same
+ * problem. We may need to extend this to include other platforms,
+ * but so far testing only shows the problem on HSW.
+ */
+ if (IS_HASWELL(dev) && !position) {
+ int i, temp;
+
+ for (i = 0; i < 100; i++) {
+ udelay(1);
+ temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
+ DSL_LINEMASK_GEN3;
+ if (temp != position) {
+ position = temp;
+ break;
+ }
+ }
+ }
+
+ /*
* See update_scanline_offset() for the details on the
* scanline_offset adjustment.
*/
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 89c1a8ce1f98..2a5c76faf9f8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
- * @encoder: encoder on which to disable audio
+ * @intel_encoder: encoder on which to disable audio
*
* The disable sequences must be performed before disabling the transcoder or
* port.
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e437b3bb54..c19e669ffe50 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
int index = 0;
- u16 total, current_size;
+ u32 total, current_size;
u8 current_id;
/* skip to first section */
@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
current_size = *((const u16 *)(base + index));
index += 2;
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+ current_size = *((const u32 *)(base + index + 1));
+
if (index + current_size > total)
return NULL;
@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
return;
}
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 3) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+ return;
+ }
+
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
block_size = get_blocksize(sequence);
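
Note on the intel_bios.c hunk: from MIPI Sequence Block version 3 onward the 16-bit size in the common BDB section header is no longer authoritative, so find_section() reads a separate 32-bit size that sits right after the one-byte version field at the start of the block payload. A rough sketch of the layout as implied by the code above (field names are descriptive, not taken from the VBT specification):

    /*
     * u8  section_id;       -- already consumed
     * u16 section_size;     -- legacy size, unreliable for MIPI seq v3+
     * payload:
     *   u8  version;        -- base + index after the size was skipped
     *   u32 block_size;     -- only present when version >= 3
     *   ... sequence data ...
     */
    if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
            current_size = *((const u32 *)(base + index + 1));
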
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8cc9264f7809..cf418be7d30a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc,
plane_state = to_intel_plane_state(p->base.state);
- if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
plane_state->visible = primary_get_hw_state(crtc);
- else {
+ if (plane_state->visible)
+ crtc->base.state->plane_mask |=
+ 1 << drm_plane_index(&p->base);
+ } else {
if (active)
p->disable_plane(&p->base, &crtc->base);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e4be5a3becd..6ade06888432 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
+ return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
- return connector;
}
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
+ .register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
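
Note: the MST change splits connector creation from registration. .add_connector now only allocates and configures the connector, and the new .register_connector callback does the fbdev hookup and drm_connector_register() later, once the core is ready to publish it (radeon receives the same treatment further down). A minimal sketch of the callback pair a driver now provides; the my_* names are placeholders and error handling is omitted:

    static struct drm_connector *
    my_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                         struct drm_dp_mst_port *port, const char *pathprop)
    {
            struct drm_connector *connector;

            /* allocate and initialise the connector, attach properties ... */
            connector = NULL;       /* placeholder: a real driver allocates here */
            return connector;       /* not yet visible to userspace */
    }

    static void my_register_mst_connector(struct drm_connector *connector)
    {
            /* now safe: add to fbdev and expose the connector to userspace */
            drm_connector_register(connector);
    }

    static struct drm_dp_mst_topology_cbs my_mst_cbs = {
            .add_connector      = my_add_mst_connector,
            .register_connector = my_register_mst_connector,
            .destroy_connector  = my_destroy_mst_connector,   /* hypothetical */
            .hotplug            = my_mst_hotplug,             /* hypothetical */
    };
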
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 53c0173a39fe..b17785719598 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 72e0edd7bbde..7412caedcf7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & 0x07;
+ write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
if (read_pointer > write_pointer)
- write_pointer += 6;
+ write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
- (read_pointer % 6) * 8);
+ (read_pointer % GEN8_CSB_ENTRIES) * 8);
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
- (read_pointer % 6) * 8 + 4);
+ (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
spin_unlock(&ring->execlist_lock);
WARN(submit_contexts > 2, "More than two context complete events?\n");
- ring->next_context_status_buffer = write_pointer % 6;
+ ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+ _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+ ((u32)ring->next_context_status_buffer &
+ GEN8_CSB_PTR_MASK) << 8));
}
static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 next_context_status_buffer_hw;
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
- ring->next_context_status_buffer = 0;
+
+ /*
+ * Instead of resetting the Context Status Buffer (CSB) read pointer to
+ * zero, we need to read the write pointer from hardware and use its
+ * value because "this register is power context save restored".
+ * Effectively, these states have been observed:
+ *
+ * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+ * BDW | CSB regs not reset | CSB regs reset |
+ * CHT | CSB regs not reset | CSB regs not reset |
+ */
+ next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+ & GEN8_CSB_PTR_MASK);
+
+ /*
+ * When the CSB registers are reset (also after power-up / gpu reset),
+ * CSB write pointer is set to all 1's, which is not valid, use '5' in
+ * this special case, so the first element read is CSB[0].
+ */
+ if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+ next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+ ring->next_context_status_buffer = next_context_status_buffer_hw;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
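
Note for readers following the execlists hunk: the context status buffer (CSB) is a six-entry ring, so the IRQ handler unwraps the hardware write pointer before walking from its saved read pointer, and gen8_init_common_ring() now seeds that read pointer from the power-context-saved register instead of assuming 0. A condensed sketch of the walk, reusing the handler's local names and the new constants added in intel_lrc.h below:

    u32 read  = ring->next_context_status_buffer;
    u32 write = status_pointer & GEN8_CSB_PTR_MASK;

    if (read > write)
            write += GEN8_CSB_ENTRIES;      /* e.g. read = 4, write = 1 -> 7 */

    while (read < write) {
            read++;
            /* entry sits at (read % GEN8_CSB_ENTRIES) * 8 within the CSB */
    }

    ring->next_context_status_buffer = write % GEN8_CSB_ENTRIES;
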
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f9982a2..3c63bb32ad81 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,8 @@
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index af7fdb3bd663..7401cf90b0db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
if (power_well->data == SKL_DISP_PW_1) {
- intel_prepare_ddi(dev);
+ if (!dev_priv->power_domains.initializing)
+ intel_prepare_ddi(dev);
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 87de15ea1f93..b35b5b2db4ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
sysram = vmalloc(size);
if (!sysram)
- return -ENOMEM;
+ goto err_sysram;
info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_alloc_fbi;
+ }
info->par = mfbdev;
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
- return ret;
+ goto err_framebuffer_init;
mfbdev->sysram = sysram;
mfbdev->size = size;
@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d\n",
fb->width, fb->height);
+
return 0;
+
+err_framebuffer_init:
+ drm_fb_helper_release_fbi(helper);
+err_alloc_fbi:
+ vfree(sysram);
+err_sysram:
+ drm_gem_object_unreference_unlocked(gobj);
+
+ return ret;
}
static int mga_fbdev_destroy(struct drm_device *dev,
@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev)
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
if (ret)
- return ret;
+ goto err_fb_helper;
ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
if (ret)
- goto fini;
+ goto err_fb_setup;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
if (ret)
- goto fini;
+ goto err_fb_setup;
return 0;
-fini:
+err_fb_setup:
drm_fb_helper_fini(&mfbdev->helper);
+err_fb_helper:
+ mdev->mfbdev = NULL;
+
return ret;
}
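
Note: the mgag200 fbdev changes convert the early-return/single-label style into the usual kernel goto ladder, where each failure jumps to a label that unwinds exactly what has been set up so far. A generic sketch of the idiom (all names hypothetical):

    static int my_create(struct my_dev *mdev)
    {
            int ret;

            mdev->mem = vmalloc(4096);
            if (!mdev->mem)
                    return -ENOMEM;                 /* nothing to unwind yet */

            ret = my_setup_fb(mdev);
            if (ret)
                    goto err_free_mem;

            ret = my_register(mdev);
            if (ret)
                    goto err_teardown_fb;

            return 0;

    err_teardown_fb:
            my_teardown_fb(mdev);
    err_free_mem:
            vfree(mdev->mem);
            return ret;
    }
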
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index de06388069e7..b1a0f5656175 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
}
r = mgag200_mm_init(mdev);
if (r)
- goto out;
+ goto err_mm;
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
r = mgag200_modeset_init(mdev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto out;
+ goto err_modeset;
}
/* Make small buffers to store a hardware cursor (double buffered icon updates) */
@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
&mdev->cursor.pixels_1);
mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
&mdev->cursor.pixels_2);
- if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
- goto cursor_nospace;
- mdev->cursor.pixels_current = mdev->cursor.pixels_1;
- mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
- goto cursor_done;
- cursor_nospace:
- mdev->cursor.pixels_1 = NULL;
- mdev->cursor.pixels_2 = NULL;
- dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
- cursor_done:
-
-out:
- if (r)
- mgag200_driver_unload(dev);
+ if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
+ mdev->cursor.pixels_1 = NULL;
+ mdev->cursor.pixels_2 = NULL;
+ dev_warn(&dev->pdev->dev,
+ "Could not allocate space for cursors. Not doing hardware cursors.\n");
+ } else {
+ mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+ mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+ }
+
+ return 0;
+
+err_modeset:
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+err_mm:
+ dev->dev_private = NULL;
+
return r;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7c6225c84ba6..4649bd2ed340 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->hdisplay,
adjusted_mode->vdisplay);
- if (qcrtc->index == 0)
+ if (bo->is_primary == false)
recreate_primary = true;
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = ddev->dev_private;
- int connected;
+ bool connected = false;
/* The first monitor is always connected */
- connected = (output->index == 0) ||
- (qdev->client_monitors_config &&
- qdev->client_monitors_config->count > output->index &&
- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+ if (!qdev->client_monitors_config) {
+ if (output->index == 0)
+ connected = true;
+ } else
+ connected = qdev->client_monitors_config->count > output->index &&
+ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
DRM_DEBUG("#%d connected: %d\n", output->index, connected);
if (!connected)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c3872598b85a..65adb9c72377 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
}
break;
case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8319dae8358..f3f562f6d848 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
+ drm_modeset_lock_all(dev);
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
+ drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
+ drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
+ drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 5e09c061847f..6cddae44fa6e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
{
struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector;
struct drm_connector *connector;
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
+ return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
drm_modeset_unlock_all(dev);
drm_connector_register(connector);
- return connector;
}
static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
+ .register_connector = radeon_dp_register_mst_connector,
.destroy_connector = radeon_dp_destroy_mst_connector,
.hotplug = radeon_dp_mst_hotplug,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 7214858ffcea..1aa657fe31cb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,40 +48,10 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
- int ret;
-
- ret = drm_fb_helper_set_par(info);
-
- /* XXX: with universal plane support fbdev will automatically disable
- * all non-primary planes (including the cursor)
- */
- if (ret == 0) {
- struct drm_fb_helper *fb_helper = info->par;
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
- radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
- }
- }
-
- return ret;
-}
-
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = radeon_fb_helper_set_par,
+ .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 787cd8fd897f..e9115d3f67b0 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,6 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d9b7de25613..745e996d2dbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret)
return ret;
man = &bdev->man[mem_type];
+ if (!man->has_type || !man->use_type)
+ continue;
type_ok = ttm_bo_mt_compatible(man, mem_type, place,
&cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!type_ok)
continue;
+ type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (mem_type == TTM_PL_SYSTEM)
break;
- if (man->has_type && man->use_type) {
- type_found = true;
- ret = (*man->func->get_node)(man, bo, place, mem);
- if (unlikely(ret))
- return ret;
- }
+ ret = (*man->func->get_node)(man, bo, place, mem);
+ if (unlikely(ret))
+ return ret;
+
if (mem->mm_node)
break;
}
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- if (!type_found)
- return -EINVAL;
-
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret)
return ret;
man = &bdev->man[mem_type];
- if (!man->has_type)
+ if (!man->has_type || !man->use_type)
continue;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
continue;
+ type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret == -ERESTARTSYS)
has_erestartsys = true;
}
- ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
- return ret;
+
+ if (!type_found) {
+ printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+ return -EINVAL;
+ }
+
+ return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
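
Note: after the ttm_bo_mem_space() rework above, type_found is set as soon as any requested placement is compatible with an enabled memory manager, which lets the function tell "no usable memory type at all" (-EINVAL, now logged) apart from "compatible types exist but none had free space" (-ENOMEM, or -ERESTARTSYS if interrupted). In rough pseudocode, with hypothetical helper names rather than a drop-in rewrite:

    bool type_found = false;

    for_each_requested_placement(place) {           /* pseudocode iterator */
            if (!manager_enabled(place))
                    continue;                       /* the has_type/use_type check */
            if (!placement_compatible(place))
                    continue;                       /* ttm_bo_mt_compatible() */
            type_found = true;
            if (grab_space(place) == 0)
                    return 0;                       /* success */
    }

    if (!type_found)
            return -EINVAL;                         /* nothing was even compatible */
    return interrupted ? -ERESTARTSYS : -ENOMEM;
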
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 67720f70fe29..b49445df8a7e 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI
+ depends on DRM && PCI && X86
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 5ae8f921da2a..8a76821177a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
+ if (ret) {
+ (void) vmw_cmdbuf_man_process(man);
+ ret = drm_mm_insert_node_generic(&man->mm, info->node,
+ info->page_size, 0, 0,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ }
+
spin_unlock_bh(&man->lock);
info->done = !ret;
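
Note: the vmwgfx command-buffer hunk adds a one-shot back-off. When the drm_mm pool has no room, already-completed command buffers are processed (freeing their nodes) and the allocation is retried once before failing. The general shape of the pattern, with placeholder names standing in for the calls used above:

    ret = try_alloc(pool, size);                    /* placeholder names */
    if (ret) {
            reclaim_completed(pool);                /* cf. vmw_cmdbuf_man_process() */
            ret = try_alloc(pool, size);            /* single retry, then give up */
    }
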
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ce659a125f2b..092ea81eeff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
- int ret;
if (list_empty(&res->mob_head))
return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
-
- dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
- dev_priv->mmio_size);
-
- dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
- dev_priv->mmio_size);
+ dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+ dev_priv->mmio_size);
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
- arch_phys_wc_del(dev_priv->mmio_mtrr);
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
- arch_phys_wc_del(dev_priv->mmio_mtrr);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6d02de6dc36c..f19fd39b43e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
uint32_t initial_width;
uint32_t initial_height;
u32 __iomem *mmio_virt;
- int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf);
+ struct vmw_dma_buffer **p_dma_buf,
+ struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf,
uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_dma_buffer **out);
+ uint32_t id, struct vmw_dma_buffer **out,
+ struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index b56565457c96..5da5de0cb522 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+ NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+ NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61fb7f3de311..15a6c01cd016 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct drm_crtc *crtc;
u32 num_units = 0;
u32 i, k;
- int ret;
dirty->dev_priv = dev_priv;
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
if (!dirty->cmd) {
DRM_ERROR("Couldn't reserve fifo space "
"for dirty blits.\n");
- return ret;
+ return -ENOMEM;
}
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 76069f093ccf..222c9c2123a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1912f852b42..e57667ca7557 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
return ret;
}
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf)
+ struct vmw_dma_buffer **p_dma_buf,
+ struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}
*p_dma_buf = &user_bo->dma;
+ if (p_base) {
+ *p_base = &user_bo->prime.base;
+ kref_get(&(*p_base)->refcount);
+ }
*handle = user_bo->prime.base.hash.key;
out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
switch (arg->op) {
case drm_vmw_synccpu_grab:
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+ &buffer_base);
if (unlikely(ret != 0))
return ret;
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf);
+ ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&